8 Commits

Author SHA1 Message Date
wanglihui
b62131dacd Refactor code around the custom arangoRDD 2020-11-10 16:59:39 +08:00
wanglihui
db5ca9db08 Custom ArangoRDD 2020-10-26 09:54:14 +08:00
wanglihui
c211d99c2e Custom ArangoRDD 2020-10-23 10:02:28 +08:00
wanglihui
db8e764e00 Change global variables to local variables 2020-08-21 18:08:58 +08:00
wanglihui
31e19d7a0f Change arangoDb reads to paginated reads. 2020-08-20 09:56:56 +08:00
wanglihui
5be662f898 Change arangoDb reads to paginated reads. 2020-08-20 09:56:39 +08:00
wanglihui
1750549c7d Fix bug where DIST_CIP was not updated 2020-08-13 17:37:17 +08:00
wanglihui
5a039bb492 ignore scala.xml 2020-08-12 15:07:04 +08:00
65 changed files with 1768 additions and 11453 deletions

View File

@@ -39,6 +39,12 @@
<version>1.2.1</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
</dependency>
<dependency>
<groupId>com.arangodb</groupId>
<artifactId>arangodb-java-driver</artifactId>

View File

@@ -41,9 +41,11 @@ public class BaseArangoData {
map.put(i,new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
long[] timeRange = getTimeRange(table);
// long[] timeRange = getTimeRange(table);
Long countTotal = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
String sql = getQuerySql(timeRange, i, table);
// String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData =
new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
threadPool.executor(readHistoryArangoData);
@@ -56,6 +58,32 @@ public class BaseArangoData {
}
}
public <T extends BaseDocument> ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> readHistoryData(String table, Class<T> type){
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map = new ConcurrentHashMap<>();
try {
LOG.info("开始更新"+table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
map.put(i,new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
Long countTotal = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData =
new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info("读取"+table+" arangoDB 共耗时:"+(last-start));
}catch (Exception e){
e.printStackTrace();
LOG.error("读取历史数据失败 "+e.toString());
}
return map;
}
private long[] getTimeRange(String table){
long minTime = 0L;
long maxTime = 0L;
@@ -81,6 +109,30 @@ public class BaseArangoData {
}
private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;
String sql = "RETURN LENGTH("+table+")";
try {
ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
while (longs.hasNext()){
cnt = longs.next();
}
}catch (Exception e){
LOG.error(sql +"执行异常");
}
long last = System.currentTimeMillis();
LOG.info(sql+" 结果:"+cnt+" 执行时间:"+(last-start));
return cnt;
}
private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER + 1;
long offsetNum = threadNumber * sepNum;
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}
private String getQuerySql(long[] timeRange,int threadNumber,String table){
long minTime = timeRange[0];
long maxTime = timeRange[1];
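
The paginated read added above replaces the FIRST_FOUND_TIME range split: RETURN LENGTH(table) supplies a total document count, and each worker thread gets one LIMIT offset,count page of the collection. A minimal, self-contained sketch of that splitting, assuming an example collection name, count, and pool size (the real values come from ArangoDB and ApplicationConfig):

    // Illustrative only: mirrors getCountTotal/getQuerySql from the diff above.
    // "FQDN", 1_000_000 and 8 threads are assumed example values.
    public class PaginatedAqlSketch {
        public static void main(String[] args) {
            String table = "FQDN";          // assumed collection name
            long countTotal = 1_000_000L;   // in the real code: RETURN LENGTH(FQDN)
            int threads = 8;                // ApplicationConfig.THREAD_POOL_NUMBER

            long sepNum = countTotal / threads + 1;      // page size per thread
            for (int threadNumber = 0; threadNumber < threads; threadNumber++) {
                long offsetNum = threadNumber * sepNum;  // page start for this thread
                String aql = "FOR doc IN " + table + " limit " + offsetNum + "," + sepNum + " RETURN doc";
                System.out.println(aql);
            }
        }
    }

Because sepNum rounds up, the last thread's page can start past the end of the collection; AQL's LIMIT then simply returns fewer (or zero) documents for it.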

View File

@@ -73,5 +73,51 @@ public class BaseClickhouseData {
}
}
public <T extends BaseDocument> HashMap<Integer, HashMap<String,ArrayList<T>>> baseDocFromCk(Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc){
long start = System.currentTimeMillis();
HashMap<Integer, HashMap<String, ArrayList<T>>> newDataMap = initializeMap();
if (newDataMap == null){
return null;
}
String sql = getSqlSupplier.get();
try {
connection = manger.getConnection();
statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql);
int i = 0;
while (resultSet.next()) {
T newDoc = formatResultFunc.apply(resultSet);
if (newDoc != null) {
i+=1;
putMapByHashcode(newDoc, newDataMap);
}
}
long last = System.currentTimeMillis();
LOG.info(sql + "\n读取"+i+"条数据,运行时间:" + (last - start));
}catch (Exception e){
e.printStackTrace();
LOG.error("获取原始数据失败 "+e.toString());
}finally {
manger.clear(statement,connection);
}
return newDataMap;
}
private <T extends BaseDocument> HashMap<Integer, HashMap<String,ArrayList<T>>> initializeMap(){
try {
HashMap<Integer, HashMap<String, ArrayList<T>>> newDataMap = new HashMap<>();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
newDataMap.put(i, new HashMap<>());
}
return newDataMap;
}catch (Exception e){
e.printStackTrace();
LOG.error("数据初始化失败 "+e.toString());
return null;
}
}
}
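
Both this ClickHouse map and the ArangoDB history map returned by readHistoryData are partitioned the same way: putMapByHashcode and ReadHistoryArangoData bucket each document key by Math.abs(key.hashCode()) % THREAD_POOL_NUMBER, so the same key always lands in the same partition on both sides and update thread i only touches newData.get(i) and historyData.get(i). A small sketch of that invariant, assuming a pool size of 8 and made-up keys:

    // Illustrative only: shows the hashcode-based partitioning shared by the
    // ClickHouse and ArangoDB read paths; 8 and the keys below are assumed values.
    import java.util.HashMap;
    import java.util.List;

    public class PartitionSketch {
        static int bucket(String key, int threads) {
            return Math.abs(key.hashCode()) % threads;  // same formula on both read paths
        }

        public static void main(String[] args) {
            int threads = 8;  // ApplicationConfig.THREAD_POOL_NUMBER in the real code
            HashMap<Integer, List<String>> partitions = new HashMap<>();
            for (int i = 0; i < threads; i++) {
                partitions.put(i, new java.util.ArrayList<>());
            }
            for (String key : List.of("example.com", "10.0.0.1", "example.com-10.0.0.1")) {
                partitions.get(bucket(key, threads)).add(key);
            }
            // A key from ClickHouse and the same key from ArangoDB end up in the same
            // bucket, so thread i can merge its two partitions without locking.
            System.out.println(partitions);
        }
    }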

View File

@@ -44,20 +44,16 @@ public class UpdateGraphData {
long start = System.currentTimeMillis();
try {
updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN",
updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
updateDocument(newVertexIpMap,historyVertexIpMap,"IP",
updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER",
updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER", Subscriber.class,BaseDocument.class,
Subscriber.class,BaseDocument.class,
ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);
updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP",
updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);
// updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
@@ -79,6 +75,41 @@ public class UpdateGraphData {
}
}
public void updateArango2(){
long start = System.currentTimeMillis();
try {
updateDocument("FQDN", Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
updateDocument("IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
updateDocument("SUBSCRIBER", Subscriber.class,BaseDocument.class,
ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);
updateDocument("R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);
// updateDocument("R_VISIT_IP2FQDN",
// VisitIp2Fqdn.class,BaseEdgeDocument.class,
// ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);
updateDocument("R_LOCATE_SUBSCRIBER2IP",
LocateSubscriber2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);
long last = System.currentTimeMillis();
LOG.info("iplearning application运行完毕用时"+(last - start));
}catch (Exception e){
e.printStackTrace();
}finally {
arangoManger.clean();
pool.shutdown();
}
}
private <T extends BaseDocument> void updateDocument(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
@@ -120,5 +151,45 @@ public class UpdateGraphData {
}
}
private <T extends BaseDocument> void updateDocument(String collection,
Class<? extends Document<T>> taskType,
Class<T> docmentType,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc){
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyData = baseArangoData.readHistoryData(collection, docmentType);
LOG.info(collection+" 读取clickhouse,封装结果集");
HashMap<Integer, HashMap<String, ArrayList<T>>> newData = baseClickhouseData.baseDocFromCk(getSqlSupplier, formatResultFunc);
try {
LOG.info(collection+" 开始更新");
long start = System.currentTimeMillis();
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
HashMap<String, ArrayList<T>> tmpNewMap = newData.get(i);
ConcurrentHashMap<String, T> tmpHisMap = historyData.get(i);
Constructor constructor = taskType.getConstructor(
HashMap.class,
ArangoDBConnect.class,
String.class,
ConcurrentHashMap.class,
CountDownLatch.class);
Document docTask = (Document)constructor.newInstance(tmpNewMap, arangoManger, collection, tmpHisMap, countDownLatch);
pool.executor(docTask);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info(collection+" 更新完毕,共耗时:"+(last-start));
}catch (Exception e){
e.printStackTrace();
LOG.error("更新"+collection+"失败!!"+e.toString());
}finally {
newData.clear();
historyData.clear();
}
}
}
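
The collection-only updateDocument overload above creates each per-thread task reflectively, so it relies on every Document subclass (Fqdn, Ip, Subscriber, LocateFqdn2Ip, ...) declaring a public constructor with exactly the signature passed to getConstructor. A dependency-free sketch of that pattern follows; Task and FqdnTask are hypothetical stand-ins, and ArangoDBConnect is left out of the signature here only to keep the example runnable on its own:

    // Illustrative only: mimics the reflective task construction in updateDocument.
    // Task and FqdnTask are made-up stand-ins, not the project's classes.
    import java.lang.reflect.Constructor;
    import java.util.HashMap;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CountDownLatch;

    public class ReflectiveTaskSketch {
        public static class Task extends Thread {
            public Task(HashMap<String, String> newData, String collection,
                        ConcurrentHashMap<String, String> history, CountDownLatch latch) {
            }
        }

        public static class FqdnTask extends Task {
            // getConstructor(...) below only finds a public constructor with exactly this signature.
            public FqdnTask(HashMap<String, String> newData, String collection,
                            ConcurrentHashMap<String, String> history, CountDownLatch latch) {
                super(newData, collection, history, latch);
            }
        }

        public static void main(String[] args) throws Exception {
            Class<? extends Task> taskType = FqdnTask.class;
            Constructor<? extends Task> ctor = taskType.getConstructor(
                    HashMap.class, String.class, ConcurrentHashMap.class, CountDownLatch.class);
            Task task = ctor.newInstance(new HashMap<>(), "FQDN",
                    new ConcurrentHashMap<>(), new CountDownLatch(1));
            task.start();
        }
    }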

View File

@@ -264,8 +264,8 @@ public class ReadClickhouseData {
public static String getVertexIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
}

View File

@@ -50,6 +50,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
long s = System.currentTimeMillis();
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
ArrayList<T> list = new ArrayList<>();
List<T> baseDocuments = docs.asListRemaining();
int i = 0;
for (T doc : baseDocuments) {
@@ -58,9 +59,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
case "R_LOCATE_FQDN2IP": case "R_LOCATE_FQDN2IP":
updateProtocolDocument(doc); updateProtocolDocument(doc);
deleteDistinctClientIpByTime(doc); deleteDistinctClientIpByTime(doc);
break; list.add(doc);
case "R_VISIT_IP2FQDN":
updateProtocolDocument(doc);
break; break;
default: default:
} }
@@ -69,6 +68,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
tmpMap.put(key, doc);
i++;
}
arangoConnect.overwrite(list,table);
long l = System.currentTimeMillis();
LOG.info(query + "\n读取" + i + "条数据,运行时间:" + (l - s));
}
@@ -99,10 +99,11 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
Collections.reverse(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
String[] distCipArr = new String[index];
long[] disCipTsArr = new long[index];
if (distCip.size() + 1 == distCipTs.size()){
if (index != 0 && distCip.size() + 1 == distCipTs.size()){
for (int i = 0; i < index; i++) {
distCipArr[i] = distCip.get(i);
disCipTsArr[i] = distCipTs.get(i);
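
The DIST_CIP trimming above works because, once the cutoff timestamp is appended and the list is sorted and then reversed into descending order, indexOf(cutoff) equals the number of timestamps newer than the recent window, and only that many client IPs are kept. A tiny illustration of the index computation, assuming RECENT_COUNT_HOUR = 24 and made-up timestamps:

    // Illustrative only: shows why sort + reverse + indexOf yields the count of
    // timestamps newer than the cutoff. All values below are arbitrary examples.
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;

    public class DistCipCutoffSketch {
        public static void main(String[] args) {
            long currentHour = 1_600_000_000L;      // assumed "current hour" in epoch seconds
            long cutoff = currentHour - 24 * 3600;  // assumed RECENT_COUNT_HOUR = 24

            ArrayList<Long> distCipTs = new ArrayList<>(
                    Arrays.asList(currentHour - 3600, currentHour - 90_000, currentHour - 7200));
            distCipTs.add(cutoff);                  // sentinel, as in deleteDistinctClientIpByTime
            Collections.sort(distCipTs);            // ascending
            Collections.reverse(distCipTs);         // descending: newest first
            int index = distCipTs.indexOf(cutoff);  // entries before it are newer than the cutoff

            System.out.println(distCipTs);
            System.out.println("recent client-IP entries to keep: " + index);  // prints 2 here
        }
    }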

View File

@@ -6,7 +6,8 @@ public class IpLearningApplicationTest {
public static void main(String[] args) {
UpdateGraphData updateGraphData = new UpdateGraphData();
updateGraphData.updateArango();
// updateGraphData.updateArango();
updateGraphData.updateArango2();
}
}

View File

@@ -17,7 +17,7 @@ thread.await.termination.time=10
#读取clickhouse时间范围方式0读取过去一小时1指定时间范围
time.limit.type=1
time.limit.type=0
read.clickhouse.max.time=1596684142
read.clickhouse.min.time=1596425769

View File

@@ -11,6 +11,7 @@ import java.util.List;
public class TestList {
public static void main(String[] args) {
/*
ArangoDBConnect arangoConnect = ArangoDBConnect.getInstance();
ArangoCursor<BaseEdgeDocument> documents = arangoConnect.executorQuery("FOR doc IN R_LOCATE_FQDN2IP filter doc.FIRST_FOUND_TIME >= 1596080839 and doc.FIRST_FOUND_TIME <= 1596395473 RETURN doc", BaseEdgeDocument.class);
List<BaseEdgeDocument> baseEdgeDocuments = documents.asListRemaining();
@@ -18,8 +19,8 @@ public class TestList {
doc.updateAttribute("PROTOCOL_TYPE","123");
}
*/
/*
ArrayList<Integer> integers = new ArrayList<>();
integers.add(10);
integers.add(8);
@@ -39,7 +40,9 @@ public class TestList {
integers.add(5);
Collections.sort(integers);
System.out.println(integers);
Collections.reverse(integers);
System.out.println(integers);
System.out.println(integers.indexOf(5));
*/
}
}

View File

@@ -0,0 +1,53 @@
package cn.ac.iie;
import cn.ac.iie.dao.BaseArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.junit.After;
import org.junit.Test;
import java.util.Enumeration;
import java.util.concurrent.ConcurrentHashMap;
public class TestReadArango {
private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();
private static BaseArangoData baseArangoData = new BaseArangoData();
@Test
public void testReadFqdnFromArango() {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyData =
baseArangoData.readHistoryData("FQDN", BaseDocument.class);
printMap(historyData);
}
@Test
public void testReadFqdnLocIpFromArango() {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> ip =
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", BaseEdgeDocument.class);
printMap(ip);
}
private <T extends BaseDocument> void printMap(ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyData) {
ConcurrentHashMap<String, T> map = historyData.get(2);
Enumeration<String> keys = map.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement();
T document = map.get(key);
System.out.println(document.toString());
}
}
@After
public void clearSource() {
pool.shutdown();
arangoManger.clean();
}
}

View File

@@ -0,0 +1,46 @@
package cn.ac.iie;
import cn.ac.iie.dao.BaseClickhouseData;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.junit.Test;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Set;
public class TestReadClickhouse {
private static BaseClickhouseData baseClickhouseData = new BaseClickhouseData();
@Test
public void testReadFqdnFromCk(){
HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newData =
baseClickhouseData.baseDocFromCk(ReadClickhouseData::getVertexFqdnSql,
ReadClickhouseData::getVertexFqdnDocument);
printMap(newData);
}
@Test
public void testReadFqdnLocIpFromCk(){
HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> map =
baseClickhouseData.baseDocFromCk(ReadClickhouseData::getRelationshipFqdnAddressIpSql,
ReadClickhouseData::getRelationFqdnAddressIpDocument);
printMap(map);
}
private<T extends BaseDocument> void printMap(HashMap<Integer, HashMap<String, ArrayList<T>>> newData){
HashMap<String, ArrayList<T>> map = newData.get(1);
Set<String> strings = map.keySet();
for (String key:strings){
ArrayList<T> baseDocuments = map.get(key);
System.out.println(baseDocuments.get(0));
}
}
}

View File

@@ -1,9 +0,0 @@
# Created by .ignore support plugin (hsz.mobi)
### Example user template template
### Example user template
# IntelliJ project files
.idea
*.iml
target
logs/

View File

@@ -1,90 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>cn.ac.iie</groupId>
<artifactId>ip-learning-java-test</artifactId>
<version>1.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.21</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.21</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>0.2.4</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.10</version>
</dependency>
<dependency>
<groupId>com.typesafe</groupId>
<artifactId>config</artifactId>
<version>1.2.1</version>
</dependency>
<dependency>
<groupId>com.arangodb</groupId>
<artifactId>arangodb-java-driver</artifactId>
<version>6.6.3</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.6</version>
<configuration>
<archive>
<manifest>
<mainClass>cn.ac.iie.test.IpLearningApplicationTest</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,39 +0,0 @@
package cn.ac.iie.config;
import cn.ac.iie.utils.ConfigUtils;
public class ApplicationConfig {
public static final String ARANGODB_HOST = ConfigUtils.getStringProperty( "arangoDB.host");
public static final Integer ARANGODB_PORT = ConfigUtils.getIntProperty("arangoDB.port");
public static final String ARANGODB_USER = ConfigUtils.getStringProperty( "arangoDB.user");
public static final String ARANGODB_PASSWORD = ConfigUtils.getStringProperty( "arangoDB.password");
public static final String ARANGODB_DB_NAME = ConfigUtils.getStringProperty( "arangoDB.DB.name");
public static final Integer ARANGODB_TTL = ConfigUtils.getIntProperty( "arangoDB.ttl");
public static final Integer ARANGODB_BATCH = ConfigUtils.getIntProperty( "arangoDB.batch");
public static final Integer UPDATE_ARANGO_BATCH =ConfigUtils.getIntProperty("update.arango.batch");
public static final Integer THREAD_POOL_NUMBER = ConfigUtils.getIntProperty( "thread.pool.number");
public static final Integer THREAD_AWAIT_TERMINATION_TIME = ConfigUtils.getIntProperty( "thread.await.termination.time");
public static final Long READ_CLICKHOUSE_MAX_TIME = ConfigUtils.getLongProperty("read.clickhouse.max.time");
public static final Long READ_CLICKHOUSE_MIN_TIME = ConfigUtils.getLongProperty("read.clickhouse.min.time");
public static final Integer CLICKHOUSE_TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("clickhouse.time.limit.type");
public static final Integer UPDATE_INTERVAL = ConfigUtils.getIntProperty("update.interval");
public static final Integer DISTINCT_CLIENT_IP_NUM = ConfigUtils.getIntProperty("distinct.client.ip.num");
public static final Integer RECENT_COUNT_HOUR = ConfigUtils.getIntProperty("recent.count.hour");
public static final String TOP_DOMAIN_FILE_NAME = ConfigUtils.getStringProperty("top.domain.file.name");
public static final String ARANGODB_READ_LIMIT = ConfigUtils.getStringProperty("arangoDB.read.limit");
public static final Integer ARANGO_TIME_LIMIT_TYPE = ConfigUtils.getIntProperty("arango.time.limit.type");
public static final Long READ_ARANGO_MAX_TIME = ConfigUtils.getLongProperty("read.arango.max.time");
public static final Long READ_ARANGO_MIN_TIME = ConfigUtils.getLongProperty("read.arango.min.time");
}

View File

@@ -1,103 +0,0 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
* 获取arangoDB历史数据
*
* @author wlh
*/
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
<T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
try {
LOG.info("开始更新" + table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
historyMap.put(i, new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
long[] timeRange = getTimeRange(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
String sql = getQuerySql(timeRange, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info("读取" + table + " arangoDB 共耗时:" + (last - start));
} catch (Exception e) {
e.printStackTrace();
}
}
private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME;
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME;
break;
default:
}
long lastTime = System.currentTimeMillis();
LOG.info(sql + "\n查询最大最小时间用时" + (lastTime - startTime));
return new long[]{minTime, maxTime};
}
private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER;
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT + " RETURN doc";
}
}

View File

@@ -1,79 +0,0 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ClickhouseConnect;
import com.alibaba.druid.pool.DruidPooledConnection;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.function.Function;
import java.util.function.Supplier;
import static cn.ac.iie.service.read.ReadClickhouseData.putMapByHashcode;
/**
* 读取clickhouse数据封装到map
* @author wlh
*/
public class BaseClickhouseData {
private static final Logger LOG = LoggerFactory.getLogger(BaseClickhouseData.class);
static HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newVertexFqdnMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newVertexIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String,ArrayList<BaseDocument>>> newVertexSubscriberMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationFqdnAddressIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationIpVisitFqdnMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationSubsciberLocateIpMap = new HashMap<>();
static HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> newRelationFqdnSameFqdnMap = new HashMap<>();
private static ClickhouseConnect manger = ClickhouseConnect.getInstance();
private DruidPooledConnection connection;
private Statement statement;
<T extends BaseDocument> void baseDocumentFromClickhouse(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc){
long start = System.currentTimeMillis();
initializeMap(newMap);
String sql = getSqlSupplier.get();
LOG.info(sql);
try {
connection = manger.getConnection();
statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql);
int i = 0;
while (resultSet.next()) {
T newDoc = formatResultFunc.apply(resultSet);
if (newDoc != null) {
i+=1;
putMapByHashcode(newDoc, newMap);
}
}
long last = System.currentTimeMillis();
LOG.info("读取"+i+"条数据,运行时间:" + (last - start));
}catch (Exception e){
e.printStackTrace();
}finally {
manger.clear(statement,connection);
}
}
private <T extends BaseDocument> void initializeMap(HashMap<Integer, HashMap<String,ArrayList<T>>> map){
try {
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
map.put(i, new HashMap<>(16));
}
}catch (Exception e){
e.printStackTrace();
LOG.error("初始化数据失败");
}
}
}

View File

@@ -1,116 +0,0 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadClickhouseData;
import cn.ac.iie.service.update.Document;
import cn.ac.iie.service.update.relationship.LocateFqdn2Ip;
import cn.ac.iie.service.update.relationship.SameFqdn2Fqdn;
import cn.ac.iie.service.update.relationship.VisitIp2Fqdn;
import cn.ac.iie.service.update.vertex.Fqdn;
import cn.ac.iie.service.update.vertex.Ip;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;
import java.util.function.Supplier;
import static cn.ac.iie.dao.BaseArangoData.*;
import static cn.ac.iie.dao.BaseClickhouseData.*;
/**
* 更新图数据库业务类
* @author wlh
*/
public class UpdateGraphData {
private static final Logger LOG = LoggerFactory.getLogger(UpdateGraphData.class);
private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();
private static BaseArangoData baseArangoData = new BaseArangoData();
private static BaseClickhouseData baseClickhouseData = new BaseClickhouseData();
public void updateArango(){
long start = System.currentTimeMillis();
try {
updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN",
Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
updateDocument(newVertexIpMap,historyVertexIpMap,"IP",
Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP",
LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);
updateDocument(newRelationIpVisitFqdnMap,historyRelationIpVisitFqdnMap,"R_VISIT_IP2FQDN",
VisitIp2Fqdn.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);
updateDocument(newRelationFqdnSameFqdnMap,historyRelationFqdnSameFqdnMap,"R_SAME_ORIGIN_FQDN2FQDN",
SameFqdn2Fqdn.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnSameFqdnSql,ReadClickhouseData::getRelationshipFqdnSameFqdnDocument);
long last = System.currentTimeMillis();
LOG.info("更新图数据库时间共计:"+(last - start));
}catch (Exception e){
e.printStackTrace();
}finally {
arangoManger.clean();
pool.shutdown();
}
}
private <T extends BaseDocument> void updateDocument(HashMap<Integer, HashMap<String, ArrayList<T>>> newMap,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
String collection,
Class<? extends Document<T>> taskType,
Class<T> docmentType,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc) {
try {
baseArangoData.readHistoryData(collection,historyMap,docmentType);
LOG.info(collection+" 读取clickhouse,封装结果集");
baseClickhouseData.baseDocumentFromClickhouse(newMap, getSqlSupplier,formatResultFunc);
LOG.info(collection+" 开始更新");
long start = System.currentTimeMillis();
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
HashMap<String, ArrayList<T>> tmpNewMap = newMap.get(i);
ConcurrentHashMap<String, T> tmpHisMap = historyMap.get(i);
Constructor constructor = taskType.getConstructor(
HashMap.class,
ArangoDBConnect.class,
String.class,
ConcurrentHashMap.class,
CountDownLatch.class);
Document docTask = (Document)constructor.newInstance(tmpNewMap, arangoManger, collection, tmpHisMap, countDownLatch);
pool.executor(docTask);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info(collection+" 更新完毕,共耗时:"+(last-start));
}catch (Exception e){
e.printStackTrace();
}finally {
newMap.clear();
historyMap.clear();
}
}
}

View File

@@ -1,338 +0,0 @@
package cn.ac.iie.service.read;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.TopDomainUtils;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.regex.Pattern;
/**
* @author wlh
*/
public class ReadClickhouseData {
public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
private static Pattern pattern = Pattern.compile("^[\\d]*$");
private static final Logger LOG = LoggerFactory.getLogger(ReadClickhouseData.class);
private static long[] timeLimit = getTimeLimit();
private static long maxTime = timeLimit[0];
private static long minTime = timeLimit[1];
public static final Integer DISTINCT_CLIENT_IP_NUM = ApplicationConfig.DISTINCT_CLIENT_IP_NUM;
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR;
public static final HashSet<String> PROTOCOL_SET;
static {
PROTOCOL_SET = new HashSet<>();
PROTOCOL_SET.add("HTTP");
PROTOCOL_SET.add("TLS");
PROTOCOL_SET.add("DNS");
}
public static BaseDocument getVertexFqdnDocument(ResultSet resultSet){
BaseDocument newDoc = null;
try {
String fqdnOrReferer = resultSet.getString("FQDN");
String fqdnName = TopDomainUtils.getDomainFromUrl(fqdnOrReferer);
if (isDomain(fqdnName)) {
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
newDoc = new BaseDocument();
newDoc.setKey(fqdnName);
newDoc.addAttribute("FQDN_NAME", fqdnName);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
}
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseDocument getVertexIpDocument(ResultSet resultSet){
BaseDocument newDoc = new BaseDocument();
try {
String ip = resultSet.getString("IP");
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long sessionCount = resultSet.getLong("SESSION_COUNT");
long bytesSum = resultSet.getLong("BYTES_SUM");
String ipType = resultSet.getString("ip_type");
newDoc.setKey(ip);
newDoc.addAttribute("IP", ip);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
switch (ipType) {
case "client":
newDoc.addAttribute("CLIENT_SESSION_COUNT", sessionCount);
newDoc.addAttribute("CLIENT_BYTES_SUM", bytesSum);
newDoc.addAttribute("SERVER_SESSION_COUNT", 0L);
newDoc.addAttribute("SERVER_BYTES_SUM", 0L);
break;
case "server":
newDoc.addAttribute("SERVER_SESSION_COUNT", sessionCount);
newDoc.addAttribute("SERVER_BYTES_SUM", bytesSum);
newDoc.addAttribute("CLIENT_SESSION_COUNT", 0L);
newDoc.addAttribute("CLIENT_BYTES_SUM", 0L);
break;
default:
newDoc.addAttribute("SERVER_SESSION_COUNT", 0L);
newDoc.addAttribute("SERVER_BYTES_SUM", 0L);
newDoc.addAttribute("CLIENT_SESSION_COUNT", 0L);
newDoc.addAttribute("CLIENT_BYTES_SUM", 0L);
break;
}
// newDoc.addAttribute("COMMON_LINK_INFO", "");
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseDocument getVertexSubscriberDocument(ResultSet resultSet){
BaseDocument newDoc = new BaseDocument();
try {
String subscriberId = resultSet.getString("common_subscriber_id");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
newDoc.setKey(subscriberId);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationshipSubsciberLocateIpDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = new BaseEdgeDocument();
try {
String subscriberId = resultSet.getString("common_subscriber_id");
String framedIp = resultSet.getString("radius_framed_ip");
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String key = subscriberId + "-" + framedIp;
newDoc.setKey(key);
newDoc.setFrom("SUBSCRIBER/" + subscriberId);
newDoc.setTo("IP/" + framedIp);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("COUNT_TOTAL", countTotal);
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationFqdnAddressIpDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String vFqdn = resultSet.getString("FQDN");
if (isDomain(vFqdn)) {
String vIp = resultSet.getString("common_server_ip");
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String[] distCipRecents = (String[]) resultSet.getArray("DIST_CIP_RECENT").getArray();
long[] clientIpTs = new long[distCipRecents.length];
for (int i = 0; i < clientIpTs.length; i++) {
clientIpTs[i] = currentHour;
}
String key = vFqdn + "-" + vIp;
newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("FQDN/" + vFqdn);
newDoc.setTo("IP/" + vIp);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("CNT_TOTAL",countTotal);
newDoc.addAttribute("DIST_CIP", distCipRecents);
newDoc.addAttribute("DIST_CIP_TS", clientIpTs);
}
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationshipFqdnSameFqdnDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String domainFqdn = resultSet.getString("domainFqdn");
String referer = resultSet.getString("referer");
String refererFqdn = TopDomainUtils.getDomainFromUrl(referer);
if (isDomain(refererFqdn) && isDomain(domainFqdn)){
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
String key = domainFqdn + "-" + refererFqdn;
newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("FQDN/" + domainFqdn);
newDoc.setTo("FQDN/" + refererFqdn);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
newDoc.addAttribute("CNT_TOTAL",countTotal);
}
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static BaseEdgeDocument getRelationIpVisitFqdnDocument(ResultSet resultSet){
BaseEdgeDocument newDoc = null;
try {
String vFqdn = resultSet.getString("FQDN");
if (isDomain(vFqdn)) {
String vIp = resultSet.getString("common_client_ip");
String key = vIp + "-" + vFqdn;
long firstFoundTime = resultSet.getLong("FIRST_FOUND_TIME");
long lastFoundTime = resultSet.getLong("LAST_FOUND_TIME");
long countTotal = resultSet.getLong("COUNT_TOTAL");
newDoc = new BaseEdgeDocument();
newDoc.setKey(key);
newDoc.setFrom("IP/" + vIp);
newDoc.setTo("FQDN/" + vFqdn);
newDoc.addAttribute("CNT_TOTAL",countTotal);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
}
}catch (Exception e){
e.printStackTrace();
}
return newDoc;
}
public static <T extends BaseDocument> void putMapByHashcode(T newDoc, HashMap<Integer, HashMap<String, ArrayList<T>>> map) {
if (newDoc != null) {
String key = newDoc.getKey();
int i = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
HashMap<String, ArrayList<T>> documentHashMap = map.getOrDefault(i, new HashMap<>());
ArrayList<T> documentArrayList = documentHashMap.getOrDefault(key, new ArrayList<>());
documentArrayList.add(newDoc);
documentHashMap.put(key, documentArrayList);
}
}
private static boolean isDomain(String fqdn) {
try {
if (fqdn == null || fqdn.length() == 0){
return false;
}
if (fqdn.contains(":")){
String s = fqdn.split(":")[0];
if (s.contains(":")){
return false;
}
}
String[] fqdnArr = fqdn.split("\\.");
if (fqdnArr.length < 4 || fqdnArr.length > 4) {
return true;
}
for (String f : fqdnArr) {
if (pattern.matcher(f).matches()) {
long i = Long.parseLong(f);
if (i < 0 || i > 255) {
return true;
}
} else {
return true;
}
}
} catch (Exception e) {
LOG.error("解析域名 " + fqdn + " 失败:\n" + e.toString());
}
return false;
}
private static void checkSchemaProperty(BaseEdgeDocument newDoc, String schema, long countTotal) {
long[] recentCnt = new long[RECENT_COUNT_HOUR];
recentCnt[0] = countTotal;
for (String protocol: PROTOCOL_SET){
String protocolRecent = protocol +"_CNT_RECENT";
String protocolTotal = protocol + "_CNT_TOTAL";
if (protocol.equals(schema)){
newDoc.addAttribute(protocolTotal, countTotal);
newDoc.addAttribute(protocolRecent, recentCnt);
}else {
newDoc.addAttribute(protocolTotal, 0L);
newDoc.addAttribute(protocolRecent, new long[RECENT_COUNT_HOUR]);
}
}
}
public static String getVertexFqdnSql() {
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime;
String mediaDomainSql = "SELECT s1_domain AS FQDN,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME FROM media_expire_patch WHERE "+where+" and s1_domain != '' GROUP BY s1_domain";
String refererSql = "SELECT s1_referer AS FQDN,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME FROM media_expire_patch WHERE "+where+" and s1_referer != '' GROUP BY s1_referer";
return "SELECT * FROM((" + mediaDomainSql + ") UNION ALL (" + refererSql + "))";
}
public static String getVertexIpSql() {
String where = " recv_time >= " + minTime + " AND recv_time < " + maxTime;
String clientIpSql = "SELECT s1_s_ip AS IP, MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(media_len) as BYTES_SUM,'client' as ip_type FROM media_expire_patch where " + where + " group by IP";
String serverIpSql = "SELECT s1_d_ip AS IP, MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(media_len) as BYTES_SUM,'server' as ip_type FROM media_expire_patch where " + where + " group by IP";
return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
}
public static String getRelationshipFqdnAddressIpSql() {
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_d_ip != '' ";
return "SELECT s1_domain AS FQDN,s1_d_ip AS common_server_ip,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(s1_s_ip) AS DIST_CIP_RECENT FROM media_expire_patch WHERE "+where+" GROUP BY s1_d_ip,s1_domain";
}
public static String getRelationshipFqdnSameFqdnSql(){
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime +" AND s1_domain != '' AND s1_referer != '' ";
return "SELECT s1_domain AS domainFqdn,s1_referer AS referer,MIN(recv_time) AS FIRST_FOUND_TIME,MAX(recv_time) AS LAST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL FROM media_expire_patch where "+where+" GROUP BY s1_domain,s1_referer";
}
public static String getRelationshipIpVisitFqdnSql() {
String where = "recv_time >= "+minTime+" and recv_time <= "+maxTime+" AND s1_s_ip != '' AND s1_domain != '' ";
return "SELECT s1_s_ip AS common_client_ip,s1_domain AS FQDN,MIN( recv_time ) AS FIRST_FOUND_TIME,MAX( recv_time ) AS LAST_FOUND_TIME,COUNT( * ) AS COUNT_TOTAL FROM media_expire_patch WHERE "+where+" GROUP BY s1_s_ip,s1_domain";
}
public static String getVertexSubscriberSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_subscriber_id != '' AND radius_packet_type = 4 AND radius_acct_status_type = 1";
return "SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log WHERE" + where + " GROUP BY common_subscriber_id";
}
public static String getRelationshipSubsciberLocateIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_subscriber_id != '' AND radius_framed_ip != '' AND radius_packet_type = 4 AND radius_acct_status_type = 1";
return "SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME,COUNT(*) as COUNT_TOTAL FROM radius_record_log WHERE" + where + " GROUP BY common_subscriber_id,radius_framed_ip";
}
private static long[] getTimeLimit() {
long maxTime = 0L;
long minTime = 0L;
switch (ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE) {
case 0:
maxTime = currentHour;
minTime = maxTime - ApplicationConfig.UPDATE_INTERVAL;
break;
case 1:
maxTime = ApplicationConfig.READ_CLICKHOUSE_MAX_TIME;
minTime = ApplicationConfig.READ_CLICKHOUSE_MIN_TIME;
break;
default:
}
return new long[]{maxTime, minTime};
}
}

View File

@@ -1,85 +0,0 @@
package cn.ac.iie.service.read;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import static cn.ac.iie.service.read.ReadClickhouseData.RECENT_COUNT_HOUR;
/**
* @author wlh
* 多线程全量读取arangoDb历史数据封装到map
*/
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
private ArangoDBConnect arangoConnect;
private String query;
private ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map;
private Class<T> type;
private String table;
private CountDownLatch countDownLatch;
public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
String query,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
Class<T> type,
String table,
CountDownLatch countDownLatch) {
this.arangoConnect = arangoConnect;
this.query = query;
this.map = map;
this.type = type;
this.table = table;
this.countDownLatch = countDownLatch;
}
@Override
public void run() {
try {
long s = System.currentTimeMillis();
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
List<T> baseDocuments = docs.asListRemaining();
int i = 0;
for (T doc : baseDocuments) {
String key = doc.getKey();
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
tmpMap.put(key, doc);
i++;
}
long l = System.currentTimeMillis();
LOG.info(query + "\n读取" + i + "条数据,运行时间:" + (l - s));
}
}catch (Exception e){
e.printStackTrace();
}finally {
countDownLatch.countDown();
LOG.info("本线程读取完毕,剩余线程数量:"+countDownLatch.getCount());
}
}
private void updateProtocolDocument(T doc) {
if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
for (String protocol : ReadClickhouseData.PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;
doc.addAttribute(protocolRecent, cntRecentsDst);
}
}
}
}

View File

@@ -1,128 +0,0 @@
package cn.ac.iie.service.update;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class Document<T extends BaseDocument> extends Thread{
private static final Logger LOG = LoggerFactory.getLogger(Document.class);
private HashMap<String, ArrayList<T>> newDocumentMap;
private ArangoDBConnect arangoManger;
private String collectionName;
private ConcurrentHashMap<String, T> historyDocumentMap;
private CountDownLatch countDownLatch;
Document(HashMap<String, ArrayList<T>> newDocumentMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, T> historyDocumentMap,
CountDownLatch countDownLatch) {
this.newDocumentMap = newDocumentMap;
this.arangoManger = arangoManger;
this.collectionName = collectionName;
this.historyDocumentMap = historyDocumentMap;
this.countDownLatch = countDownLatch;
}
@Override
public void run() {
long start = System.currentTimeMillis();
LOG.info("新读取数据"+newDocumentMap.size()+"条,历史数据"+historyDocumentMap.size()+"");
try {
Set<String> keySet = newDocumentMap.keySet();
ArrayList<T> resultDocumentList = new ArrayList<>();
int i = 0;
for (String key : keySet) {
ArrayList<T> newDocumentSchemaList = newDocumentMap.getOrDefault(key, null);
if (newDocumentSchemaList != null) {
T newDocument = mergeDocument(newDocumentSchemaList);
i += 1;
T historyDocument = historyDocumentMap.getOrDefault(key, null);
updateDocument(newDocument,historyDocument,resultDocumentList);
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, collectionName);
LOG.info("更新"+collectionName+":" + i);
i = 0;
}
}
}
if (i != 0) {
arangoManger.overwrite(resultDocumentList, collectionName);
LOG.info("更新"+collectionName+":" + i);
}
} catch (Exception e) {
e.printStackTrace();
LOG.error(e.toString());
}finally {
countDownLatch.countDown();
long last = System.currentTimeMillis();
LOG.info("本线程更新完毕,用时:"+(last-start)+",剩余线程数量:"+countDownLatch.getCount());
}
}
private void updateDocument(T newDocument, T historyDocument, ArrayList<T> resultDocumentList) {
if (historyDocument != null){
updateFunction(newDocument,historyDocument);
resultDocumentList.add(historyDocument);
}else {
resultDocumentList.add(newDocument);
}
}
protected void updateFunction(T newDocument, T historyDocument) {
Object lastFoundTime = newDocument.getAttribute("LAST_FOUND_TIME");
historyDocument.addAttribute("LAST_FOUND_TIME",lastFoundTime);
}
private T mergeDocument(ArrayList<T> newDocumentSchemaList){
if (newDocumentSchemaList == null || newDocumentSchemaList.isEmpty()){
return null;
}else if (newDocumentSchemaList.size() == 1){
return newDocumentSchemaList.get(0);
}else {
T newDocument = null;
for (T lastDoc:newDocumentSchemaList){
if (newDocument == null){
newDocument = lastDoc;
}else {
mergeFunction(lastDoc,newDocument);
}
}
return newDocument;
}
}
protected void mergeFunction(T lastDoc,T newDocument) {
putMinAttribute(lastDoc,newDocument,"FIRST_FOUND_TIME");
putMaxAttribute(lastDoc,newDocument,"LAST_FOUND_TIME");
}
protected void putMinAttribute(T firstDoc,T lastDoc,String attribute){
long firstMinAttribute = Long.parseLong(firstDoc.getAttribute(attribute).toString());
long lastMinAttribute = Long.parseLong(lastDoc.getAttribute(attribute).toString());
lastDoc.addAttribute(attribute,firstMinAttribute<lastMinAttribute? firstMinAttribute:lastMinAttribute);
}
protected void putMaxAttribute(T firstDoc,T lastDoc,String attribute){
long firstMaxAttribute = Long.parseLong(firstDoc.getAttribute(attribute).toString());
long lastMaxAttribute = Long.parseLong(lastDoc.getAttribute(attribute).toString());
lastDoc.addAttribute(attribute,firstMaxAttribute>lastMaxAttribute? firstMaxAttribute:lastMaxAttribute);
}
protected void putSumAttribute(T firstDoc,T lastDoc,String attribute){
long firstSumAttribute = Long.parseLong(firstDoc.getAttribute(attribute).toString());
long lastSumAttribute = Long.parseLong(lastDoc.getAttribute(attribute).toString());
lastDoc.addAttribute(attribute,firstSumAttribute+lastSumAttribute);
}
}

View File

@@ -1,76 +0,0 @@
package cn.ac.iie.service.update;
import cn.ac.iie.service.read.ReadClickhouseData;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class Relationship extends Document<BaseEdgeDocument> {
public Relationship(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap,arangoManger,collectionName,historyDocumentMap,countDownLatch);
}
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument){
super.updateFunction(newEdgeDocument,historyEdgeDocument);
}
protected void updateProcotol(BaseEdgeDocument historyEdgeDocument, String schema, BaseEdgeDocument newEdgeDocument){
String recentSchema = schema +"_CNT_RECENT";
String totalSchema = schema + "_CNT_TOTAL";
long countTotal = Long.parseLong(newEdgeDocument.getAttribute(totalSchema).toString());
if (countTotal > 0L){
long updateCountTotal = Long.parseLong(historyEdgeDocument.getAttribute(totalSchema).toString());
Long[] cntRecent = (Long[]) historyEdgeDocument.getAttribute(recentSchema);
cntRecent[0] = countTotal;
historyEdgeDocument.addAttribute(recentSchema, cntRecent);
historyEdgeDocument.addAttribute(totalSchema, countTotal + updateCountTotal);
String hisProtocolType = historyEdgeDocument.getAttribute("PROTOCOL_TYPE").toString();
if (!hisProtocolType.contains(schema)){
hisProtocolType = hisProtocolType + "," + schema;
historyEdgeDocument.addAttribute("PROTOCOL_TYPE",hisProtocolType);
}
}
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}
protected void mergeProtocol(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
String schema = lastDoc.getAttribute("PROTOCOL_TYPE").toString();
if (ReadClickhouseData.PROTOCOL_SET.contains(schema)){
setProtocolProperties(schema,newDocument,lastDoc);
}
}
private void setProtocolProperties(String protocol,BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
String protocolRecent = protocol +"_CNT_RECENT";
String protocolTotal = protocol + "_CNT_TOTAL";
putSumAttribute(lastDoc,newDocument,protocolTotal);
long[] cntRecents = (long[]) lastDoc.getAttribute(protocolRecent);
newDocument.addAttribute(protocolRecent, cntRecents);
String protocolType = newDocument.getAttribute("PROTOCOL_TYPE").toString();
newDocument.addAttribute("PROTOCOL_TYPE",addProcotolType(protocolType,protocol));
}
private String addProcotolType(String protocolType,String schema){
if (!protocolType.contains(schema)){
protocolType = protocolType + "," + schema;
}
return protocolType;
}
}

View File

@@ -1,39 +0,0 @@
package cn.ac.iie.service.update;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
* @author wlh
 * Update vertex data with multiple threads
*/
public class Vertex extends Document<BaseDocument> {
public Vertex(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
@Override
protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
super.updateFunction(newDocument, historyDocument);
}
@Override
protected void mergeFunction(BaseDocument lastDoc,BaseDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
}
@Override
public void run() {
super.run();
}
}

View File

@@ -1,94 +0,0 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import static cn.ac.iie.service.read.ReadClickhouseData.*;
public class LocateFqdn2Ip extends Relationship {
public LocateFqdn2Ip(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
super.mergeFunction(lastDoc, newDocument);
mergeDistinctClientIp(lastDoc, newDocument);
putSumAttribute(lastDoc, newDocument,"CNT_TOTAL");
}
private void mergeDistinctClientIp(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument){
HashSet<String> clientIpSet = new HashSet<>();
String[] distCips = (String[]) newDocument.getAttribute("DIST_CIP");
String[] lastDistCips = (String[]) lastDoc.getAttribute("DIST_CIP");
clientIpSet.addAll(Arrays.asList(distCips));
clientIpSet.addAll(Arrays.asList(lastDistCips));
long[] clientIpTs = new long[clientIpSet.size()];
for (int i = 0; i < clientIpTs.length; i++) {
clientIpTs[i] = currentHour;
}
newDocument.addAttribute("DIST_CIP", clientIpSet.toArray());
newDocument.addAttribute("DIST_CIP_TS", clientIpTs);
}
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
updateDistinctClientIp(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument, historyEdgeDocument,"CNT_TOTAL");
}
private void updateDistinctClientIp(BaseEdgeDocument newEdgeDocument,BaseEdgeDocument edgeDocument){
ArrayList<String> distCip = (ArrayList<String>) edgeDocument.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) edgeDocument.getAttribute("DIST_CIP_TS");
HashMap<String, Long> distCipToTs = new HashMap<>();
if (distCip.size() == distCipTs.size()){
for (int i = 0;i < distCip.size();i++){
distCipToTs.put(distCip.get(i),distCipTs.get(i));
}
}
Object[] distCipRecent = (Object[])newEdgeDocument.getAttribute("DIST_CIP");
for (Object cip:distCipRecent){
distCipToTs.put(cip.toString(), currentHour);
}
Map<String, Long> sortDistCip = sortMapByValue(distCipToTs);
edgeDocument.addAttribute("DIST_CIP",sortDistCip.keySet().toArray());
edgeDocument.addAttribute("DIST_CIP_TS",sortDistCip.values().toArray());
}
/**
 * Sort the Map by value in descending order
*/
private Map<String, Long> sortMapByValue(Map<String, Long> oriMap) {
if (oriMap == null || oriMap.isEmpty()) {
return null;
}
Map<String, Long> sortedMap = new LinkedHashMap<>();
List<Map.Entry<String, Long>> entryList = new ArrayList<>(oriMap.entrySet());
entryList.sort((o1, o2) -> o2.getValue().compareTo(o1.getValue()));
if(entryList.size() > DISTINCT_CLIENT_IP_NUM){
for(Map.Entry<String, Long> set:entryList.subList(0, DISTINCT_CLIENT_IP_NUM)){
sortedMap.put(set.getKey(), set.getValue());
}
}else {
for(Map.Entry<String, Long> set:entryList){
sortedMap.put(set.getKey(), set.getValue());
}
}
return sortedMap;
}
}

View File

@@ -1,21 +0,0 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class LocateSubscriber2Ip extends Relationship {
public LocateSubscriber2Ip(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
}

View File

@@ -1,34 +0,0 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class SameFqdn2Fqdn extends Relationship {
public SameFqdn2Fqdn(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument,historyEdgeDocument,"CNT_TOTAL");
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
putSumAttribute(lastDoc,newDocument,"CNT_TOTAL");
}
}

View File

@@ -1,32 +0,0 @@
package cn.ac.iie.service.update.relationship;
import cn.ac.iie.service.update.Relationship;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class VisitIp2Fqdn extends Relationship {
public VisitIp2Fqdn(HashMap<String, ArrayList<BaseEdgeDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseEdgeDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
}
@Override
protected void updateFunction(BaseEdgeDocument newEdgeDocument, BaseEdgeDocument historyEdgeDocument) {
super.updateFunction(newEdgeDocument, historyEdgeDocument);
putSumAttribute(newEdgeDocument,historyEdgeDocument,"CNT_TOTAL");
}
@Override
protected void mergeFunction(BaseEdgeDocument lastDoc,BaseEdgeDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
putSumAttribute(lastDoc,newDocument,"CNT_TOTAL");
}
}

View File

@@ -1,21 +0,0 @@
package cn.ac.iie.service.update.vertex;
import cn.ac.iie.service.update.Vertex;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class Fqdn extends Vertex {
public Fqdn(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap,countDownLatch);
}
}

View File

@@ -1,48 +0,0 @@
package cn.ac.iie.service.update.vertex;
import cn.ac.iie.service.update.Vertex;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class Ip extends Vertex {
public Ip(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
@Override
protected void updateFunction(BaseDocument newDocument, BaseDocument historyDocument) {
super.updateFunction(newDocument, historyDocument);
updateIpByType(newDocument, historyDocument);
}
@Override
protected void mergeFunction(BaseDocument lastDoc, BaseDocument newDocument) {
super.mergeFunction(lastDoc, newDocument);
mergeIpByType(lastDoc, newDocument);
}
private void mergeIpByType(BaseDocument lastDoc, BaseDocument newDocument) {
putSumAttribute(lastDoc,newDocument,"CLIENT_SESSION_COUNT");
putSumAttribute(lastDoc,newDocument,"CLIENT_BYTES_SUM");
putSumAttribute(lastDoc,newDocument,"SERVER_SESSION_COUNT");
putSumAttribute(lastDoc,newDocument,"SERVER_BYTES_SUM");
}
private void updateIpByType(BaseDocument newDocument, BaseDocument historyDocument) {
putSumAttribute(newDocument, historyDocument, "CLIENT_SESSION_COUNT");
putSumAttribute(newDocument, historyDocument, "CLIENT_BYTES_SUM");
putSumAttribute(newDocument, historyDocument, "SERVER_SESSION_COUNT");
putSumAttribute(newDocument, historyDocument, "SERVER_BYTES_SUM");
}
}

View File

@@ -1,21 +0,0 @@
package cn.ac.iie.service.update.vertex;
import cn.ac.iie.service.update.Vertex;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.entity.BaseDocument;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class Subscriber extends Vertex {
public Subscriber(HashMap<String, ArrayList<BaseDocument>> newDocumentHashMap,
ArangoDBConnect arangoManger,
String collectionName,
ConcurrentHashMap<String, BaseDocument> historyDocumentMap,
CountDownLatch countDownLatch) {
super(newDocumentHashMap, arangoManger, collectionName, historyDocumentMap, countDownLatch);
}
}

View File

@@ -1,18 +0,0 @@
package cn.ac.iie.test;
import cn.ac.iie.dao.UpdateGraphData;
/**
 * Entry point of the iplearning application
* @author wlh
*/
public class IpLearningApplicationTest {
public static void main(String[] args) {
UpdateGraphData updateGraphData = new UpdateGraphData();
updateGraphData.updateArango();
}
}

View File

@@ -1,116 +0,0 @@
package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import com.arangodb.ArangoCollection;
import com.arangodb.ArangoCursor;
import com.arangodb.ArangoDB;
import com.arangodb.ArangoDatabase;
import com.arangodb.entity.DocumentCreateEntity;
import com.arangodb.entity.ErrorEntity;
import com.arangodb.entity.MultiDocumentEntity;
import com.arangodb.model.AqlQueryOptions;
import com.arangodb.model.DocumentCreateOptions;
import com.arangodb.util.MapBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
public class ArangoDBConnect {
private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
private static ArangoDB arangoDB = null;
private static ArangoDBConnect conn = null;
static {
getArangoDatabase();
}
private static void getArangoDatabase(){
arangoDB = new ArangoDB.Builder()
.maxConnections(ApplicationConfig.THREAD_POOL_NUMBER)
.host(ApplicationConfig.ARANGODB_HOST, ApplicationConfig.ARANGODB_PORT)
.user(ApplicationConfig.ARANGODB_USER)
.password(ApplicationConfig.ARANGODB_PASSWORD)
.build();
}
public static synchronized ArangoDBConnect getInstance(){
if (null == conn){
conn = new ArangoDBConnect();
}
return conn;
}
private ArangoDatabase getDatabase(){
return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME);
}
public void clean(){
try {
if (arangoDB != null){
arangoDB.shutdown();
}
}catch (Exception e){
e.printStackTrace();
}
}
public <T> ArangoCursor<T> executorQuery(String query,Class<T> type){
ArangoDatabase database = getDatabase();
Map<String, Object> bindVars = new MapBuilder().get();
AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL);
try {
return database.query(query, bindVars, options, type);
}catch (Exception e){
e.printStackTrace();
return null;
}finally {
bindVars.clear();
}
}
@Deprecated
public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docInsert.isEmpty()){
collection.importDocuments(docInsert);
}
if (!docUpdate.isEmpty()){
collection.replaceDocuments(docUpdate);
}
}catch (Exception e){
System.out.println("更新失败");
e.printStackTrace();
}finally {
docInsert.clear();
docUpdate.clear();
}
}
public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docOverwrite.isEmpty()){
DocumentCreateOptions documentCreateOptions = new DocumentCreateOptions();
documentCreateOptions.overwrite(true);
documentCreateOptions.silent(true);
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
LOG.debug("写入arangoDB异常"+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
System.out.println("更新失败:"+e.toString());
}finally {
docOverwrite.clear();
}
}
}

View File

@@ -1,103 +0,0 @@
package cn.ac.iie.utils;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidPooledConnection;
import java.sql.*;
import java.util.Properties;
public class ClickhouseConnect {
private static DruidDataSource dataSource = null;
private static ClickhouseConnect dbConnect = null;
private static Properties props = new Properties();
static {
getDbConnect();
}
private static void getDbConnect() {
try {
if (dataSource == null) {
dataSource = new DruidDataSource();
props.load(ClickhouseConnect.class.getClassLoader().getResourceAsStream("clickhouse.properties"));
//set connection parameters
dataSource.setUrl("jdbc:clickhouse://" + props.getProperty("db.id"));
dataSource.setDriverClassName(props.getProperty("drivers"));
dataSource.setUsername(props.getProperty("mdb.user"));
dataSource.setPassword(props.getProperty("mdb.password"));
//configure the initial, minimum and maximum pool sizes
dataSource.setInitialSize(Integer.parseInt(props.getProperty("initialsize")));
dataSource.setMinIdle(Integer.parseInt(props.getProperty("minidle")));
dataSource.setMaxActive(Integer.parseInt(props.getProperty("maxactive")));
//maximum wait time when acquiring a connection
dataSource.setMaxWait(30000);
//interval in milliseconds between checks for idle connections that should be closed
dataSource.setTimeBetweenEvictionRunsMillis(2000);
//keep pooled connections from going stale
dataSource.setValidationQuery("SELECT 1");
dataSource.setTestWhileIdle(true);
dataSource.setTestOnBorrow(true);
dataSource.setKeepAlive(true);
}
} catch (Exception e) {
e.printStackTrace();
}
}
/**
 * Singleton accessor for the database connection pool
*
* @return dbConnect
*/
public static synchronized ClickhouseConnect getInstance() {
if (null == dbConnect) {
dbConnect = new ClickhouseConnect();
}
return dbConnect;
}
/**
 * Return a Druid pooled database connection
 *
 * @return connection
 * @throws SQLException SQL exception
*/
public DruidPooledConnection getConnection() throws SQLException {
return dataSource.getConnection();
}
/**
 * Close the given PreparedStatement and Connection; null references are skipped.
 *
 * @param pstmt PreparedStatement to close
 * @param connection Connection to close
*/
public void clear(Statement pstmt, Connection connection) {
try {
if (pstmt != null) {
pstmt.close();
}
if (connection != null) {
connection.close();
}
} catch (SQLException e) {
e.printStackTrace();
}
}
public ResultSet executorQuery(String query,Connection connection,Statement pstm){
// Connection connection = null;
// Statement pstm = null;
try {
connection = getConnection();
pstm = connection.createStatement();
return pstm.executeQuery(query);
}catch (Exception e){
e.printStackTrace();
return null;
}
}
}
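Usage note (not part of the original sources): because Java passes object references by value, the Connection and Statement that executorQuery creates internally are never visible through the arguments the caller passed in, so a caller that wants to release resources should create them itself and hand them to clear(). A minimal sketch, assuming the ClickhouseConnect class above and a hypothetical query:

import com.alibaba.druid.pool.DruidPooledConnection;
import java.sql.ResultSet;
import java.sql.Statement;

public class ClickhouseQueryExample {
    public static void main(String[] args) throws Exception {
        ClickhouseConnect ck = ClickhouseConnect.getInstance();
        DruidPooledConnection connection = ck.getConnection();
        Statement stmt = connection.createStatement();
        try {
            // hypothetical query, for illustration only
            ResultSet rs = stmt.executeQuery("SELECT count(*) FROM connection_record_log");
            while (rs.next()) {
                System.out.println("rows: " + rs.getLong(1));
            }
        } finally {
            ck.clear(stmt, connection); // closes the statement and returns the pooled connection
        }
    }
}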

View File

@@ -1,36 +0,0 @@
package cn.ac.iie.utils;
import java.util.Properties;
public class ConfigUtils {
private static Properties propCommon = new Properties();
public static String getStringProperty(String key) {
return propCommon.getProperty(key);
}
public static Integer getIntProperty(String key) {
return Integer.parseInt(propCommon.getProperty(key));
}
public static Long getLongProperty(String key) {
return Long.parseLong(propCommon.getProperty(key));
}
public static Boolean getBooleanProperty(String key) {
return "true".equals(propCommon.getProperty(key).toLowerCase().trim());
}
static {
try {
propCommon.load(ConfigUtils.class.getClassLoader().getResourceAsStream("application.properties"));
System.out.println("application.properties加载成功");
} catch (Exception e) {
propCommon = null;
System.err.println("配置加载失败");
}
}
}

View File

@@ -1,67 +0,0 @@
package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.*;
/**
 * Thread pool manager
* @author wlh
*/
public class ExecutorThreadPool {
private static ExecutorService pool = null ;
private static ExecutorThreadPool poolExecutor = null;
static {
getThreadPool();
}
private static void getThreadPool(){
ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("iplearning-application-pool-%d").build();
//Common Thread Pool
pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER*2,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
// pool = Executors.newFixedThreadPool(ApplicationConfig.THREAD_POOL_NUMBER);
}
public static ExecutorThreadPool getInstance(){
if (null == poolExecutor){
poolExecutor = new ExecutorThreadPool();
}
return poolExecutor;
}
public void executor(Runnable command){
pool.execute(command);
}
@Deprecated
public void awaitThreadTask(){
try {
while (!pool.awaitTermination(ApplicationConfig.THREAD_AWAIT_TERMINATION_TIME, TimeUnit.SECONDS)) {
System.out.println("线程池没有关闭");
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void shutdown(){
pool.shutdown();
}
@Deprecated
public static Long getThreadNumber(){
String name = Thread.currentThread().getName();
String[] split = name.split("-");
return Long.parseLong(split[3]);
}
}
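A minimal usage sketch (hypothetical task bodies, not from the original code) of the pool above, combined with a CountDownLatch in the same way the reader classes elsewhere in this project drive it:

import java.util.concurrent.CountDownLatch;

public class ThreadPoolUsageExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
        int tasks = 4; // hypothetical task count
        CountDownLatch latch = new CountDownLatch(tasks);
        for (int i = 0; i < tasks; i++) {
            final int id = i;
            pool.executor(() -> {
                try {
                    System.out.println("task " + id + " running");
                } finally {
                    latch.countDown(); // always count down so the caller cannot hang
                }
            });
        }
        latch.await();   // wait until every submitted task has finished
        pool.shutdown(); // then release the worker threads
    }
}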

View File

@@ -1,158 +0,0 @@
package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.net.URL;
import java.util.HashMap;
public class TopDomainUtils {
private static Logger logger = LoggerFactory.getLogger(TopDomainUtils.class);
public static String getSecDomain(String urlDomain, HashMap<String, HashMap<String, String>> maps) {
String[] split = urlDomain.split("\\.");
String secDomain = null;
for (int i = split.length - 1; i >= 0; i--) {
int mapsIndex = split.length - (i + 1);
HashMap<String, String> innerMap = maps.get("map_id_" + mapsIndex);
HashMap<String, String> fullTop = maps.get("full");
if (!(innerMap.containsKey(split[i]))) {
StringBuilder strSec = new StringBuilder();
for (int j = i; j < split.length; j++) {
strSec.append(split[j]).append(".");
}
secDomain = strSec.substring(0, strSec.length() - 1);
if (fullTop.containsKey(getTopDomainFromSecDomain(secDomain))) {
break;
} else {
while (!fullTop.containsKey(getTopDomainFromSecDomain(secDomain)) && getTopDomainFromSecDomain(secDomain).contains(".")) {
secDomain = getTopDomainFromSecDomain(secDomain);
}
break;
}
}
}
return secDomain;
}
private static String getTopDomainFromSecDomain(String secDomain) {
String quFirstDian = secDomain;
if (secDomain.contains(".")) {
quFirstDian = secDomain.substring(secDomain.indexOf(".")).substring(1);
}
return quFirstDian;
}
private static File getTopDomainFile(){
URL url = TopDomainUtils.class.getClassLoader().getResource(ApplicationConfig.TOP_DOMAIN_FILE_NAME);
File file = null;
if (url!=null){
file = new File(url.getFile());
}
if (file != null && file.isFile() && file.exists()){
return file;
}
return null;
}
public static HashMap<String, HashMap<String, String>> readTopDomainFile() {
URL url = TopDomainUtils.class.getClassLoader().getResource(ApplicationConfig.TOP_DOMAIN_FILE_NAME);
assert url != null;
HashMap<String, HashMap<String, String>> maps = makeHashMap(url.getFile());
try {
String encoding = "UTF-8";
File file = new File(url.getFile());
if (file.isFile() && file.exists()) {
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
while ((lineTxt = bufferedReader.readLine()) != null) {
HashMap<String, String> fullTop = maps.get("full");
fullTop.put(lineTxt, lineTxt);
maps.put("full", fullTop);
String[] split = lineTxt.split("\\.");
for (int i = split.length - 1; i >= 0; i--) {
int mapsIndex = split.length - (i + 1);
HashMap<String, String> innerMap = maps.get("map_id_" + mapsIndex);
innerMap.put(split[i], split[i]);
maps.put("map_id_" + mapsIndex, innerMap);
}
}
read.close();
}
} catch (Exception e) {
logger.error("TopDomainUtils>=>readTopDomainFile get filePathData error--->{" + e + "}<---");
e.printStackTrace();
}
return maps;
}
private static int getMaxLength(String filePath) {
int lengthDomain = 0;
try {
String encoding = "UTF-8";
File file = new File(filePath);
if (file.isFile() && file.exists()) {
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
while ((lineTxt = bufferedReader.readLine()) != null) {
String[] split = lineTxt.split("\\.");
if (split.length > lengthDomain) {
lengthDomain = split.length;
}
}
read.close();
} else {
logger.error("TopDomainUtils>>getMaxLength filePath is wrong--->{" + filePath + "}<---");
}
} catch (Exception e) {
logger.error("TopDomainUtils>=>getMaxLength get filePathData error--->{" + e + "}<---");
e.printStackTrace();
}
return lengthDomain;
}
private static HashMap<String, HashMap<String, String>> makeHashMap(String filePath) {
int maxLength = getMaxLength(filePath);
HashMap<String, HashMap<String, String>> maps = new HashMap<>();
for (int i = 0; i < maxLength; i++) {
maps.put("map_id_" + i, new HashMap<String, String>());
}
maps.put("full", new HashMap<String, String>());
return maps;
}
/**
 * General helper: given a URL, return its domain. The returned domain never includes a port; if it contains ':' it must be an IPv6 address.
 * @param oriUrl original URL
 * @return the domain extracted from the URL
*/
public static String getDomainFromUrl(String oriUrl) {
//split on '?' first to drop the query string
String url = oriUrl.split("[?]")[0];
//strip the http:// or https:// prefix
url = url.replaceAll("https://", "").replaceAll("http://", "");
String domain;
//extract the domain
if (url.split("/")[0].split(":").length <= 2) {
//splitting on ':' gives 1 or 2 parts, so this is an IPv4 host
domain = url
//split on '/'; index 0 holds the domain
.split("/")[0]
//for IPv4, split on ':' to drop the port; index 0 is the final domain
.split(":")[0];
} else {
//more than 2 parts after splitting on ':' means an IPv6 address; IPv6 addresses carry no port (for now), so only split on '/'
domain = url.split("/")[0];
}
return domain;
}
}
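A short usage sketch (made-up URLs) of getDomainFromUrl, covering the IPv4-with-port and IPv6 branches described in the comments above:

public class DomainExtractionExample {
    public static void main(String[] args) {
        // IPv4 host with port and query string: both are stripped
        System.out.println(TopDomainUtils.getDomainFromUrl("http://www.example.com:8080/path?x=1")); // www.example.com
        // plain https URL: the scheme prefix is removed
        System.out.println(TopDomainUtils.getDomainFromUrl("https://video.example.org/watch"));      // video.example.org
        // IPv6 host: more than two ':'-separated parts, returned without touching the colons
        System.out.println(TopDomainUtils.getDomainFromUrl("2001:db8::1/index.html"));               // 2001:db8::1
    }
}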

View File

@@ -1,34 +0,0 @@
#arangoDB connection settings
arangoDB.host=192.168.40.182
arangoDB.port=8529
arangoDB.user=root
arangoDB.password=111111
#arangoDB.DB.name=ip-learning-test
arangoDB.DB.name=insert_iplearn_index
arangoDB.batch=100000
arangoDB.ttl=3600
update.arango.batch=10000
thread.pool.number=10
thread.await.termination.time=10
#clickhouse read time-range mode: 0 = past hour, 1 = explicit range
clickhouse.time.limit.type=1
read.clickhouse.max.time=1571245220
read.clickhouse.min.time=1571245210
#arangoDB read time-range mode: 0 = normal read, 1 = explicit range
arango.time.limit.type=1
read.arango.max.time=1571245220
read.arango.min.time=1571245210
update.interval=3600
distinct.client.ip.num=10000
recent.count.hour=24
top.domain.file.name=topDomain.txt
arangoDB.read.limit=
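A brief sketch (hypothetical helper, reading the keys above through the ConfigUtils class shown earlier in this diff) of how the clickhouse.time.limit.type switch could be interpreted; the fallback window length is an assumption, not taken from the original code:

public class TimeRangeExample {
    // mode 0 = read the past update.interval seconds, mode 1 = use the explicit range configured above
    public static long[] resolveClickhouseRange() {
        if (ConfigUtils.getIntProperty("clickhouse.time.limit.type") == 0) {
            long max = System.currentTimeMillis() / 1000;                    // now, in seconds
            long min = max - ConfigUtils.getLongProperty("update.interval"); // assumed window length
            return new long[]{min, max};
        }
        return new long[]{
                ConfigUtils.getLongProperty("read.clickhouse.min.time"),
                ConfigUtils.getLongProperty("read.clickhouse.max.time")
        };
    }
}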

View File

@@ -1,8 +0,0 @@
drivers=ru.yandex.clickhouse.ClickHouseDriver
db.id=192.168.40.193:8123/av_miner?socket_timeout=300000
#db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
mdb.user=default
mdb.password=111111
initialsize=1
minidle=1
maxactive=50

View File

@@ -1,25 +0,0 @@
######################### logger ##############################
log4j.logger.org.apache.http=OFF
log4j.logger.org.apache.http.wire=OFF
#Log4j
log4j.rootLogger=info,console,file
# console appender
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=info
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# file appender
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=info
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# log file path; change to the deployment directory as needed
#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
log4j.appender.file.file=./logs/ip-learning-application.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n

File diff suppressed because it is too large

View File

@@ -1,29 +0,0 @@
package cn.ac.iie;
import java.lang.reflect.Array;
import java.util.ArrayList;
public class Test {
public static void main(String args[]) throws Exception {
Tester test = new Tester();
Tester[] tests = new Tester[0];
// ArrayList<Tester> testers = new ArrayList<>();
// testers.add(test);
Class<?> c1 = tests.getClass().getComponentType();
Class<?> c2 = Tester.class;
Class<?> c3 = test.getClass();
System.out.println(c1.getName());
System.out.println(c2.getName());
System.out.println(c3.getName());
// Tester[] newTesters = (Tester[]) Array.newInstance(c1, 10);
// Tester newTester = (Tester) c1.newInstance();
// System.out.println(newTesters.length);
}
}
class Tester {
private String name;
private String mem;
}

View File

@@ -1,37 +0,0 @@
package cn.ac.iie;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.ArangoDatabase;
import com.arangodb.entity.BaseEdgeDocument;
import java.util.ArrayList;
import java.util.List;
public class TestArango {
public static void main(String[] args) {
ArangoDBConnect instance = ArangoDBConnect.getInstance();
/*
String query = "FOR doc IN IP filter doc.FIRST_FOUND_TIME >= 1592996080 and doc.FIRST_FOUND_TIME <= 1593112913 RETURN doc";
ArangoCursor<BaseEdgeDocument> baseEdgeDocuments = instance.executorQuery(query, BaseEdgeDocument.class);
while (baseEdgeDocuments.hasNext()){
BaseEdgeDocument next = baseEdgeDocuments.next();
System.out.println(next.toString());
}
*/
BaseEdgeDocument baseEdgeDocument = new BaseEdgeDocument();
baseEdgeDocument.setKey("192.168.50.6-www.liftopia.com");
baseEdgeDocument.setFrom("IP/192.168.50.6");
baseEdgeDocument.setTo("FQDN/www.liftopia.com");
baseEdgeDocument.addAttribute("HTTP_CNT_TOTAL",3L);
baseEdgeDocument.addAttribute("DNS_CNT_RECENT",new long[24]);
baseEdgeDocument.addAttribute("PROTOCOL_TYPE","HTTP");
ArrayList<BaseEdgeDocument> baseEdgeDocuments = new ArrayList<>();
baseEdgeDocuments.add(baseEdgeDocument);
instance.overwrite(baseEdgeDocuments,"R_LOCATE_FQDN2IP");
instance.clean();
}
}

View File

@@ -1,30 +0,0 @@
package cn.ac.iie;
import java.io.*;
import java.net.URL;
import java.util.TreeMap;
import java.util.regex.Pattern;
public class TestReadLine {
public static void main(String[] args) throws Exception {
Pattern pattern = Pattern.compile("^[^.]*$");
String encoding = "UTF-8";
// File file = new File("C:\\Users\\94976\\Desktop\\test.txt");
URL url = TestReadLine.class.getClassLoader().getResource("topDomain.txt");
assert url != null;
File file = new File(url.getFile());
InputStreamReader read = new InputStreamReader(
new FileInputStream(file), encoding);
BufferedReader bufferedReader = new BufferedReader(read);
String lineTxt;
int cnt = 0;
while ((lineTxt = bufferedReader.readLine()) != null){
if (pattern.matcher(lineTxt).matches()){
cnt += 1;
System.out.println(lineTxt);
}
}
System.out.println(cnt);
System.out.println(url.getFile());
}
}

View File

@@ -1,20 +0,0 @@
package cn.ac.iie;
import cn.ac.iie.service.update.vertex.Fqdn;
import cn.ac.iie.utils.ArangoDBConnect;
import java.lang.reflect.Constructor;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
public class TestReflect {
public static void main(String[] args) throws Exception {
Class<Fqdn> fqdnClass = Fqdn.class;
// for(Constructor constructor : fqdnClass.getConstructors()){
// System.out.println(constructor);
// }
Constructor<Fqdn> constructor = fqdnClass.getConstructor(HashMap.class, ArangoDBConnect.class, String.class, ConcurrentHashMap.class, CountDownLatch.class);
System.out.println(constructor);
}
}

View File

@@ -1,10 +0,0 @@
package cn.ac.iie;
import java.util.regex.Pattern;
public class TestRegex {
public static void main(String[] args) {
Pattern pattern = Pattern.compile("^[^.]*$");
System.out.println(pattern.matcher("com.dz").matches());
}
}

View File

@@ -1,162 +0,0 @@
<component name="libraryTable">
<library name="scala-sdk-2.11.7" type="Scala">
<properties>
<language-level>Scala_2_11</language-level>
<compiler-classpath>
<root url="file://D:/soft/scala/scala-2.11.7/lib/scala-compiler.jar" />
<root url="file://D:/soft/scala/scala-2.11.7/lib/scala-library.jar" />
<root url="file://D:/soft/scala/scala-2.11.7/lib/scala-reflect.jar" />
</compiler-classpath>
</properties>
<CLASSES>
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-actors-2.11.0.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-actors-migration_2.11-1.1.0.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-library.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-parser-combinators_2.11-1.0.4.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-reflect.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-swing_2.11-1.0.2.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-xml_2.11-1.0.4.jar!/" />
</CLASSES>
<JAVADOC>
<root url="http://www.scala-lang.org/api/2.11.7/" />
</JAVADOC>
<SOURCES>
<root url="file://D:/tar/scala-2.11.7" />
<root url="file://D:/tar/scala-2.11.7/src/actors" />
<root url="file://D:/tar/scala-2.11.7/src/forkjoin" />
<root url="file://D:/tar/scala-2.11.7/src/library" />
<root url="file://D:/tar/scala-2.11.7/src/partest-extras" />
<root url="file://D:/tar/scala-2.11.7/src/partest-javaagent" />
<root url="file://D:/tar/scala-2.11.7/src/repl" />
<root url="file://D:/tar/scala-2.11.7/test/disabled/pos/t1737" />
<root url="file://D:/tar/scala-2.11.7/test/disabled/presentation/akka/src" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/deprecation" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/duration-java" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t1143-2" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t1342" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t1464" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2163" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2470" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2570" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2585" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t3003" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t3415" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t7253" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/varargs" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/abstract-class-error" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/primitive-sigs-1" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/raw-types-stubs" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t0673" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t2442" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t4851" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t6013" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t6289" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t750" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t750b" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t8244" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t8244e" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t8376" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/ilya2" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/super" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t0695" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1101" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1102" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1150" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1152" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1176" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1186" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1196" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1197" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1230" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1231" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1232" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1235" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1254" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1409" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1642" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1711" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1745" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1751" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1782" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1836" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2377" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2409" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2433" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2464" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t294" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2940" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2956" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3120" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3249" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3349" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3404" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3429" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3486" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3521" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3567" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3642" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3938" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3943" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t4603" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t4744" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t5165" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t5703" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t5957" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t6169" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t942" />
<root url="file://D:/tar/scala-2.11.7/test/files/presentation/ide-bug-1000469/src" />
<root url="file://D:/tar/scala-2.11.7/test/files/presentation/ide-bug-1000531/src" />
<root url="file://D:/tar/scala-2.11.7/test/files/res/t6613" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/bcodeInlinerMixed" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/reflection-fancy-java-classes" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/reflection-java-annotations" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/reflection-java-crtp" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452a" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452b-bcode" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452d" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452e" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452g" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3897" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4238" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4317" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4729" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4788" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4788-separate-compilation" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4891" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6168" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6168b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6240a" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6240b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6548" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7008" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7246" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7246b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7359" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7374" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7439" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7455" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7741a" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7741b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t8442" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t8601e" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9268" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9298" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9298b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9359" />
<root url="file://D:/tar/scala-2.11.7/test/files/t8449" />
<root url="file://D:/tar/scala-2.11.7/test/flaky/pos/t2868" />
<root url="file://D:/tar/scala-2.11.7/test/instrumented/library" />
<root url="file://D:/tar/scala-2.11.7/test/junit" />
<root url="file://D:/tar/scala-2.11.7/test/pending/jvm/t2705" />
<root url="file://D:/tar/scala-2.11.7/test/pending/pos/misc" />
<root url="file://D:/tar/scala-2.11.7/test/pending/pos/t3943" />
<root url="file://D:/tar/scala-2.11.7/test/pending/pos/t7778" />
<root url="file://D:/tar/scala-2.11.7/test/pending/run/t3899" />
<root url="file://D:/tar/scala-2.11.7/test/pending/run/t4713" />
<root url="file://D:/tar/scala-2.11.7/test/support/annotations" />
</SOURCES>
</library>
</component>

View File

@@ -63,6 +63,18 @@
<version>6.6.3</version> <version>6.6.3</version>
</dependency> </dependency>
<dependency>
<groupId>com.arangodb</groupId>
<artifactId>velocypack-module-jdk8</artifactId>
<version>1.1.0</version>
</dependency>
<dependency>
<groupId>com.arangodb</groupId>
<artifactId>velocypack-module-scala_2.11</artifactId>
<version>1.2.0</version>
</dependency>
<dependency> <dependency>
<groupId>org.scala-lang</groupId> <groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId> <artifactId>scala-library</artifactId>
@@ -75,6 +87,11 @@
<version>3.2.0</version> <version>3.2.0</version>
</dependency> </dependency>
<dependency>
<groupId>org.scala-lang.modules</groupId>
<artifactId>scala-xml_2.11</artifactId>
<version>1.0.4</version>
</dependency>
<dependency> <dependency>
<groupId>org.scala-tools</groupId> <groupId>org.scala-tools</groupId>

View File

@@ -6,7 +6,6 @@ import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool; import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor; import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument; import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@@ -20,15 +19,6 @@ import java.util.concurrent.CountDownLatch;
*/ */
public class BaseArangoData { public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class); private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance(); private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance(); private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
@@ -43,9 +33,11 @@ public class BaseArangoData {
historyMap.put(i, new ConcurrentHashMap<>()); historyMap.put(i, new ConcurrentHashMap<>());
} }
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER()); CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
long[] timeRange = getTimeRange(table); // long[] timeRange = getTimeRange(table);
Long countTotal = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) { for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
String sql = getQuerySql(timeRange, i, table); // String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch); ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData); threadPool.executor(readHistoryArangoData);
} }
@@ -57,47 +49,27 @@ public class BaseArangoData {
} }
} }
private long[] getTimeRange(String table) { private Long getCountTotal(String table){
long minTime = 0L; long start = System.currentTimeMillis();
long maxTime = 0L; Long cnt = 0L;
long startTime = System.currentTimeMillis(); String sql = "RETURN LENGTH("+table+")";
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}"; try {
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) { ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
case 0: while (longs.hasNext()){
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class); cnt = longs.next();
try { }
if (timeDoc != null) { }catch (Exception e){
while (timeDoc.hasNext()) { LOG.error(sql +"执行异常");
BaseDocument doc = timeDoc.next(); LOG.error(sql +" execution failed");
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
break;
default:
} }
long lastTime = System.currentTimeMillis(); long last = System.currentTimeMillis();
LOG.warn(sql + "\nTime spent querying max/min time: " + (lastTime - startTime)); LOG.info(sql+" result: "+cnt+" elapsed: "+(last-start));
return new long[]{minTime, maxTime}; return cnt;
} }
private String getQuerySql(long[] timeRange, int threadNumber, String table) { private String getQuerySql(Long cnt,int threadNumber, String table){
long minTime = timeRange[0]; long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER() + 1;
long maxTime = timeRange[1]; long offsetNum = threadNumber * sepNum;
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER(); return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
} }
} }
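A worked sketch (Java, made-up numbers) of the LIMIT-based paging that replaces the FIRST_FOUND_TIME range split above: with 1,000,005 documents and 10 threads, each thread is handed a contiguous slice of 100,001 documents.

public class PagingSketch {
    public static void main(String[] args) {
        long countTotal = 1_000_005L; // hypothetical RETURN LENGTH(table) result
        int threads = 10;             // thread.pool.number
        long sepNum = countTotal / threads + 1;     // documents per thread slice
        for (int threadNumber = 0; threadNumber < threads; threadNumber++) {
            long offsetNum = threadNumber * sepNum; // where this thread's slice starts
            System.out.println("FOR doc IN FQDN limit " + offsetNum + "," + sepNum + " RETURN doc");
        }
    }
}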

View File

@@ -1,45 +1,39 @@
#spark job settings #spark job settings
spark.sql.shuffle.partitions=5 spark.sql.shuffle.partitions=10
spark.executor.memory=4g spark.executor.memory=4g
spark.app.name=test spark.app.name=test
spark.network.timeout=300s spark.network.timeout=300s
repartitionNumber=36
spark.serializer=org.apache.spark.serializer.KryoSerializer spark.serializer=org.apache.spark.serializer.KryoSerializer
master=local[*] master=local[*]
#spark clickhouse read settings #spark clickhouse read settings
spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3 spark.read.clickhouse.url=jdbc:clickhouse://192.168.44.67:8123/tsg_galaxy_v3
spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
spark.read.clickhouse.user=default spark.read.clickhouse.user=default
spark.read.clickhouse.password=111111 spark.read.clickhouse.password=ceiec2019
spark.read.clickhouse.numPartitions=144 spark.read.clickhouse.numPartitions=5
spark.read.clickhouse.fetchsize=10000 spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=common_recv_time spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
clickhouse.socket.timeout=300000 clickhouse.socket.timeout=300000
#arangoDB settings #arangoDB settings
arangoDB.host=192.168.40.182 arangoDB.host=192.168.40.182
arangoDB.port=8529 arangoDB.port=8529
arangoDB.user=upsert arangoDB.user=upsert
arangoDB.password=ceiec2018 arangoDB.password=ceiec2018
#arangoDB.DB.name=insert_iplearn_index
arangoDB.DB.name=ip-learning-test-0 arangoDB.DB.name=ip-learning-test-0
#arangoDB.DB.name=iplearn_media_domain
arangoDB.ttl=3600 arangoDB.ttl=3600
thread.pool.number=5 thread.pool.number=10
#clickhouse read time-range mode: 0 = past hour, 1 = explicit range #clickhouse read time-range mode: 0 = past hour, 1 = explicit range
clickhouse.time.limit.type=0 clickhouse.time.limit.type=1
read.clickhouse.max.time=1571245220 read.clickhouse.max.time=1603785961
read.clickhouse.min.time=1571245210 read.clickhouse.min.time=1603354682
#arangoDB read time-range mode: 0 = normal read, 1 = explicit range arangoDB.read.limit=1
arango.time.limit.type=0
read.arango.max.time=1571245320
read.arango.min.time=1571245200
arangoDB.read.limit=
update.arango.batch=10000 update.arango.batch=10000
distinct.client.ip.num=10000 distinct.client.ip.num=10000
recent.count.hour=24 recent.count.hour=24
update.interval=10800 update.interval=3600

View File

@@ -36,12 +36,7 @@ object ApplicationConfig {
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time") val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time") val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type") val ARANGODB_READ_LIMIT: Int = config.getInt("arangoDB.read.limit")
val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")
val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch") val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour") val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num") val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")

View File

@@ -11,7 +11,7 @@ object BaseClickhouseData {
val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60 val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
private val timeLimit: (Long, Long) = getTimeLimit private val timeLimit: (Long, Long) = getTimeLimit
private def initClickhouseData(sql:String): Unit ={ private def initClickhouseData(sql:String): DataFrame ={
val dataFrame: DataFrame = spark.read.format("jdbc") val dataFrame: DataFrame = spark.read.format("jdbc")
.option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL) .option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
@@ -28,6 +28,8 @@ object BaseClickhouseData {
.load() .load()
dataFrame.printSchema() dataFrame.printSchema()
dataFrame.createOrReplaceGlobalTempView("dbtable") dataFrame.createOrReplaceGlobalTempView("dbtable")
dataFrame
} }
def loadConnectionDataFromCk(): Unit ={ def loadConnectionDataFromCk(): Unit ={
@@ -68,41 +70,7 @@ object BaseClickhouseData {
initClickhouseData(sql) initClickhouseData(sql)
} }
def getVertexFqdnDf: DataFrame ={ /*
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| )
| UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| )
| )
|GROUP BY
| FQDN
|HAVING
| FQDN != ''
""".stripMargin
LOG.warn(sql)
val vertexFqdnDf = spark.sql(sql)
vertexFqdnDf.printSchema()
vertexFqdnDf
}
def getVertexIpDf: DataFrame ={ def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk() loadConnectionDataFromCk()
val sql = val sql =
@@ -190,6 +158,149 @@ object BaseClickhouseData {
relationFqdnLocateIpDf.printSchema() relationFqdnLocateIpDf.printSchema()
relationFqdnLocateIpDf relationFqdnLocateIpDf
} }
*/
def getVertexFqdnDf: DataFrame ={
val sql =
"""
|(SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| ((SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM tsg_galaxy_v3.connection_record_log
| WHERE common_schema_type = 'SSL' GROUP BY ssl_sni
| )UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM tsg_galaxy_v3.connection_record_log
| WHERE common_schema_type = 'HTTP' GROUP BY http_host))
|GROUP BY FQDN HAVING FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getVertexIpDf: DataFrame ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT common_client_ip AS IP,MIN(common_recv_time) AS FIRST_FOUND_TIME,
|MAX(common_recv_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
|'client' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_client_ip)
|UNION ALL
|(SELECT common_server_ip AS IP,
|MIN(common_recv_time) AS FIRST_FOUND_TIME,
|MAX(common_recv_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
|'server' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_server_ip))) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getRelationFqdnLocateIpDf: DataFrame ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
|UNION ALL
|(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
|WHERE FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getRelationSubidLocateIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME
|FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id,radius_framed_ip
|) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getVertexSubidDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
def getVertexFramedIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT DISTINCT radius_framed_ip,common_recv_time as LAST_FOUND_TIME FROM radius_record_log WHERE $where
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}
  private def getTimeLimit: (Long,Long) ={
    var maxTime = 0L

View File

@@ -4,37 +4,57 @@ import java.util.regex.Pattern
import cn.ac.iie.config.ApplicationConfig import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.spark.partition.CustomPartitioner import cn.ac.iie.spark.partition.CustomPartitioner
import cn.ac.iie.spark.rdd.ReadOptions
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.rdd.RDD import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._ import org.apache.spark.sql.functions._
import org.slf4j.LoggerFactory import org.slf4j.LoggerFactory
import cn.ac.iie.utils.SparkSessionUtil._
object MergeDataFrame { object MergeDataFrame {
private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass) private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
private val pattern = Pattern.compile("^[\\d]*$") private val pattern = Pattern.compile("^[\\d]*$")
private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)
def mergeVertexFqdn(): RDD[Row] ={ def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
BaseClickhouseData.getVertexFqdnDf val fqdnAccmu = getLongAccumulator("FQDN Accumulator")
.rdd.filter(row => isDomain(row.getAs[String](0))).map(row => (row.get(0),row)) val fqdnRddRow = BaseClickhouseData.getVertexFqdnDf
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values .rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
fqdnAccmu.add(1)
(row.getAs[String]("FQDN"), row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
fqdnRddRow.cache()
val fqdnRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"FQDN",options)
fqdnRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnRddRow)
} }
def mergeVertexIp(): RDD[Row]={ def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Option[Row]))]={
val ipAccum = getLongAccumulator("IP Accumulator")
val vertexIpDf = BaseClickhouseData.getVertexIpDf val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg( val frame = vertexIpDf.groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"), min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"), max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"), collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"), collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list") collect_list("ip_type").alias("ip_type_list"),
last("common_link_info").alias("common_link_info")
) )
val values = frame.rdd.map(row => (row.get(0), row)) val ipRddRow = frame.rdd.map(row => {
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values ipAccum.add(1)
values (row.getAs[String]("IP"), row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val ipRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"IP",options)
ipRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(ipRddRow)
} }
def mergeRelationFqdnLocateIp(): RDD[Row] ={ def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
val fqdnLocIpAccum = getLongAccumulator("R_LOCATE_FQDN2IP Accumulator")
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN"))) val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "common_server_ip") .groupBy("FQDN", "common_server_ip")
.agg( .agg(
@@ -44,13 +64,61 @@ object MergeDataFrame {
collect_list("schema_type").alias("schema_type_list"), collect_list("schema_type").alias("schema_type_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT") collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
) )
frame.rdd.map(row => { val fqdnLocIpRddRow = frame.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN") val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip") val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-"+serverIp) val key = fqdn.concat("-" + serverIp)
(key,row) fqdnLocIpAccum.add(1)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values (key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val fqdnLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_FQDN2IP",options)
fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnLocIpRddRow)
}
def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
val subidLocIpAccum = getLongAccumulator("R_LOCATE_SUBSCRIBER2IP Accumulator")
val subidLocIpRddRow = BaseClickhouseData.getRelationSubidLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val key = commonSubscriberId.concat("-" + ip)
subidLocIpAccum.add(1)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val subidLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_SUBSCRIBER2IP",options)
subidLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidLocIpRddRow)
}
def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
val subidAccum = getLongAccumulator("SUBSCRIBER Accumulator")
val subidRddRow = BaseClickhouseData.getVertexSubidDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
subidAccum.add(1)
(commonSubscriberId, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val subidRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"SUBSCRIBER",options)
subidRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidRddRow)
}
def mergeVertexFrameIp: RDD[Row] ={
val framedIpAccum = getLongAccumulator("framed ip Accumulator")
val values = BaseClickhouseData.getVertexFramedIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val ip = row.getAs[String]("radius_framed_ip")
framedIpAccum.add(1)
(ip, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
values
} }
private def isDomain(fqdn: String): Boolean = { private def isDomain(fqdn: String): Boolean = {
@@ -58,14 +126,10 @@ object MergeDataFrame {
if (fqdn == null || fqdn.length == 0) { if (fqdn == null || fqdn.length == 0) {
return false return false
} }
if (fqdn.contains(":")) {
val s = fqdn.split(":")(0) val fqdnArr = fqdn.split(":")(0).split("\\.")
if (s.contains(":")){
return false if (fqdnArr.length != 4){
}
}
val fqdnArr = fqdn.split("\\.")
if (fqdnArr.length < 4 || fqdnArr.length > 4){
return true return true
} }
for (f <- fqdnArr) { for (f <- fqdnArr) {
@@ -83,6 +147,7 @@ object MergeDataFrame {
LOG.error("解析域名 " + fqdn + " 失败:\n" + e.toString) LOG.error("解析域名 " + fqdn + " 失败:\n" + e.toString)
} }
false false
} }
} }
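With this change every merge* method returns the full outer join of the documents already stored in ArangoDB (keyed by _key) with the freshly aggregated ClickHouse rows (keyed by the same string), so the update step has to handle three cases. A minimal sketch of that shape; the printed messages are only for illustration:

```scala
// Sketch of the (key, (Option[doc], Option[row])) shape produced by the merge* methods.
import com.arangodb.entity.BaseDocument
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row

def describeJoin(joined: RDD[(String, (Option[BaseDocument], Option[Row]))]): Unit =
  joined.foreach {
    case (key, (Some(_), Some(_))) => println(s"$key: existing document, new traffic -> update")
    case (key, (Some(_), None))    => println(s"$key: existing document, no new traffic this run")
    case (key, (None, Some(_)))    => println(s"$key: new key -> create a document")
    case (_,   (None, None))       => () // cannot occur after a full outer join
  }
```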

View File

@@ -1,7 +1,8 @@
package cn.ac.iie.service.update package cn.ac.iie.service.update
import java.lang import java.util
import scala.collection.JavaConversions._
import cn.ac.iie.config.ApplicationConfig import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.read.ReadHistoryArangoData import cn.ac.iie.service.read.ReadHistoryArangoData
@@ -14,16 +15,24 @@ object UpdateDocHandler {
val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS") val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS")
def updateMaxAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={ def updateMaxAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
var hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong if(hisDoc.getProperties.containsKey(attributeName)){
if (newAttribute > hisAttritube){ var hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
hisAttritube = newAttribute if (newAttribute > hisAttritube){
hisAttritube = newAttribute
}
hisDoc.addAttribute(attributeName,hisAttritube)
} }
hisDoc.addAttribute(attributeName,hisAttritube)
} }
def updateSumAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={ def updateSumAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
val hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong if (hisDoc.getProperties.containsKey(attributeName)){
hisDoc.addAttribute(attributeName,newAttribute+hisAttritube) val hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
hisDoc.addAttribute(attributeName,newAttribute+hisAttritube)
}
}
def replaceAttribute(hisDoc: BaseDocument,newAttribute:String,attributeName:String): Unit ={
hisDoc.addAttribute(attributeName,newAttribute)
} }
def separateAttributeByIpType(ipTypeList:ofRef[String], def separateAttributeByIpType(ipTypeList:ofRef[String],
@@ -62,19 +71,21 @@ object UpdateDocHandler {
} }
def updateProtocolAttritube(hisDoc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={ def updateProtocolAttritube(hisDoc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString if (hisDoc.getProperties.containsKey("PROTOCOL_TYPE")){
protocolMap.foreach(t => { var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString
if (t._2 > 0 && !protocolType.contains(t._1)){ protocolMap.foreach((t: (String, Long)) => {
protocolType = protocolType.concat(","+ t._1) if (t._2 > 0 && !protocolType.contains(t._1)){
} protocolType = protocolType.concat(","+ t._1)
val cntTotalName = t._1.concat("_CNT_TOTAL") }
val cntRecentName = t._1.concat("_CNT_RECENT") val cntTotalName = t._1.concat("_CNT_TOTAL")
val cntRecent: Array[lang.Long] = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[java.lang.Long]] val cntRecentName = t._1.concat("_CNT_RECENT")
cntRecent.update(0,t._2) val cntRecent = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[Long]]
updateSumAttribute(hisDoc,t._2,cntTotalName) cntRecent.update(0,t._2)
hisDoc.addAttribute(cntRecentName,cntRecent) updateSumAttribute(hisDoc,t._2,cntTotalName)
}) hisDoc.addAttribute(cntRecentName,cntRecent)
hisDoc.addAttribute("PROTOCOL_TYPE",protocolType) })
hisDoc.addAttribute("PROTOCOL_TYPE",protocolType)
}
} }
def putProtocolAttritube(doc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={ def putProtocolAttritube(doc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
@@ -93,10 +104,30 @@ object UpdateDocHandler {
doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",","")) doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",",""))
} }
def mergeDistinctIp(distCipRecent:ofRef[ofRef[String]]): Array[String] ={ def updateProtocolDocument(doc: BaseEdgeDocument): Unit = {
distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray if (doc.getProperties.containsKey("PROTOCOL_TYPE")) {
for (protocol <- PROTOCOL_SET) {
val protocolRecent = protocol + "_CNT_RECENT"
val cntRecent: util.ArrayList[Long] = doc.getAttribute(protocolRecent).asInstanceOf[util.ArrayList[Long]]
val cntRecentsSrc = cntRecent.toArray().map(_.toString.toLong)
val cntRecentsDst = new Array[Long](24)
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1)
cntRecentsDst(0) = 0L
doc.addAttribute(protocolRecent, cntRecentsDst)
}
}
} }
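updateProtocolDocument above treats each *_CNT_RECENT array as a sliding window of hourly counts: every run shifts the window one slot to the right, zeroes slot 0 for the new hour, and drops the oldest value. A compact equivalent of that shift, assuming a 24-slot window:

```scala
// Equivalent sliding-window shift (sketch): the new hour enters at index 0 and the
// value in the last slot falls off the end.
def shiftRecent(window: Array[Long]): Array[Long] = 0L +: window.dropRight(1)

// e.g. shiftRecent(Array(5L, 4L, 3L)) == Array(0L, 5L, 4L)
```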
def mergeDistinctIp(distCipRecent:ofRef[String]): Array[String] ={
distCipRecent.flatMap(str => {
str.replaceAll("\\[","")
.replaceAll("\\]","")
.replaceAll("\\'","")
.split(",")
}).distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}
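The reworked mergeDistinctIp no longer receives nested arrays; each DIST_CIP_RECENT element is the string ClickHouse produces for toString(groupUniqArray(...)), so the brackets and quotes must be stripped before splitting. A worked example with made-up values:

```scala
// Worked example of the parsing done by mergeDistinctIp (sample strings are made up).
val samples = Seq("['1.2.3.4','5.6.7.8']", "['5.6.7.8','9.9.9.9']")
val clientIps = samples.flatMap(str =>
  str.replaceAll("\\[", "")
     .replaceAll("\\]", "")
     .replaceAll("\\'", "")
     .split(",")
).distinct
// clientIps == Seq("1.2.3.4", "5.6.7.8", "9.9.9.9"), truncated afterwards by take(N)
```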
def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={ def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val map = newDistinctIp.map(ip => { val map = newDistinctIp.map(ip => {
(ip, ReadHistoryArangoData.currentHour) (ip, ReadHistoryArangoData.currentHour)
@@ -106,17 +137,19 @@ object UpdateDocHandler {
} }
def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={ def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[Array[String]] if (hisDoc.getProperties.containsKey("DIST_CIP") && hisDoc.getProperties.containsKey("DIST_CIP_TS")){
val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[Array[Long]] val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
if (hisDistCip.length == hisDistCipTs.length){ val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[util.ArrayList[Long]]
val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap if (hisDistCip.length == hisDistCipTs.length){
val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*) val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
newDistinctIp.foreach(cip => { val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*)
muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour) newDistinctIp.foreach(cip => {
}) muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour)
val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap })
hisDoc.addAttribute("DIST_CIP",resultMap.keys.toArray) val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray) hisDoc.addAttribute("DIST_CIP",resultMap.keys.toArray)
hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray)
}
} }
} }
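updateDistinctIp merges the stored DIST_CIP / DIST_CIP_TS pair with the newly observed client IPs by keying each address to a timestamp, stamping new arrivals with the current hour, and keeping only the most recent DISTINCT_CLIENT_IP_NUM entries. A small sketch of that merge, with a hypothetical limit of 3:

```scala
// Sketch of the DIST_CIP / DIST_CIP_TS merge; addresses, hours and the limit of 3 are made up.
val hisCipToTs  = Map("1.1.1.1" -> 100L, "2.2.2.2" -> 90L, "3.3.3.3" -> 80L)
val newCips     = Array("4.4.4.4", "2.2.2.2")
val currentHour = 101L

val merged = (hisCipToTs ++ newCips.map(_ -> currentHour))
  .toList.sortBy(-_._2).take(3).toMap
// merged keeps 4.4.4.4 and 2.2.2.2 at hour 101 plus 1.1.1.1 at hour 100; 3.3.3.3 drops out
```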

View File

@@ -1,17 +1,12 @@
package cn.ac.iie.service.update package cn.ac.iie.service.update
import java.util import java.util
import java.util.concurrent.ConcurrentHashMap
import cn.ac.iie.config.ApplicationConfig import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import cn.ac.iie.service.transform.MergeDataFrame._ import cn.ac.iie.service.transform.MergeDataFrame._
import cn.ac.iie.service.update.UpdateDocHandler._ import cn.ac.iie.service.update.UpdateDocHandler._
import cn.ac.iie.utils.{ArangoDBConnect, ExecutorThreadPool, SparkSessionUtil} import cn.ac.iie.utils.{ArangoDBConnect, SparkSessionUtil}
import cn.ac.iie.utils.SparkSessionUtil.spark
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument} import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row import org.apache.spark.sql.Row
import org.slf4j.LoggerFactory import org.slf4j.LoggerFactory
@@ -19,44 +14,47 @@ import org.slf4j.LoggerFactory
import scala.collection.mutable.WrappedArray.ofRef import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocument { object UpdateDocument {
private val pool = ExecutorThreadPool.getInstance
private val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance() private val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance()
private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass) private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
private val baseArangoData = new BaseArangoData()
def update(): Unit = { def update(): Unit = {
try { try {
updateDocument("FQDN", historyVertexFqdnMap, getVertexFqdnRow, classOf[BaseDocument], mergeVertexFqdn) updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)
updateDocument("IP", historyVertexIpMap, getVertexIpRow, classOf[BaseDocument], mergeVertexIp)
updateDocument("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp) updateDocument("SUBSCRIBER",getVertexSubidRow,mergeVertexSubid)
insertFrameIp()
updateDocument("R_LOCATE_SUBSCRIBER2IP",getRelationSubidLocateIpRow,mergeRelationSubidLocateIp)
updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, mergeRelationFqdnLocateIp)
updateDocument("IP", getVertexIpRow, mergeVertexIp)
} catch { } catch {
case e: Exception => e.printStackTrace() case e: Exception => e.printStackTrace()
} finally { } finally {
pool.shutdown()
arangoManger.clean() arangoManger.clean()
SparkSessionUtil.closeSpark() SparkSessionUtil.closeSpark()
System.exit(0)
} }
} }
private def updateDocument[T <: BaseDocument](collName: String, private def updateDocument[T <: BaseDocument](collName: String,
historyMap: ConcurrentHashMap[Integer, ConcurrentHashMap[String, T]], getDocumentRow: ((String, (Option[T], Option[Row]))) => T,
getDocumentRow: (Row, ConcurrentHashMap[String, T]) => T, getJoinRdd: () => RDD[(String, (Option[T], Option[Row]))]
clazz: Class[T],
getNewDataRdd: () => RDD[Row]
): Unit = { ): Unit = {
baseArangoData.readHistoryData(collName, historyMap, clazz)
val hisBc = spark.sparkContext.broadcast(historyMap)
try { try {
val start = System.currentTimeMillis() val start = System.currentTimeMillis()
val newDataRdd = getNewDataRdd() val joinRdd = getJoinRdd()
newDataRdd.foreachPartition(iter => { joinRdd.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val dictionaryMap: ConcurrentHashMap[String, T] = hisBc.value.get(partitionId)
val resultDocumentList = new util.ArrayList[T] val resultDocumentList = new util.ArrayList[T]
var i = 0 var i = 0
iter.foreach(row => { iter.foreach(row => {
val document = getDocumentRow(row, dictionaryMap) val document = getDocumentRow(row)
resultDocumentList.add(document) if (document != null){
resultDocumentList.add(document)
}
i += 1 i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) { if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, collName) arangoManger.overwrite(resultDocumentList, collName)
@@ -73,88 +71,238 @@ object UpdateDocument {
LOG.warn(s"更新$collName 时间:${last - start}") LOG.warn(s"更新$collName 时间:${last - start}")
} catch { } catch {
case e: Exception => e.printStackTrace() case e: Exception => e.printStackTrace()
} finally {
hisBc.destroy()
} }
} }
private def getVertexFqdnRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = { private def insertFrameIp(): Unit ={
val fqdn = row.getAs[String]("FQDN") mergeVertexFrameIp.foreachPartition(iter => {
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME") val resultDocumentList = new util.ArrayList[BaseDocument]
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME") var i = 0
var document: BaseDocument = dictionaryMap.getOrDefault(fqdn, null) iter.foreach(row => {
if (document != null) { val document = getVertexFrameipRow(row)
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME") resultDocumentList.add(document)
} else { i += 1
document = new BaseDocument if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
document.setKey(fqdn) arangoManger.overwrite(resultDocumentList, "IP")
document.addAttribute("FQDN_NAME", fqdn) LOG.warn(s"更新:IP" + i)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime) i = 0
document.addAttribute("LAST_FOUND_TIME", lastFoundTime) }
} })
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"更新IP:" + i)
}
})
}
private def getVertexFrameipRow(row: Row): BaseDocument ={
val ip = row.getAs[String]("radius_framed_ip")
val document = new BaseDocument()
document.setKey(ip)
document.addAttribute("IP",ip)
document document
} }
private def getVertexIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = { private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument ={
val ip = row.getAs[String]("IP")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList, sessionCountList, bytesSumList)
var document = dictionaryMap.getOrDefault(ip, null) val subidLocIpDocOpt = joinRow._2._1
if (document != null) { var subidLocIpDoc = subidLocIpDocOpt match {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME") case Some(doc) => doc
updateSumAttribute(document, sepAttributeTuple._1, "SERVER_SESSION_COUNT") case None => null
updateSumAttribute(document, sepAttributeTuple._2, "SERVER_BYTES_SUM")
updateSumAttribute(document, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
updateSumAttribute(document, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
} else {
document = new BaseDocument
document.setKey(ip)
document.addAttribute("IP", ip)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
document.addAttribute("SERVER_SESSION_COUNT", sepAttributeTuple._1)
document.addAttribute("SERVER_BYTES_SUM", sepAttributeTuple._2)
document.addAttribute("CLIENT_SESSION_COUNT", sepAttributeTuple._3)
document.addAttribute("CLIENT_BYTES_SUM", sepAttributeTuple._4)
document.addAttribute("COMMON_LINK_INFO", "")
} }
document
val subidLocIpRowOpt = joinRow._2._2
val subidLocIpRow = subidLocIpRowOpt match {
case Some(r) => r
case None => null
}
if (subidLocIpRow != null){
val subId = subidLocIpRow.getAs[String]("common_subscriber_id")
val ip = subidLocIpRow.getAs[String]("radius_framed_ip")
val lastFoundTime = subidLocIpRow.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = subidLocIpRow.getAs[Long]("FIRST_FOUND_TIME")
val key = subId.concat("-"+ip)
if (subidLocIpDoc != null){
updateMaxAttribute(subidLocIpDoc,lastFoundTime,"LAST_FOUND_TIME")
} else {
subidLocIpDoc = new BaseEdgeDocument()
subidLocIpDoc.setKey(key)
subidLocIpDoc.setFrom("SUBSCRIBER/" + subId)
subidLocIpDoc.setTo("IP/" + ip)
subidLocIpDoc.addAttribute("SUBSCRIBER",subId)
subidLocIpDoc.addAttribute("IP",ip)
subidLocIpDoc.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
subidLocIpDoc.addAttribute("LAST_FOUND_TIME",lastFoundTime)
}
}
subidLocIpDoc
} }
private def getRelationFqdnLocateIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseEdgeDocument]): BaseEdgeDocument = { private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument ={
val fqdn = row.getAs[String]("FQDN") val subidDocOpt = joinRow._2._1
val serverIp = row.getAs[String]("common_server_ip") var subidDoc = subidDocOpt match {
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME") case Some(doc) => doc
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME") case None => null
val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
val distCipRecent = row.getAs[ofRef[ofRef[String]]]("DIST_CIP_RECENT")
val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
val key = fqdn.concat("-" + serverIp)
var document = dictionaryMap.getOrDefault(key, null)
if (document != null) {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
updateProtocolAttritube(document, sepAttritubeMap)
updateDistinctIp(document, distinctIp)
} else {
document = new BaseEdgeDocument()
document.setKey(key)
document.setFrom("FQDN/" + fqdn)
document.setTo("IP/" + serverIp)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putProtocolAttritube(document, sepAttritubeMap)
putDistinctIp(document, distinctIp)
} }
document
val subidRowOpt = joinRow._2._2
val subidRow = subidRowOpt match {
case Some(r) => r
case None => null
}
if (subidRow != null){
val subId = subidRow.getAs[String]("common_subscriber_id")
val subLastFoundTime = subidRow.getAs[Long]("LAST_FOUND_TIME")
val subFirstFoundTime = subidRow.getAs[Long]("FIRST_FOUND_TIME")
if (subidDoc != null){
updateMaxAttribute(subidDoc,subLastFoundTime,"LAST_FOUND_TIME")
} else {
subidDoc = new BaseDocument()
subidDoc.setKey(subId)
subidDoc.addAttribute("SUBSCRIBER",subId)
subidDoc.addAttribute("FIRST_FOUND_TIME",subFirstFoundTime)
subidDoc.addAttribute("LAST_FOUND_TIME",subLastFoundTime)
}
}
subidDoc
}
private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
val fqdnDocOpt = joinRow._2._1
var fqdnDoc = fqdnDocOpt match {
case Some(doc) => doc
case None => null
}
val fqdnRowOpt = joinRow._2._2
val fqdnRow = fqdnRowOpt match {
case Some(r) => r
case None => null
}
if (fqdnRow != null){
val fqdn = fqdnRow.getAs[String]("FQDN")
val lastFoundTime = fqdnRow.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = fqdnRow.getAs[Long]("FIRST_FOUND_TIME")
if (fqdnDoc != null) {
updateMaxAttribute(fqdnDoc, lastFoundTime, "LAST_FOUND_TIME")
} else {
fqdnDoc = new BaseDocument
fqdnDoc.setKey(fqdn)
fqdnDoc.addAttribute("FQDN_NAME", fqdn)
fqdnDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
fqdnDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
}
}
fqdnDoc
}
private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
val ipDocOpt = joinRow._2._1
var ipDoc = ipDocOpt match {
case Some(doc) => doc
case None => null
}
val ipRowOpt = joinRow._2._2
val ipRow = ipRowOpt match {
case Some(r) => r
case None => null
}
if (ipRow != null){
val ip = ipRow.getAs[String]("IP")
val firstFoundTime = ipRow.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = ipRow.getAs[Long]("LAST_FOUND_TIME")
val sessionCountList = ipRow.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = ipRow.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = ipRow.getAs[ofRef[String]]("ip_type_list")
val linkInfo = ipRow.getAs[String]("common_link_info")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList, sessionCountList, bytesSumList)
if (ipDoc != null) {
updateMaxAttribute(ipDoc, lastFoundTime, "LAST_FOUND_TIME")
updateSumAttribute(ipDoc, sepAttributeTuple._1, "SERVER_SESSION_COUNT")
updateSumAttribute(ipDoc, sepAttributeTuple._2, "SERVER_BYTES_SUM")
updateSumAttribute(ipDoc, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
updateSumAttribute(ipDoc, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
replaceAttribute(ipDoc,linkInfo,"COMMON_LINK_INFO")
} else {
ipDoc = new BaseDocument
ipDoc.setKey(ip)
ipDoc.addAttribute("IP", ip)
ipDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
ipDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
ipDoc.addAttribute("SERVER_SESSION_COUNT", sepAttributeTuple._1)
ipDoc.addAttribute("SERVER_BYTES_SUM", sepAttributeTuple._2)
ipDoc.addAttribute("CLIENT_SESSION_COUNT", sepAttributeTuple._3)
ipDoc.addAttribute("CLIENT_BYTES_SUM", sepAttributeTuple._4)
ipDoc.addAttribute("COMMON_LINK_INFO", "")
}
}
ipDoc
}
private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument = {
val fqdnLocIpDocOpt = joinRow._2._1
var fqdnLocIpDoc = fqdnLocIpDocOpt match {
case Some(doc) => doc
case None => null
}
val fqdnLocIpRowOpt = joinRow._2._2
val fqdnLocIpRow = fqdnLocIpRowOpt match {
case Some(r) => r
case None => null
}
if (fqdnLocIpDoc != null){
updateProtocolDocument(fqdnLocIpDoc)
}
if (fqdnLocIpRow != null){
val fqdn = fqdnLocIpRow.getAs[String]("FQDN")
val serverIp = fqdnLocIpRow.getAs[String]("common_server_ip")
val firstFoundTime = fqdnLocIpRow.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = fqdnLocIpRow.getAs[Long]("LAST_FOUND_TIME")
val countTotalList = fqdnLocIpRow.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = fqdnLocIpRow.getAs[ofRef[AnyRef]]("schema_type_list")
val distCipRecent = fqdnLocIpRow.getAs[ofRef[String]]("DIST_CIP_RECENT")
val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
val key = fqdn.concat("-" + serverIp)
if (fqdnLocIpDoc != null) {
updateMaxAttribute(fqdnLocIpDoc, lastFoundTime, "LAST_FOUND_TIME")
updateProtocolAttritube(fqdnLocIpDoc, sepAttritubeMap)
updateDistinctIp(fqdnLocIpDoc, distinctIp)
} else {
fqdnLocIpDoc = new BaseEdgeDocument()
fqdnLocIpDoc.setKey(key)
fqdnLocIpDoc.setFrom("FQDN/" + fqdn)
fqdnLocIpDoc.setTo("IP/" + serverIp)
fqdnLocIpDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
fqdnLocIpDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putProtocolAttritube(fqdnLocIpDoc, sepAttritubeMap)
putDistinctIp(fqdnLocIpDoc, distinctIp)
}
}
fqdnLocIpDoc
} }
} }
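updateDocument streams every joined partition through the getDocumentRow callbacks above and flushes the resulting documents to ArangoDB in batches of UPDATE_ARANGO_BATCH via overwrite. The batching idea, stripped of the ArangoDB specifics; the write callback and batchSize below are placeholders:

```scala
// Generic batching sketch; write and batchSize stand in for arangoManger.overwrite
// and ApplicationConfig.UPDATE_ARANGO_BATCH.
def flushInBatches[T](docs: Iterator[T], batchSize: Int)(write: java.util.List[T] => Unit): Unit = {
  val buffer = new java.util.ArrayList[T]()
  docs.foreach { doc =>
    buffer.add(doc)
    if (buffer.size() >= batchSize) {
      write(buffer)
      buffer.clear()
    }
  }
  if (!buffer.isEmpty) write(buffer) // flush the tail of the partition
}
```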

View File

@@ -0,0 +1,136 @@
/*
* DISCLAIMER
*
* Copyright 2016 ArangoDB GmbH, Cologne, Germany
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright holder is ArangoDB GmbH, Cologne, Germany
*
* author Mark - mark at arangodb.com
*/
package cn.ac.iie.spark
import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions, WriteOptions}
import cn.ac.iie.spark.vpack.VPackUtils
import com.arangodb.model.DocumentCreateOptions
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.reflect.ClassTag
object ArangoSpark {
/**
* Save data from rdd into ArangoDB
*
* @param rdd the rdd with the data to save
* @param collection the collection to save in
*/
def save[T](rdd: RDD[T], collection: String): Unit =
save(rdd, collection, WriteOptions())
/**
* Save data from rdd into ArangoDB
*
* @param rdd the rdd with the data to save
* @param collection the collection to save in
* @param options additional write options
*/
def save[T](rdd: RDD[T], collection: String, options: WriteOptions): Unit =
saveRDD(rdd, collection, options, (x: Iterator[T]) => x)
/**
* Save data from dataset into ArangoDB
*
* @param dataset the dataset with data to save
* @param collection the collection to save in
*/
def save[T](dataset: Dataset[T], collection: String): Unit =
saveRDD(dataset.rdd, collection, WriteOptions(), (x: Iterator[T]) => x)
/**
* Save data from dataset into ArangoDB
*
* @param dataset the dataset with data to save
* @param collection the collection to save in
* @param options additional write options
*/
def save[T](dataset: Dataset[T], collection: String, options: WriteOptions): Unit =
saveRDD(dataset.rdd, collection, options, (x: Iterator[T]) => x)
/**
* Save data from dataframe into ArangoDB
*
* @param dataframe the dataframe with data to save
* @param collection the collection to save in
*/
def saveDF(dataframe: DataFrame, collection: String): Unit =
saveRDD[Row](dataframe.rdd, collection, WriteOptions(), (x: Iterator[Row]) => x.map { y => VPackUtils.rowToVPack(y) })
/**
* Save data from dataframe into ArangoDB
*
* @param dataframe the dataframe with data to save
* @param collection the collection to save in
* @param options additional write options
*/
def saveDF(dataframe: DataFrame, collection: String, options: WriteOptions): Unit =
saveRDD[Row](dataframe.rdd, collection, options, (x: Iterator[Row]) => x.map { y => VPackUtils.rowToVPack(y) })
private def saveRDD[T](rdd: RDD[T], collection: String, options: WriteOptions, map: Iterator[T] => Iterator[Any]): Unit = {
val writeOptions = createWriteOptions(options, rdd.sparkContext.getConf)
rdd.foreachPartition { p =>
if (p.nonEmpty) {
val arangoDB = createArangoBuilder(writeOptions).build()
val col = arangoDB.db(writeOptions.database).collection(collection)
val docs = map(p).toList.asJava
writeOptions.method match {
case WriteOptions.INSERT => col.insertDocuments(docs)
case WriteOptions.UPDATE => col.updateDocuments(docs)
case WriteOptions.REPLACE => col.replaceDocuments(docs)
case WriteOptions.OVERWRITE =>
val documentCreateOptions = new DocumentCreateOptions
documentCreateOptions.overwrite(true)
documentCreateOptions.silent(true)
col.insertDocuments(docs, documentCreateOptions)
}
arangoDB.shutdown()
}
}
}
/**
* Load data from ArangoDB into rdd
*
* @param sparkContext the sparkContext containing the ArangoDB configuration
* @param collection the collection to load data from
*/
def load[T: ClassTag](sparkContext: SparkContext, collection: String): ArangoRdd[T] =
load(sparkContext, collection, ReadOptions())
/**
* Load data from ArangoDB into rdd
*
* @param sparkContext the sparkContext containing the ArangoDB configuration
* @param collection the collection to load data from
* @param options read options
*/
def load[T: ClassTag](sparkContext: SparkContext, collection: String, options: ReadOptions): ArangoRdd[T] =
new ArangoRdd[T](sparkContext, createReadOptions(options, sparkContext.getConf).copy(collection = collection))
}
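A short usage sketch of the load/save entry points defined above. The database and collection names are illustrative; the arangodb.* properties are the ones read back by createReadOptions/createWriteOptions further down in this changeset:

```scala
// Usage sketch for ArangoSpark (host, credentials, database and collection names are illustrative).
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.spark.rdd.{ReadOptions, WriteOptions}
import com.arangodb.entity.BaseDocument
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .master("local[*]").appName("arango-spark-demo")
  .config("arangodb.hosts", "127.0.0.1:8529")
  .config("arangodb.user", "root")
  .config("arangodb.password", "openSesame")
  .getOrCreate()

val fqdnDocs = ArangoSpark.load[BaseDocument](spark.sparkContext, "FQDN", ReadOptions("graph_db"))
println(fqdnDocs.count())

ArangoSpark.save(fqdnDocs, "FQDN_BACKUP", WriteOptions("graph_db", WriteOptions.OVERWRITE))
```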

View File

@@ -0,0 +1,7 @@
package cn.ac.iie.spark.partition
import org.apache.spark.Partition
class QueryArangoPartition(idx: Int, val offset: Long, val separate: Long) extends Partition{
override def index: Int = idx
}

View File

@@ -0,0 +1,34 @@
package cn.ac.iie.spark.rdd
import com.arangodb.Protocol
import com.arangodb.entity.LoadBalancingStrategy
trait ArangoOptions {
def database: String = "_system"
def hosts: Option[String] = None
def user: Option[String] = None
def password: Option[String] = None
def useSsl: Option[Boolean] = None
def sslKeyStoreFile: Option[String] = None
def sslPassPhrase: Option[String] = None
def sslProtocol: Option[String] = None
def protocol: Option[Protocol] = None
def maxConnections: Option[Int] = None
def acquireHostList: Option[Boolean] = None
def acquireHostListInterval: Option[Int] = None
def loadBalancingStrategy: Option[LoadBalancingStrategy] = None
}

View File

@@ -0,0 +1,81 @@
package cn.ac.iie.spark.rdd
import scala.collection.JavaConverters.asScalaIteratorConverter
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.update.UpdateDocument
import cn.ac.iie.spark
import cn.ac.iie.spark.partition.QueryArangoPartition
import com.arangodb.ArangoCursor
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.slf4j.LoggerFactory
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
class ArangoRdd[T: ClassTag](@transient override val sparkContext: SparkContext,
val options: ReadOptions
) extends RDD[T](sparkContext, Nil) {
private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
override def compute(split: Partition, context: TaskContext): Iterator[T] = {
createCursor(split.asInstanceOf[QueryArangoPartition]).asScala
}
override protected def getPartitions: Array[Partition] = {
val partitions = ArrayBuffer[Partition]()
val total = getCountTotal
for (i <- 0 until ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS) {
val partition = getPartition(i, total)
partitions += partition
}
partitions.toArray
}
private def createCursor(split: QueryArangoPartition)(implicit clazz: ClassTag[T]): ArangoCursor[T] = {
var arangoCursor:ArangoCursor[T] = null
val arangoDB = spark.createArangoBuilder(options).build()
try {
val offset = split.offset
val separate = split.separate
val collection = options.collection
val sql = s"FOR doc IN $collection limit $offset,$separate RETURN doc"
LOG.info(sql)
arangoCursor = arangoDB.db(options.database).query(sql,clazz.runtimeClass.asInstanceOf[Class[T]])
}catch {
case e: Exception => LOG.error(s"创建Cursor异常:${e.getMessage}")
}finally {
arangoDB.shutdown()
}
arangoCursor
}
override def repartition(numPartitions: Int)(implicit ord: Ordering[T]): RDD[T] = super.repartition(numPartitions)
private def getPartition(idx: Int, countTotal: Long): QueryArangoPartition = {
val sepNum = countTotal / ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS + 1
val offsetNum = idx * sepNum
new QueryArangoPartition(idx, offsetNum, sepNum)
}
override def count(): Long = getCountTotal
private def getCountTotal: Long = {
val arangoDB = spark.createArangoBuilder(options).build()
var cnt = 0L
val sql = s"RETURN LENGTH(${options.collection})"
LOG.info(sql)
try {
val longs = arangoDB.db(options.database).query(sql, classOf[Long])
while (longs.hasNext) cnt = longs.next
} catch {
case e: Exception => LOG.error(sql + s"执行异常:${e.getMessage}")
}finally {
arangoDB.shutdown()
}
cnt
}
}
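Each partition of ArangoRdd issues FOR doc IN collection LIMIT offset, separate RETURN doc, so the split is plain offset pagination over the collection length. A worked example of the arithmetic in getPartition; the counts are made up:

```scala
// Worked example of the offset/limit partitioning used by getPartitions (numbers are made up).
val countTotal    = 1000005L
val numPartitions = 8
val sepNum  = countTotal / numPartitions + 1          // 125001 documents per partition
val offsets = (0 until numPartitions).map(_ * sepNum) // Vector(0, 125001, ..., 875007)
// The last partition asks for 125001 documents but only 124998 remain, which the query tolerates.
```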

View File

@@ -0,0 +1,93 @@
/*
* DISCLAIMER
*
* Copyright 2016 ArangoDB GmbH, Cologne, Germany
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright holder is ArangoDB GmbH, Cologne, Germany
*
* author Mark - mark at arangodb.com
*/
package cn.ac.iie.spark.rdd
import cn.ac.iie.spark.partition.QueryArangoPartition
import com.arangodb.Protocol
import com.arangodb.entity.LoadBalancingStrategy
case class ReadOptions(override val database: String = "_system",
val collection: String = null,
partitioner: QueryArangoPartition = new QueryArangoPartition(0,0,0),
override val hosts: Option[String] = None,
override val user: Option[String] = None,
override val password: Option[String] = None,
override val useSsl: Option[Boolean] = None,
override val sslKeyStoreFile: Option[String] = None,
override val sslPassPhrase: Option[String] = None,
override val sslProtocol: Option[String] = None,
override val protocol: Option[Protocol] = None,
override val maxConnections: Option[Int] = None,
override val acquireHostList: Option[Boolean] = None,
override val acquireHostListInterval: Option[Int] = None,
override val loadBalancingStrategy: Option[LoadBalancingStrategy] = None) extends ArangoOptions {
def this() = this(database = "_system")
def database(database: String): ReadOptions = copy(database = database)
def collection(collection: String): ReadOptions = copy(collection = collection)
def hosts(hosts: String): ReadOptions = copy(hosts = Some(hosts))
def user(user: String): ReadOptions = copy(user = Some(user))
def password(password: String): ReadOptions = copy(password = Some(password))
def useSsl(useSsl: Boolean): ReadOptions = copy(useSsl = Some(useSsl))
def sslKeyStoreFile(sslKeyStoreFile: String): ReadOptions = copy(sslKeyStoreFile = Some(sslKeyStoreFile))
def sslPassPhrase(sslPassPhrase: String): ReadOptions = copy(sslPassPhrase = Some(sslPassPhrase))
def sslProtocol(sslProtocol: String): ReadOptions = copy(sslProtocol = Some(sslProtocol))
def protocol(protocol: Protocol): ReadOptions = copy(protocol = Some(protocol))
def maxConnections(maxConnections: Int): ReadOptions = copy(maxConnections = Some(maxConnections))
def acquireHostList(acquireHostList: Boolean): ReadOptions = copy(acquireHostList = Some(acquireHostList))
def acquireHostListInterval(acquireHostListInterval: Int): ReadOptions = copy(acquireHostListInterval = Some(acquireHostListInterval))
def loadBalancingStrategy(loadBalancingStrategy: LoadBalancingStrategy): ReadOptions = copy(loadBalancingStrategy = Some(loadBalancingStrategy))
def copy(database: String = database,
collection: String = collection,
partitioner: QueryArangoPartition = partitioner,
hosts: Option[String] = hosts,
user: Option[String] = user,
password: Option[String] = password,
useSsl: Option[Boolean] = useSsl,
sslKeyStoreFile: Option[String] = sslKeyStoreFile,
sslPassPhrase: Option[String] = sslPassPhrase,
sslProtocol: Option[String] = sslProtocol,
protocol: Option[Protocol] = protocol,
maxConnections: Option[Int] = maxConnections,
acquireHostList: Option[Boolean] = acquireHostList,
acquireHostListInterval: Option[Int] = acquireHostListInterval,
loadBalancingStrategy: Option[LoadBalancingStrategy] = loadBalancingStrategy): ReadOptions = {
ReadOptions(database, collection, partitioner, hosts, user, password, useSsl, sslKeyStoreFile, sslPassPhrase, sslProtocol, protocol, maxConnections, acquireHostList, acquireHostListInterval, loadBalancingStrategy)
}
}

View File

@@ -0,0 +1,124 @@
/*
* DISCLAIMER
*
* Copyright 2016 ArangoDB GmbH, Cologne, Germany
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright holder is ArangoDB GmbH, Cologne, Germany
*
* author Mark - mark at arangodb.com
*/
package cn.ac.iie.spark.rdd
import javax.net.ssl.SSLContext
import com.arangodb.Protocol
import com.arangodb.entity.LoadBalancingStrategy
case class WriteOptions(override val database: String = "_system",
val method: WriteOptions.Method = WriteOptions.INSERT,
override val hosts: Option[String] = None,
override val user: Option[String] = None,
override val password: Option[String] = None,
override val useSsl: Option[Boolean] = None,
override val sslKeyStoreFile: Option[String] = None,
override val sslPassPhrase: Option[String] = None,
override val sslProtocol: Option[String] = None,
override val protocol: Option[Protocol] = None,
override val maxConnections: Option[Int] = None,
override val acquireHostList: Option[Boolean] = None,
override val acquireHostListInterval: Option[Int] = None,
override val loadBalancingStrategy: Option[LoadBalancingStrategy] = None) extends ArangoOptions {
import WriteOptions._
def this() = this(database = "_system")
def database(database: String): WriteOptions = copy(database = database)
def method(method: Method): WriteOptions = copy(method = method)
def hosts(hosts: String): WriteOptions = copy(hosts = Some(hosts))
def user(user: String): WriteOptions = copy(user = Some(user))
def password(password: String): WriteOptions = copy(password = Some(password))
def useSsl(useSsl: Boolean): WriteOptions = copy(useSsl = Some(useSsl))
def sslKeyStoreFile(sslKeyStoreFile: String): WriteOptions = copy(sslKeyStoreFile = Some(sslKeyStoreFile))
def sslPassPhrase(sslPassPhrase: String): WriteOptions = copy(sslPassPhrase = Some(sslPassPhrase))
def sslProtocol(sslProtocol: String): WriteOptions = copy(sslProtocol = Some(sslProtocol))
def protocol(protocol: Protocol): WriteOptions = copy(protocol = Some(protocol))
def maxConnections(maxConnections: Int): WriteOptions = copy(maxConnections = Some(maxConnections))
def acquireHostList(acquireHostList: Boolean): WriteOptions = copy(acquireHostList = Some(acquireHostList))
def acquireHostListInterval(acquireHostListInterval: Int): WriteOptions = copy(acquireHostListInterval = Some(acquireHostListInterval))
def loadBalancingStrategy(loadBalancingStrategy: LoadBalancingStrategy): WriteOptions = copy(loadBalancingStrategy = Some(loadBalancingStrategy))
def copy(database: String = database,
method: Method = method,
hosts: Option[String] = hosts,
user: Option[String] = user,
password: Option[String] = password,
useSsl: Option[Boolean] = useSsl,
sslKeyStoreFile: Option[String] = sslKeyStoreFile,
sslPassPhrase: Option[String] = sslPassPhrase,
sslProtocol: Option[String] = sslProtocol,
protocol: Option[Protocol] = protocol,
maxConnections: Option[Int] = maxConnections,
acquireHostList: Option[Boolean] = acquireHostList,
acquireHostListInterval: Option[Int] = acquireHostListInterval,
loadBalancingStrategy: Option[LoadBalancingStrategy] = loadBalancingStrategy): WriteOptions = {
WriteOptions(database, method, hosts, user, password, useSsl, sslKeyStoreFile, sslPassPhrase, sslProtocol, protocol, maxConnections, acquireHostList, acquireHostListInterval, loadBalancingStrategy)
}
}
object WriteOptions {
/**
* method to save documents to arangodb
*/
sealed trait Method
/**
* save documents by inserting
* @see [[com.arangodb.ArangoCollection#insertDocuments(java.util.Collection)]]
*/
case object INSERT extends Method
/**
* save documents by updating
* @see [[com.arangodb.ArangoCollection#updateDocuments(java.util.Collection)]]
*/
case object UPDATE extends Method
/**
* save documents by replacing
* @see [[com.arangodb.ArangoCollection#replaceDocuments(java.util.Collection)]]
*/
case object REPLACE extends Method
/**
* save documents by overwrite
*/
case object OVERWRITE extends Method
}

View File

@@ -0,0 +1,139 @@
/*
* DISCLAIMER
*
* Copyright 2016 ArangoDB GmbH, Cologne, Germany
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright holder is ArangoDB GmbH, Cologne, Germany
*
* author Mark - mark at arangodb.com
*/
package cn.ac.iie
import java.io.FileInputStream
import java.security.KeyStore
import cn.ac.iie.spark.rdd.{ArangoOptions, ReadOptions, WriteOptions}
import com.arangodb.{ArangoDB, ArangoDBException, Protocol}
import com.arangodb.entity.LoadBalancingStrategy
import com.arangodb.velocypack.module.jdk8.VPackJdk8Module
import com.arangodb.velocypack.module.scala.VPackScalaModule
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
import org.apache.spark.SparkConf
import scala.util.Try
package object spark {
val PropertyHosts = "arangodb.hosts"
val PropertyUser = "arangodb.user"
val PropertyPassword = "arangodb.password"
val PropertyUseSsl = "arangodb.useSsl"
val PropertySslKeyStoreFile = "arangodb.ssl.keyStoreFile"
val PropertySslPassPhrase = "arangodb.ssl.passPhrase"
val PropertySslProtocol = "arangodb.ssl.protocol"
val PropertyProtocol = "arangodb.protocol"
val PropertyMaxConnections = "arangodb.maxConnections"
val PropertyAcquireHostList = "arangodb.acquireHostList"
val PropertyAcquireHostListInterval = "arangodb.acquireHostListInterval"
val PropertyLoadBalancingStrategy = "arangodb.loadBalancingStrategy"
private[spark] def createReadOptions(options: ReadOptions, sc: SparkConf): ReadOptions = {
options.copy(
hosts = options.hosts.orElse(some(sc.get(PropertyHosts, null))),
user = options.user.orElse(some(sc.get(PropertyUser, null))),
password = options.password.orElse(some(sc.get(PropertyPassword, null))),
useSsl = options.useSsl.orElse(some(Try(sc.get(PropertyUseSsl, null).toBoolean).getOrElse(false))),
sslKeyStoreFile = options.sslKeyStoreFile.orElse(some(sc.get(PropertySslKeyStoreFile, null))),
sslPassPhrase = options.sslPassPhrase.orElse(some(sc.get(PropertySslPassPhrase, null))),
sslProtocol = options.sslProtocol.orElse(some(sc.get(PropertySslProtocol, null))),
protocol = options.protocol.orElse(some(Protocol.valueOf(sc.get(PropertyProtocol, "VST")))),
maxConnections = options.maxConnections.orElse(some(Try(sc.get(PropertyMaxConnections, null).toInt).getOrElse(1))),
acquireHostList = options.acquireHostList.orElse(some(Try(sc.get(PropertyAcquireHostList, null).toBoolean).getOrElse(false))),
acquireHostListInterval = options.acquireHostListInterval.orElse(some(Try(sc.get(PropertyAcquireHostListInterval, null).toInt).getOrElse(60000))),
loadBalancingStrategy = options.loadBalancingStrategy.orElse(some(LoadBalancingStrategy.valueOf(sc.get(PropertyLoadBalancingStrategy, "NONE")))))
}
private[spark] def createWriteOptions(options: WriteOptions, sc: SparkConf): WriteOptions = {
options.copy(
hosts = options.hosts.orElse(some(sc.get(PropertyHosts, null))),
user = options.user.orElse(some(sc.get(PropertyUser, null))),
password = options.password.orElse(some(sc.get(PropertyPassword, null))),
useSsl = options.useSsl.orElse(some(Try(sc.get(PropertyUseSsl, null).toBoolean).getOrElse(false))),
sslKeyStoreFile = options.sslKeyStoreFile.orElse(some(sc.get(PropertySslKeyStoreFile, null))),
sslPassPhrase = options.sslPassPhrase.orElse(some(sc.get(PropertySslPassPhrase, null))),
sslProtocol = options.sslProtocol.orElse(some(sc.get(PropertySslProtocol, null))),
protocol = options.protocol.orElse(some(Protocol.valueOf(sc.get(PropertyProtocol, "VST")))),
maxConnections = options.maxConnections.orElse(some(Try(sc.get(PropertyMaxConnections, null).toInt).getOrElse(1))),
acquireHostList = options.acquireHostList.orElse(some(Try(sc.get(PropertyAcquireHostList, null).toBoolean).getOrElse(false))),
acquireHostListInterval = options.acquireHostListInterval.orElse(some(Try(sc.get(PropertyAcquireHostListInterval, null).toInt).getOrElse(60000))),
loadBalancingStrategy = options.loadBalancingStrategy.orElse(some(LoadBalancingStrategy.valueOf(sc.get(PropertyLoadBalancingStrategy, "NONE")))))
}
private[spark] def createArangoBuilder(options: ArangoOptions): ArangoDB.Builder = {
val builder = new ArangoDB.Builder()
builder.registerModules(new VPackJdk8Module, new VPackScalaModule)
options.hosts.foreach { hosts(_).foreach(host => builder.host(host._1, host._2)) }
options.user.foreach { builder.user(_) }
options.password.foreach { builder.password(_) }
options.useSsl.foreach { builder.useSsl(_) }
if (options.sslKeyStoreFile.isDefined && options.sslPassPhrase.isDefined) {
builder.sslContext(createSslContext(options.sslKeyStoreFile.get, options.sslPassPhrase.get, options.sslProtocol.getOrElse("TLS")))
}
options.protocol.foreach { builder.useProtocol(_) }
options.maxConnections.foreach { builder.maxConnections(_) }
options.acquireHostList.foreach { builder.acquireHostList(_) }
options.acquireHostListInterval.foreach { builder.acquireHostListInterval(_) }
options.loadBalancingStrategy.foreach { builder.loadBalancingStrategy(_) }
builder
}
private def createSslContext(keyStoreFile: String, passPhrase: String, protocol: String): SSLContext = {
val ks = KeyStore.getInstance(KeyStore.getDefaultType());
val kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
ks.load(new FileInputStream(keyStoreFile), passPhrase.toCharArray());
kmf.init(ks, passPhrase.toCharArray());
val tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init(ks);
val sc = SSLContext.getInstance(protocol);
sc.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
sc
}
private def some(value: String): Option[String] =
if (value != null) Some(value) else None
private def some(value: Int): Option[Int] =
Some(value)
private def some(value: Boolean): Option[Boolean] =
Some(value)
private def some(value: Protocol): Option[Protocol] =
Some(value)
private def some(value: LoadBalancingStrategy): Option[LoadBalancingStrategy] =
Some(value)
private def hosts(hosts: String): List[(String, Int)] =
hosts.split(",").map({ x =>
val s = x.split(":")
if (s.length != 2 || !s(1).matches("[0-9]+"))
throw new ArangoDBException(s"Could not load property-value arangodb.hosts=$hosts. Expected format ip:port,ip:port,...");
else
(s(0), s(1).toInt)
}).toList
}
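createReadOptions and createWriteOptions fall back to these arangodb.* Spark properties whenever an option is not set explicitly, so connection defaults can live in the SparkConf. A minimal sketch; the values are illustrative:

```scala
// Sketch: connection defaults supplied via SparkConf and later merged into
// ReadOptions/WriteOptions by createReadOptions/createWriteOptions.
import org.apache.spark.SparkConf

val conf = new SparkConf()
  .set("arangodb.hosts", "127.0.0.1:8529,10.0.0.2:8529") // comma-separated ip:port list
  .set("arangodb.user", "root")
  .set("arangodb.password", "openSesame")
  .set("arangodb.maxConnections", "4")
// Any ReadOptions or WriteOptions without explicit values picks these up from the
// SparkContext configuration when ArangoSpark builds the ArangoDB client.
```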

View File

@@ -0,0 +1,160 @@
/*
* DISCLAIMER
*
* Copyright 2016 ArangoDB GmbH, Cologne, Germany
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright holder is ArangoDB GmbH, Cologne, Germany
*
* author Mark - mark at arangodb.com
*/
package cn.ac.iie.spark.vpack
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{
ArrayType,
BooleanType,
DataType,
DateType,
DecimalType,
DoubleType,
FloatType,
IntegerType,
LongType,
MapType,
NullType,
ShortType,
StringType,
StructField,
StructType,
TimestampType
}
import com.arangodb.velocypack.VPackBuilder
import com.arangodb.velocypack.VPackSlice
import com.arangodb.velocypack.ValueType
private[spark] object VPackUtils {
def rowToVPack(row: Row): VPackSlice = {
val builder = new VPackBuilder()
if (row == null) {
builder.add(ValueType.NULL)
} else {
builder.add(ValueType.OBJECT)
row.schema.fields.zipWithIndex.foreach { addField(_, row, builder) }
builder.close()
}
builder.slice()
}
private def addField(field: (StructField, Int), row: Row, builder: VPackBuilder): Unit = {
val name = field._1.name
val index = field._2
if (row.isNullAt(index)) {
builder.add(name, ValueType.NULL)
} else {
field._1.dataType match {
case BooleanType => builder.add(name, java.lang.Boolean.valueOf(row.getBoolean(index)))
case DoubleType => builder.add(name, java.lang.Double.valueOf(row.getDouble(index)))
case FloatType => builder.add(name, java.lang.Float.valueOf(row.getFloat(index)))
case LongType => builder.add(name, java.lang.Long.valueOf(row.getLong(index)))
case IntegerType => builder.add(name, java.lang.Integer.valueOf(row.getInt(index)))
case ShortType => builder.add(name, java.lang.Short.valueOf(row.getShort(index)))
case StringType => builder.add(name, java.lang.String.valueOf(row.getString(index)));
case DateType => builder.add(name, row.getDate(index))
case TimestampType => builder.add(name, row.getTimestamp(index))
case t: DecimalType => builder.add(name, row.getDecimal(index))
case t: MapType => {
builder.add(name, ValueType.OBJECT)
row.getMap[String, Any](index).foreach { case (name, value) => addValue(name, value, builder) }
builder.close()
}
case t: ArrayType => {
builder.add(name, ValueType.ARRAY)
addValues(row, index, builder, t.elementType)
builder.close()
}
case NullType => builder.add(name, ValueType.NULL)
case struct: StructType => builder.add(name, rowToVPack(row.getStruct(index)))
case _ => // TODO
}
}
}
private def addValues(row: Row, index: Int, builder: VPackBuilder, itemType: DataType): Unit = {
itemType match {
case BooleanType =>
row.getSeq[Boolean](index).foreach { value =>
addValue(null, value, builder)
}
case DoubleType =>
row.getSeq[Double](index).foreach { value =>
addValue(null, value, builder)
}
case FloatType =>
row.getSeq[Float](index).foreach { value =>
addValue(null, value, builder)
}
case LongType =>
row.getSeq[Long](index).foreach { value =>
addValue(null, value, builder)
}
case IntegerType =>
row.getSeq[Int](index).foreach { value =>
addValue(null, value, builder)
}
case ShortType =>
row.getSeq[Short](index).foreach { value =>
addValue(null, value, builder)
}
case StringType =>
row.getSeq[String](index).foreach { value =>
addValue(null, value, builder)
}
case DateType =>
row.getSeq[java.sql.Date](index).foreach { value =>
addValue(null, value, builder)
}
case TimestampType =>
row.getSeq[java.sql.Timestamp](index).foreach { value =>
addValue(null, value, builder)
}
case s: StructType => {
row.getSeq[Row](index).foreach { value =>
builder.add(null, rowToVPack(value))
}
}
case t: MapType => // TODO
case t: ArrayType => // TODO
case _ => // TODO
}
}
private def addValue(name: String, value: Any, builder: VPackBuilder): Unit = {
value match {
case value: Boolean => builder.add(name, java.lang.Boolean.valueOf(value))
case value: Double => builder.add(name, java.lang.Double.valueOf(value))
case value: Float => builder.add(name, java.lang.Float.valueOf(value))
case value: Long => builder.add(name, java.lang.Long.valueOf(value))
case value: Int => builder.add(name, java.lang.Integer.valueOf(value))
case value: Short => builder.add(name, java.lang.Short.valueOf(value))
case value: String => builder.add(name, java.lang.String.valueOf(value))
case value: java.sql.Date => builder.add(name, value)
case value: java.sql.Timestamp => builder.add(name, value)
case _ => // TODO
}
}
}
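A minimal usage sketch for rowToVPack above, not part of the repository: the package declaration is only a guess at whichever enclosing `spark` package makes the private[spark] object visible, and the schema, field names and values are made up for illustration. It assumes Spark SQL and the arangodb-java-driver (velocypack) are already project dependencies.

package cn.ac.iie.spark // assumed: adjust to the actual package that encloses VPackUtils

import com.arangodb.velocypack.VPackSlice
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType}

object VPackUtilsExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical schema: one string column and one long column.
    val schema = StructType(Seq(
      StructField("IP", StringType, nullable = true),
      StructField("SESSION_COUNT", LongType, nullable = true)
    ))
    // rowToVPack reads row.schema, so the Row must carry its schema.
    val row: Row = new GenericRowWithSchema(Array[Any]("10.0.0.1", 42L), schema)
    val slice: VPackSlice = VPackUtils.rowToVPack(row)
    println(slice.get("IP").getAsString)           // 10.0.0.1
    println(slice.get("SESSION_COUNT").getAsLong)  // 42
  }
}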

View File

@@ -1,7 +1,9 @@
package cn.ac.iie.utils

import cn.ac.iie.config.ApplicationConfig
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.LongAccumulator
import org.slf4j.LoggerFactory

object SparkSessionUtil {
@@ -9,6 +11,8 @@ object SparkSessionUtil {
  val spark: SparkSession = getSparkSession

  var sparkContext: SparkContext = getContext

  private def getSparkSession: SparkSession = {
    val spark: SparkSession = SparkSession
      .builder()
@@ -17,16 +21,36 @@ object SparkSessionUtil {
      .config("spark.network.timeout", ApplicationConfig.SPARK_NETWORK_TIMEOUT)
      .config("spark.sql.shuffle.partitions", ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
      .config("spark.executor.memory", ApplicationConfig.SPARK_EXECUTOR_MEMORY)
      .config("arangodb.hosts", s"${ApplicationConfig.ARANGODB_HOST}:${ApplicationConfig.ARANGODB_PORT}")
      .config("arangodb.user", ApplicationConfig.ARANGODB_USER)
      .config("arangodb.password", ApplicationConfig.ARANGODB_PASSWORD)
      .master(ApplicationConfig.MASTER)
      .getOrCreate()
    LOG.warn("SparkSession obtained successfully")
    spark
  }

  def getContext: SparkContext = {
    @transient var sc: SparkContext = null
    if (sparkContext == null) sc = spark.sparkContext
    sc
  }

  def getLongAccumulator(name: String): LongAccumulator = {
    if (sparkContext == null) {
      sparkContext = getContext
    }
    sparkContext.longAccumulator(name)
  }

  def closeSpark(): Unit = {
    if (spark != null) {
      spark.stop()
    }
    if (sparkContext != null) {
      sparkContext.stop()
    }
  }
}
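A small sketch, not in the diff, of how the new getLongAccumulator helper might be used to count records on the executors; the accumulator name and the toy RDD are illustrative only and assume SparkSessionUtil compiles as shown above.

import cn.ac.iie.utils.SparkSessionUtil
import org.apache.spark.util.LongAccumulator

object AccumulatorSketch {
  def main(args: Array[String]): Unit = {
    val sc = SparkSessionUtil.spark.sparkContext
    // Named accumulator registered through the new helper; shows up in the Spark UI.
    val processed: LongAccumulator = SparkSessionUtil.getLongAccumulator("processedDocs")
    sc.parallelize(1 to 100)
      .filter(_ % 2 == 0)
      .foreach(_ => processed.add(1L))  // add() runs on executors, value is read on the driver
    println(s"processed = ${processed.value}")  // 50
    SparkSessionUtil.closeSpark()
  }
}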

View File

@@ -7,7 +7,7 @@ import org.apache.spark.sql.SparkSession
object BaseClickhouseDataTest {
  private val spark: SparkSession = SparkSessionUtil.spark
  def main(args: Array[String]): Unit = {
    // BaseClickhouseData.loadConnectionDataFromCk()
    val sql =
      """
        |SELECT

View File

@@ -1,35 +0,0 @@
package cn.ac.iie.service.update
import java.util
import java.util.ArrayList
import java.util.concurrent.ConcurrentHashMap
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocumentTest {
def main(args: Array[String]): Unit = {
val baseArangoData = new BaseArangoData()
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
while (value.hasMoreElements) {
val integer: Integer = value.nextElement()
val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
val unit = map.keys()
while (unit.hasMoreElements) {
val key = unit.nextElement()
val edgeDocument = map.get(key)
// val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
// val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
println(longs.toString + "---" + strings.toString)
}
}
}
}

View File

@@ -0,0 +1,75 @@
package cn.ac.iie.spark
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.partition.CustomPartitioner
import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions}
import cn.ac.iie.utils.SparkSessionUtil
import com.arangodb.entity.BaseDocument
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.{collect_list, max, min}
import org.apache.spark.storage.StorageLevel
object RDDTest {
def main(args: Array[String]): Unit = {
val sparkContext = SparkSessionUtil.spark.sparkContext
println(sparkContext.getConf.get("arangodb.hosts"))
// val options = ReadOptions("iplearn_media_domain").copy(collection = "R_LOCATE_FQDN2IP")
val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)
val ipOptions = options.copy(collection = "IP")
val rdd: ArangoRdd[BaseDocument] = ArangoSpark.load[BaseDocument](sparkContext,"IP",options)
println(rdd.count())
println(rdd.getNumPartitions)
val ipRDD = mergeVertexIp()
val value: RDD[(String, (Option[BaseDocument], Option[Row]))] = rdd.map(doc => {
(doc.getKey, doc)
}).fullOuterJoin(ipRDD)
value.foreach((row: (String, (Option[BaseDocument], Option[Row]))) => {
val value = row._2._2
val str: String = value match {
case Some(r) => r.getAs[String]("IP")
// case None => null
case _ => null
}
println(str)
})
/*
val value: RDD[BaseDocument] = rdd.filter(doc => doc.getAttribute("CLIENT_SESSION_COUNT").asInstanceOf[Long] > 100).map(doc => {
doc.addAttribute("abc", 1)
doc
})
value.map(doc => {(doc.getKey,doc)})
value.persist(StorageLevel.MEMORY_AND_DISK)
value.foreach(fqdnRow => println(fqdnRow.toString))
println(value.count())
*/
SparkSessionUtil.spark.close()
System.exit(0)
}
def mergeVertexIp(): RDD[(String,Row)]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list")
)
val values = frame.rdd.map(row => (row.getAs[String]("IP"), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
values
}
}
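A self-contained sketch, not part of the repository, of the fullOuterJoin pattern RDDTest relies on: plain keyed RDDs stand in for the ArangoDB documents and the ClickHouse rows, and the local master, app name, and printed actions are placeholders for illustration.

import org.apache.spark.sql.SparkSession

object FullOuterJoinSketch {
  def main(args: Array[String]): Unit = {
    // Local session only for this sketch; the project normally goes through SparkSessionUtil.
    val spark = SparkSession.builder().master("local[*]").appName("full-outer-join-sketch").getOrCreate()
    val sc = spark.sparkContext

    // Left side: keys already stored in the graph; right side: keys seen in the latest batch.
    val existing = sc.parallelize(Seq(("1.1.1.1", 10L), ("2.2.2.2", 20L)))
    val incoming = sc.parallelize(Seq(("2.2.2.2", 5L), ("3.3.3.3", 7L)))

    existing.fullOuterJoin(incoming).collect().foreach {
      case (ip, (Some(old), Some(inc))) => println(s"$ip update ${old + inc}")  // both sides present
      case (ip, (Some(old), None))      => println(s"$ip keep $old")            // history only
      case (ip, (None, Some(inc)))      => println(s"$ip insert $inc")          // new key only
      case _                            => ()
    }
    spark.stop()
  }
}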