Compare commits
10 Commits
master ... ip-learnin

| Author | SHA1 | Date |
|---|---|---|
| | 191092f210 | |
| | a563591051 | |
| | 6136635b7b | |
| | e5f30f5bfd | |
| | 9c2831013e | |
| | 77b4d1e758 | |
| | ad1bef2466 | |
| | abb3b4162b | |
| | 0faaeee7c2 | |
| | 86b484e7b4 | |
@@ -39,12 +39,6 @@
<version>1.2.1</version>
</dependency>

<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
</dependency>

<dependency>
<groupId>com.arangodb</groupId>
<artifactId>arangodb-java-driver</artifactId>

@@ -42,10 +42,10 @@ public class BaseArangoData {
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
// long[] timeRange = getTimeRange(table);
Long countTotal = getCountTotal(table);
Long total = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
// String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
String sql = getQuerySql(total, i, table);
ReadHistoryArangoData<T> readHistoryArangoData =
new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
threadPool.executor(readHistoryArangoData);

@@ -58,57 +58,6 @@ public class BaseArangoData {
}
}

public <T extends BaseDocument> ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> readHistoryData(String table, Class<T> type){
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map = new ConcurrentHashMap<>();
try {
LOG.info("开始更新"+table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
map.put(i,new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
Long countTotal = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData =
new ReadHistoryArangoData<>(arangoDBConnect, sql, map,type,table,countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info("读取"+table+" arangoDB 共耗时:"+(last-start));
}catch (Exception e){
e.printStackTrace();
LOG.error("读取历史数据失败 "+e.toString());
}
return map;
}

private long[] getTimeRange(String table){
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN "+table+" RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null){
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
long lastTime = System.currentTimeMillis();
LOG.info(sql+"\n查询最大最小时间用时:" + (lastTime - startTime));
}else {
LOG.warn("获取ArangoDb时间范围为空");
}
}catch (Exception e){
e.printStackTrace();
}
return new long[]{minTime, maxTime};

}

private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;

@@ -133,6 +82,31 @@ public class BaseArangoData {
}

private long[] getTimeRange(String table){
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN "+table+" RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null){
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
long lastTime = System.currentTimeMillis();
LOG.info(sql+"\n查询最大最小时间用时:" + (lastTime - startTime));
}else {
LOG.warn("获取ArangoDb时间范围为空");
}
}catch (Exception e){
e.printStackTrace();
}
return new long[]{minTime, maxTime};

}

private String getQuerySql(long[] timeRange,int threadNumber,String table){
long minTime = timeRange[0];
long maxTime = timeRange[1];
@@ -73,51 +73,5 @@ public class BaseClickhouseData {
}
}

public <T extends BaseDocument> HashMap<Integer, HashMap<String,ArrayList<T>>> baseDocFromCk(Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc){
long start = System.currentTimeMillis();
HashMap<Integer, HashMap<String, ArrayList<T>>> newDataMap = initializeMap();
if (newDataMap == null){
return null;
}
String sql = getSqlSupplier.get();
try {
connection = manger.getConnection();
statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery(sql);
int i = 0;
while (resultSet.next()) {
T newDoc = formatResultFunc.apply(resultSet);
if (newDoc != null) {
i+=1;
putMapByHashcode(newDoc, newDataMap);
}
}
long last = System.currentTimeMillis();
LOG.info(sql + "\n读取"+i+"条数据,运行时间:" + (last - start));
}catch (Exception e){
e.printStackTrace();
LOG.error("获取原始数据失败 "+e.toString());
}finally {
manger.clear(statement,connection);
}
return newDataMap;
}

private <T extends BaseDocument> HashMap<Integer, HashMap<String,ArrayList<T>>> initializeMap(){
try {
HashMap<Integer, HashMap<String, ArrayList<T>>> newDataMap = new HashMap<>();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
newDataMap.put(i, new HashMap<>());
}
return newDataMap;
}catch (Exception e){
e.printStackTrace();
LOG.error("数据初始化失败 "+e.toString());
return null;
}

}

}
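The `baseDocFromCk` method shown in the hunk above takes the SQL as a `Supplier<String>` and the row-to-document conversion as a `Function<ResultSet,T>`, then buckets each converted document by hash code into one sub-map per worker thread. The following is a minimal, self-contained sketch of that pattern; it is not the project's code. To stay runnable without ClickHouse, it replaces `ResultSet` rows with plain strings, and `ROW_BUCKETS` stands in for `ApplicationConfig.THREAD_POOL_NUMBER`.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.function.Function;
import java.util.function.Supplier;

public class BucketedReadSketch {
    static final int ROW_BUCKETS = 4; // stand-in for ApplicationConfig.THREAD_POOL_NUMBER

    // Generic loader: the Supplier builds the SQL, the Function turns one "row" into a document.
    static <T> HashMap<Integer, HashMap<String, List<T>>> load(Supplier<String> sqlSupplier,
                                                               Function<String, T> rowMapper,
                                                               List<String> rows) {
        HashMap<Integer, HashMap<String, List<T>>> buckets = new HashMap<>();
        for (int i = 0; i < ROW_BUCKETS; i++) {
            buckets.put(i, new HashMap<>());
        }
        String sql = sqlSupplier.get(); // in the real code this is executed against ClickHouse via JDBC
        System.out.println("would execute: " + sql);
        for (String row : rows) {
            T doc = rowMapper.apply(row);
            if (doc == null) continue;
            // same bucketing rule as the diff: abs(hashCode) modulo the pool size
            int bucket = Math.abs(row.hashCode()) % ROW_BUCKETS;
            buckets.get(bucket).computeIfAbsent(row, k -> new ArrayList<>()).add(doc);
        }
        return buckets;
    }

    public static void main(String[] args) {
        HashMap<Integer, HashMap<String, List<String>>> result =
                load(() -> "SELECT ssl_sni AS FQDN FROM connection_record_log",
                     r -> r.toUpperCase(),
                     List.of("example.com", "test.org"));
        System.out.println(result);
    }
}
```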
@@ -44,14 +44,14 @@ public class UpdateGraphData {
long start = System.currentTimeMillis();
try {

updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
// updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
// ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);

updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
// updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
// ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);

updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER", Subscriber.class,BaseDocument.class,
ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);
// updateDocument(newVertexSubscriberMap,historyVertexSubscriberMap,"SUBSCRIBER", Subscriber.class,BaseDocument.class,
// ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);

updateDocument(newRelationFqdnAddressIpMap,historyRelationFqdnAddressIpMap,"R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);

@@ -60,44 +60,9 @@ public class UpdateGraphData {
// VisitIp2Fqdn.class,BaseEdgeDocument.class,
// ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);

updateDocument(newRelationSubsciberLocateIpMap,historyRelationSubsciberLocateIpMap,"R_LOCATE_SUBSCRIBER2IP",
LocateSubscriber2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);

long last = System.currentTimeMillis();
LOG.info("iplearning application运行完毕,用时:"+(last - start));
}catch (Exception e){
e.printStackTrace();
}finally {
arangoManger.clean();
pool.shutdown();
}
}

public void updateArango2(){
long start = System.currentTimeMillis();
try {

updateDocument("FQDN", Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);

updateDocument("IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);

updateDocument("SUBSCRIBER", Subscriber.class,BaseDocument.class,
ReadClickhouseData::getVertexSubscriberSql,ReadClickhouseData::getVertexSubscriberDocument);

updateDocument("R_LOCATE_FQDN2IP", LocateFqdn2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipFqdnAddressIpSql,ReadClickhouseData::getRelationFqdnAddressIpDocument);

// updateDocument("R_VISIT_IP2FQDN",
// VisitIp2Fqdn.class,BaseEdgeDocument.class,
// ReadClickhouseData::getRelationshipIpVisitFqdnSql,ReadClickhouseData::getRelationIpVisitFqdnDocument);

updateDocument("R_LOCATE_SUBSCRIBER2IP",
LocateSubscriber2Ip.class,BaseEdgeDocument.class,
ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);
// updateDocument(newRelationSubsciberLocateIpMap,historyRelationSubsciberLocateIpMap,"R_LOCATE_SUBSCRIBER2IP",
// LocateSubscriber2Ip.class,BaseEdgeDocument.class,
// ReadClickhouseData::getRelationshipSubsciberLocateIpSql,ReadClickhouseData::getRelationshipSubsciberLocateIpDocument);

long last = System.currentTimeMillis();

@@ -151,45 +116,5 @@ public class UpdateGraphData {
}
}

private <T extends BaseDocument> void updateDocument(String collection,
Class<? extends Document<T>> taskType,
Class<T> docmentType,
Supplier<String> getSqlSupplier,
Function<ResultSet,T> formatResultFunc){
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyData = baseArangoData.readHistoryData(collection, docmentType);
LOG.info(collection+" 读取clickhouse,封装结果集");
HashMap<Integer, HashMap<String, ArrayList<T>>> newData = baseClickhouseData.baseDocFromCk(getSqlSupplier, formatResultFunc);
try {
LOG.info(collection+" 开始更新");
long start = System.currentTimeMillis();
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++){
HashMap<String, ArrayList<T>> tmpNewMap = newData.get(i);
ConcurrentHashMap<String, T> tmpHisMap = historyData.get(i);
Constructor constructor = taskType.getConstructor(
HashMap.class,
ArangoDBConnect.class,
String.class,
ConcurrentHashMap.class,
CountDownLatch.class);
Document docTask = (Document)constructor.newInstance(tmpNewMap, arangoManger, collection, tmpHisMap, countDownLatch);
pool.executor(docTask);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.info(collection+" 更新完毕,共耗时:"+(last-start));

}catch (Exception e){
e.printStackTrace();
LOG.error("更新"+collection+"失败!!"+e.toString());
}finally {
newData.clear();
historyData.clear();
}

}

}
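The `updateDocument` method in the hunk above looks up a five-argument constructor on the per-collection task class with `getConstructor(...)`, builds one task per thread slot with `newInstance(...)`, and waits on a `CountDownLatch`. Below is a hedged, self-contained sketch of that construction pattern only; the `Worker` class, its two-argument constructor, and the fixed pool are stand-ins, not the project's `Fqdn`/`Ip`/`LocateFqdn2Ip` tasks or its `ExecutorThreadPool`.

```java
import java.lang.reflect.Constructor;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ReflectiveTaskSketch {
    // Stand-in for the per-collection task types resolved at runtime in the diff.
    public static class Worker implements Runnable {
        private final String collection;
        private final CountDownLatch latch;
        public Worker(String collection, CountDownLatch latch) {
            this.collection = collection;
            this.latch = latch;
        }
        @Override public void run() {
            System.out.println("updating " + collection);
            latch.countDown(); // each partition task reports completion
        }
    }

    public static void main(String[] args) throws Exception {
        int threads = 4; // stand-in for ApplicationConfig.THREAD_POOL_NUMBER
        ExecutorService pool = Executors.newFixedThreadPool(threads);
        CountDownLatch latch = new CountDownLatch(threads);
        // Resolve the constructor once, then build one task per partition, as the diff does.
        Constructor<Worker> ctor = Worker.class.getConstructor(String.class, CountDownLatch.class);
        for (int i = 0; i < threads; i++) {
            Runnable task = ctor.newInstance("FQDN-" + i, latch);
            pool.execute(task);
        }
        latch.await(); // block until every partition task has finished
        pool.shutdown();
    }
}
```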
@@ -270,9 +270,9 @@ public class ReadClickhouseData {
}

public static String getRelationshipFqdnAddressIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
String where = " common_end_time >= " + minTime + " AND common_end_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_end_time) AS LAST_FOUND_TIME,MIN(common_end_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_end_time) AS LAST_FOUND_TIME,MIN(common_end_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
}
@@ -97,6 +97,11 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
if (distCip == null || distCip.isEmpty()){
doc.updateAttribute("DIST_CIP", new String[0]);
doc.updateAttribute("DIST_CIP_TS", new long[0]);
return;
}
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
Collections.reverse(distCipTs);
@@ -6,8 +6,7 @@ public class IpLearningApplicationTest {

public static void main(String[] args) {
UpdateGraphData updateGraphData = new UpdateGraphData();
// updateGraphData.updateArango();
updateGraphData.updateArango2();
updateGraphData.updateArango();

}
}
@@ -4,7 +4,7 @@ arangoDB.host=192.168.40.182
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
arangoDB.DB.name=ip-learning-test
arangoDB.DB.name=ip-learning-test-0
#arangoDB.DB.name=tsg_galaxy_v3
arangoDB.batch=100000
arangoDB.ttl=3600

@@ -12,16 +12,16 @@ arangoDB.ttl=3600
arangoDB.read.limit=
update.arango.batch=10000

thread.pool.number=10
thread.pool.number=80
thread.await.termination.time=10

#读取clickhouse时间范围方式,0:读取过去一小时,1:指定时间范围
time.limit.type=0
read.clickhouse.max.time=1596684142
read.clickhouse.min.time=1596425769
time.limit.type=1
read.clickhouse.max.time=1598246519
read.clickhouse.min.time=1597161600

update.interval=3600
distinct.client.ip.num=10000
distinct.client.ip.num=1
recent.count.hour=24
@@ -1,53 +0,0 @@
package cn.ac.iie;

import cn.ac.iie.dao.BaseArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.junit.After;
import org.junit.Test;

import java.util.Enumeration;
import java.util.concurrent.ConcurrentHashMap;

public class TestReadArango {
private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();

private static BaseArangoData baseArangoData = new BaseArangoData();

@Test
public void testReadFqdnFromArango() {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyData =
baseArangoData.readHistoryData("FQDN", BaseDocument.class);
printMap(historyData);
}

@Test
public void testReadFqdnLocIpFromArango() {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> ip =
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", BaseEdgeDocument.class);
printMap(ip);
}

private <T extends BaseDocument> void printMap(ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyData) {
ConcurrentHashMap<String, T> map = historyData.get(2);
Enumeration<String> keys = map.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement();
T document = map.get(key);
System.out.println(document.toString());
}
}

@After
public void clearSource() {
pool.shutdown();
arangoManger.clean();
}

}

@@ -1,46 +0,0 @@
package cn.ac.iie;

import cn.ac.iie.dao.BaseClickhouseData;
import cn.ac.iie.service.ingestion.ReadClickhouseData;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.junit.Test;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Set;

public class TestReadClickhouse {

private static BaseClickhouseData baseClickhouseData = new BaseClickhouseData();

@Test
public void testReadFqdnFromCk(){

HashMap<Integer, HashMap<String, ArrayList<BaseDocument>>> newData =
baseClickhouseData.baseDocFromCk(ReadClickhouseData::getVertexFqdnSql,
ReadClickhouseData::getVertexFqdnDocument);
printMap(newData);

}

@Test
public void testReadFqdnLocIpFromCk(){
HashMap<Integer, HashMap<String, ArrayList<BaseEdgeDocument>>> map =
baseClickhouseData.baseDocFromCk(ReadClickhouseData::getRelationshipFqdnAddressIpSql,
ReadClickhouseData::getRelationFqdnAddressIpDocument);

printMap(map);
}

private<T extends BaseDocument> void printMap(HashMap<Integer, HashMap<String, ArrayList<T>>> newData){
HashMap<String, ArrayList<T>> map = newData.get(1);
Set<String> strings = map.keySet();
for (String key:strings){
ArrayList<T> baseDocuments = map.get(key);
System.out.println(baseDocuments.get(0));
}
}
}
@@ -63,18 +63,6 @@
<version>6.6.3</version>
</dependency>

<dependency>
<groupId>com.arangodb</groupId>
<artifactId>velocypack-module-jdk8</artifactId>
<version>1.1.0</version>
</dependency>

<dependency>
<groupId>com.arangodb</groupId>
<artifactId>velocypack-module-scala_2.11</artifactId>
<version>1.2.0</version>
</dependency>

<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>

@@ -87,11 +75,6 @@
<version>3.2.0</version>
</dependency>

<dependency>
<groupId>org.scala-lang.modules</groupId>
<artifactId>scala-xml_2.11</artifactId>
<version>1.0.4</version>
</dependency>

<dependency>
<groupId>org.scala-tools</groupId>
@@ -6,6 +6,7 @@ import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -19,13 +20,21 @@ import java.util.concurrent.CountDownLatch;
*/
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);

public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();

private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();

private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();

public <T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
public <T extends BaseDocument> ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> readHistoryData(String table, Class<T> type) {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap = new ConcurrentHashMap<>();
try {
LOG.warn("开始更新" + table);
long start = System.currentTimeMillis();

@@ -34,10 +43,10 @@ public class BaseArangoData {
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
// long[] timeRange = getTimeRange(table);
Long countTotal = getCountTotal(table);
Long total = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
// String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
String sql = getQuerySql(total, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}

@@ -47,6 +56,7 @@ public class BaseArangoData {
} catch (Exception e) {
e.printStackTrace();
}
return historyMap;
}

private Long getCountTotal(String table){

@@ -62,7 +72,7 @@ public class BaseArangoData {
LOG.error(sql +"执行异常");
}
long last = System.currentTimeMillis();
LOG.info(sql+" 结果:"+cnt+" 执行时间:"+(last-start));
LOG.warn(sql+" 结果:"+cnt+" 执行时间:"+(last-start));
return cnt;
}

@@ -72,4 +82,47 @@ public class BaseArangoData {
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}

private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
break;
default:
}
long lastTime = System.currentTimeMillis();
LOG.warn(sql + "\n查询最大最小时间用时:" + (lastTime - startTime));
return new long[]{minTime, maxTime};

}

private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER();
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
}

}
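The count-based `getQuerySql` variant above returns an AQL statement of the form `FOR doc IN <table> limit <offset>,<count> RETURN doc`, so that each worker thread reads a disjoint slice of the collection. The computation of `offsetNum` and `sepNum` is not shown in this hunk; the sketch below fills it in with an assumed rounding rule purely for illustration, and the collection name and totals are made-up example values.

```java
public class AqlPaginationSketch {
    // Builds one LIMIT window per worker; the offset/size arithmetic is an assumption,
    // only the final AQL string shape follows the diff.
    static String querySql(long countTotal, int threadNumber, String table, int threads) {
        long sepNum = countTotal / threads + 1;   // documents per worker (assumed rounding rule)
        long offsetNum = sepNum * threadNumber;   // where this worker's slice starts
        return "FOR doc IN " + table + " limit " + offsetNum + "," + sepNum + " RETURN doc";
    }

    public static void main(String[] args) {
        int threads = 4;
        for (int i = 0; i < threads; i++) {
            System.out.println(querySql(1_000_003L, i, "FQDN", threads));
        }
    }
}
```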
@@ -58,6 +58,7 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
public void run() {
try {
long s = System.currentTimeMillis();
LOG.warn(query+" \n 开始查询");
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
List<T> baseDocuments = docs.asListRemaining();

@@ -69,9 +70,9 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
updateProtocolDocument(doc);
deleteDistinctClientIpByTime(doc);
break;
case "R_VISIT_IP2FQDN":
updateProtocolDocument(doc);
break;
// case "R_VISIT_IP2FQDN":
// updateProtocolDocument(doc);
// break;
default:
}
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER();

@@ -107,6 +108,12 @@ public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
if (distCip == null || distCip.isEmpty()){
doc.updateAttribute("DIST_CIP", new String[0]);
doc.updateAttribute("DIST_CIP_TS", new long[0]);
return;
}

distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
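The intent behind `deleteDistinctClientIpByTime` above is to drop client IPs whose last-seen timestamp falls outside the recent window (`currentHour - RECENT_COUNT_HOUR * 3600`). The sketch below is a standalone illustration of that idea only: it keeps the field names from the diff (`DIST_CIP` / `DIST_CIP_TS`) but skips the `BaseDocument` plumbing and uses a simple remove-by-index pass instead of the sort/indexOf trick shown in the hunk.

```java
import java.util.ArrayList;
import java.util.List;

public class DistinctIpPruneSketch {
    // Removes every (ip, timestamp) pair older than the recent-count window.
    static void prune(List<String> distCip, List<Long> distCipTs, long currentHour, int recentCountHour) {
        long cutoff = currentHour - recentCountHour * 3600L;
        for (int i = distCip.size() - 1; i >= 0; i--) {
            if (distCipTs.get(i) < cutoff) {   // older than the recent window
                distCip.remove(i);
                distCipTs.remove(i);
            }
        }
    }

    public static void main(String[] args) {
        List<String> ips = new ArrayList<>(List.of("10.0.0.1", "10.0.0.2"));
        List<Long> ts = new ArrayList<>(List.of(1_597_161_600L, 1_590_000_000L));
        prune(ips, ts, 1_597_161_600L, 24);
        System.out.println(ips); // the stale second entry is gone
    }
}
```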
@@ -1,39 +1,48 @@
#spark任务配置
spark.sql.shuffle.partitions=10
spark.sql.shuffle.partitions=5
spark.executor.memory=4g
spark.app.name=test
spark.network.timeout=300s
repartitionNumber=36
spark.serializer=org.apache.spark.serializer.KryoSerializer
master=local[*]
#spark读取clickhouse配置
spark.read.clickhouse.url=jdbc:clickhouse://192.168.44.67:8123/tsg_galaxy_v3
#spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3
spark.read.clickhouse.url=jdbc:clickhouse://192.168.44.12:8123/tsg_galaxy_v3
spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
spark.read.clickhouse.user=default
#spark.read.clickhouse.password=111111
spark.read.clickhouse.password=ceiec2019
spark.read.clickhouse.numPartitions=5
spark.read.clickhouse.numPartitions=144
spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
#spark.read.clickhouse.partitionColumn=common_end_time
spark.read.clickhouse.partitionColumn=FIRST_FOUND_TIME
clickhouse.socket.timeout=300000
#arangoDB配置
arangoDB.host=192.168.40.182
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
#arangoDB.DB.name=insert_iplearn_index
arangoDB.DB.name=ip-learning-test-0
#arangoDB.DB.name=iplearn_media_domain
arangoDB.ttl=3600

thread.pool.number=10
thread.pool.number=5

#读取clickhouse时间范围方式,0:读取过去一小时;1:指定时间范围
clickhouse.time.limit.type=1
read.clickhouse.max.time=1603785961
read.clickhouse.min.time=1603354682
read.clickhouse.max.time=1598246519
read.clickhouse.min.time=1597161600

arangoDB.read.limit=1
#读取arangoDB时间范围方式,0:正常读;1:指定时间范围
arango.time.limit.type=0
read.arango.max.time=1598246519
read.arango.min.time=1597161600

arangoDB.read.limit=
update.arango.batch=10000

distinct.client.ip.num=10000
distinct.client.ip.num=1
recent.count.hour=24

update.interval=3600
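The properties above introduce two time-range modes, selected by `clickhouse.time.limit.type` / `arango.time.limit.type`: 0 derives the window from the current time, 1 uses the fixed `*.min.time` / `*.max.time` epoch seconds. The following is a small illustrative sketch of that switch, not the project's code; the method and parameter names are stand-ins for the corresponding ApplicationConfig values.

```java
public class TimeWindowSketch {
    // limitType 0: trailing window ending now; limitType 1: the explicit range from the config.
    static long[] window(int limitType, long nowSeconds, long intervalSeconds,
                         long fixedMin, long fixedMax) {
        if (limitType == 0) {
            return new long[]{nowSeconds - intervalSeconds, nowSeconds};
        }
        return new long[]{fixedMin, fixedMax};
    }

    public static void main(String[] args) {
        long[] w = window(1, System.currentTimeMillis() / 1000L, 3600L, 1597161600L, 1598246519L);
        System.out.println(w[0] + " .. " + w[1]);
    }
}
```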
@@ -36,7 +36,12 @@ object ApplicationConfig {
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")

val ARANGODB_READ_LIMIT: Int = config.getInt("arangoDB.read.limit")
val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type")

val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")

val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")
@@ -28,16 +28,15 @@ object BaseClickhouseData {
.load()
dataFrame.printSchema()
dataFrame.createOrReplaceGlobalTempView("dbtable")

dataFrame
}

def loadConnectionDataFromCk(): Unit ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val where = "common_end_time >= " + timeLimit._2 + " AND common_end_time < " + timeLimit._1 + " and common_schema_type != 'BASE'"
val sql =
s"""
|(SELECT
| ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
| ssl_sni,http_host,common_client_ip,common_server_ip,common_end_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
|FROM
| connection_record_log
|WHERE $where) as dbtable

@@ -47,6 +46,28 @@ object BaseClickhouseData {
initClickhouseData(sql)
}

def getRelationFqdnLocateIpDf(): DataFrame ={
val where = "common_end_time >= " + timeLimit._2 + " AND common_end_time < " + timeLimit._1 + " and common_schema_type != 'BASE'"
val sql =
s"""
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_end_time) AS LAST_FOUND_TIME,MIN(common_end_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
|UNION ALL
|(SELECT http_host AS FQDN,common_server_ip,MAX(common_end_time) AS LAST_FOUND_TIME,MIN(common_end_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
|WHERE FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

private def loadRadiusDataFromCk(): Unit ={
val where =
s"""

@@ -70,7 +91,41 @@ object BaseClickhouseData {
initClickhouseData(sql)
}

/*
def getVertexFqdnDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| )
| UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| )
| )
|GROUP BY
| FQDN
|HAVING
| FQDN != ''
""".stripMargin
LOG.warn(sql)
val vertexFqdnDf = spark.sql(sql)
vertexFqdnDf.printSchema()
vertexFqdnDf
}

def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val sql =

@@ -114,6 +169,7 @@ object BaseClickhouseData {
vertexIpDf
}

/*
def getRelationFqdnLocateIpDf: DataFrame ={
loadConnectionDataFromCk()
val sslSql =

@@ -121,10 +177,9 @@ object BaseClickhouseData {
|SELECT
| ssl_sni AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_end_time) AS LAST_FOUND_TIME,
| MIN(common_end_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'TLS' AS schema_type
|FROM
| global_temp.dbtable

@@ -139,10 +194,9 @@ object BaseClickhouseData {
|SELECT
| http_host AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_end_time) AS LAST_FOUND_TIME,
| MIN(common_end_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'HTTP' AS schema_type
|FROM
| global_temp.dbtable

@@ -160,148 +214,6 @@ object BaseClickhouseData {
}
*/

def getVertexFqdnDf: DataFrame ={
val sql =
"""
|(SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| ((SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM tsg_galaxy_v3.connection_record_log
| WHERE common_schema_type = 'SSL' GROUP BY ssl_sni
| )UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM tsg_galaxy_v3.connection_record_log
| WHERE common_schema_type = 'HTTP' GROUP BY http_host))
|GROUP BY FQDN HAVING FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getVertexIpDf: DataFrame ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT common_client_ip AS IP,MIN(common_recv_time) AS FIRST_FOUND_TIME,
|MAX(common_recv_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
|'client' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_client_ip)
|UNION ALL
|(SELECT common_server_ip AS IP,
|MIN(common_recv_time) AS FIRST_FOUND_TIME,
|MAX(common_recv_time) AS LAST_FOUND_TIME,
|count(*) as SESSION_COUNT,
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
|'server' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|where $where
|group by common_server_ip))) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getRelationFqdnLocateIpDf: DataFrame ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
|UNION ALL
|(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
|WHERE FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getRelationSubidLocateIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME
|FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id,radius_framed_ip
|) as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getVertexSubidDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log
|WHERE $where GROUP BY common_subscriber_id
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

def getVertexFramedIpDf: DataFrame ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
""".stripMargin
val sql =
s"""
|(
|SELECT DISTINCT radius_framed_ip,common_recv_time as LAST_FOUND_TIME FROM radius_record_log WHERE $where
|)as dbtable
""".stripMargin
LOG.warn(sql)
val frame = initClickhouseData(sql)
frame.printSchema()
frame
}

private def getTimeLimit: (Long,Long) ={
var maxTime = 0L
var minTime = 0L
@@ -4,58 +4,38 @@ import java.util.regex.Pattern

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.spark.partition.CustomPartitioner
import cn.ac.iie.spark.rdd.ReadOptions
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.slf4j.LoggerFactory
import cn.ac.iie.utils.SparkSessionUtil._

object MergeDataFrame {
private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
private val pattern = Pattern.compile("^[\\d]*$")
private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)

def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
val fqdnAccmu = getLongAccumulator("FQDN Accumulator")
val fqdnRddRow = BaseClickhouseData.getVertexFqdnDf
.rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
fqdnAccmu.add(1)
(row.getAs[String]("FQDN"), row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
fqdnRddRow.cache()
val fqdnRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"FQDN",options)

fqdnRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnRddRow)
def mergeVertexFqdn(): RDD[Row] ={
BaseClickhouseData.getVertexFqdnDf
.rdd.filter(row => isDomain(row.getAs[String](0))).map(row => (row.get(0),row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}

def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Option[Row]))]={
val ipAccum = getLongAccumulator("IP Accumulator")
def mergeVertexIp(): RDD[Row]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list"),
last("common_link_info").alias("common_link_info")
collect_list("ip_type").alias("ip_type_list")
)
val ipRddRow = frame.rdd.map(row => {
ipAccum.add(1)
(row.getAs[String]("IP"), row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val ipRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"IP",options)

ipRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(ipRddRow)

val values = frame.rdd.map(row => (row.get(0), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
values
}

def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
val fqdnLocIpAccum = getLongAccumulator("R_LOCATE_FQDN2IP Accumulator")
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
def mergeRelationFqdnLocateIp(): RDD[Row] ={
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf().filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "common_server_ip")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),

@@ -64,61 +44,13 @@ object MergeDataFrame {
collect_list("schema_type").alias("schema_type_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
)
val fqdnLocIpRddRow = frame.rdd.map(row => {
frame.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-" + serverIp)
fqdnLocIpAccum.add(1)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val fqdnLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_FQDN2IP",options)

fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnLocIpRddRow)

}

def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
val subidLocIpAccum = getLongAccumulator("R_LOCATE_SUBSCRIBER2IP Accumulator")
val subidLocIpRddRow = BaseClickhouseData.getRelationSubidLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val key = commonSubscriberId.concat("-" + ip)
subidLocIpAccum.add(1)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val subidLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_SUBSCRIBER2IP",options)

subidLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidLocIpRddRow)
}

def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
val subidAccum = getLongAccumulator("SUBSCRIBER Accumulator")
val subidRddRow = BaseClickhouseData.getVertexSubidDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
subidAccum.add(1)
(commonSubscriberId, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))

val subidRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"SUBSCRIBER",options)

subidRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidRddRow)

}

def mergeVertexFrameIp: RDD[Row] ={
val framedIpAccum = getLongAccumulator("framed ip Accumulator")
val values = BaseClickhouseData.getVertexFramedIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val ip = row.getAs[String]("radius_framed_ip")
framedIpAccum.add(1)
(ip, row)
val key = fqdn.concat("-"+serverIp)
(key,row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
values

}

private def isDomain(fqdn: String): Boolean = {

@@ -126,10 +58,9 @@ object MergeDataFrame {
if (fqdn == null || fqdn.length == 0) {
return false
}

val fqdnArr = fqdn.split(":")(0).split("\\.")

if (fqdnArr.length != 4){
val domain = fqdn.split(":")(0)
val fqdnArr = domain.split("\\.")
if (fqdnArr.length < 4 || fqdnArr.length > 4){
return true
}
for (f <- fqdnArr) {

@@ -144,10 +75,9 @@ object MergeDataFrame {
}
} catch {
case e: Exception =>
LOG.error("解析域名 " + fqdn + " 失败:\n" + e.toString)
LOG.warn("解析域名 " + fqdn + " 失败:\n" + e.toString)
}
false

}

}
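The `isDomain` refactor in the hunks above splits off a trailing `:port`, splits the remainder on dots, and treats anything that is not a four-label all-numeric string as a domain. The body of the per-label loop is cut off by the hunk boundary, so the sketch below is a rough standalone reading of that check rather than the project's exact code; the non-numeric-label branch is an assumption.

```java
import java.util.regex.Pattern;

public class IsDomainSketch {
    private static final Pattern DIGITS = Pattern.compile("^[\\d]*$"); // same digit test as MergeDataFrame

    static boolean isDomain(String fqdn) {
        if (fqdn == null || fqdn.isEmpty()) return false;
        String host = fqdn.split(":")[0];        // drop a trailing :port if present
        String[] parts = host.split("\\.");
        if (parts.length != 4) return true;      // cannot be a dotted IPv4 literal
        for (String p : parts) {
            if (!DIGITS.matcher(p).matches()) {  // assumed: any non-numeric label means a domain
                return true;
            }
        }
        return false;                            // all four labels numeric: treat as an IP
    }

    public static void main(String[] args) {
        System.out.println(isDomain("example.com:443")); // true
        System.out.println(isDomain("10.1.2.3"));        // false
    }
}
```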
@@ -1,8 +1,7 @@
package cn.ac.iie.service.update

import java.util
import scala.collection.JavaConversions._
import java.lang

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.read.ReadHistoryArangoData

@@ -15,25 +14,17 @@ object UpdateDocHandler {
val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS")

def updateMaxAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
if(hisDoc.getProperties.containsKey(attributeName)){
var hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
if (newAttribute > hisAttritube){
hisAttritube = newAttribute
}
hisDoc.addAttribute(attributeName,hisAttritube)
}
}

def updateSumAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
if (hisDoc.getProperties.containsKey(attributeName)){
val hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
hisDoc.addAttribute(attributeName,newAttribute+hisAttritube)
}
}

def replaceAttribute(hisDoc: BaseDocument,newAttribute:String,attributeName:String): Unit ={
hisDoc.addAttribute(attributeName,newAttribute)
}

def separateAttributeByIpType(ipTypeList:ofRef[String],
sessionCountList:ofRef[AnyRef],

@@ -71,22 +62,20 @@ object UpdateDocHandler {
}

def updateProtocolAttritube(hisDoc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
if (hisDoc.getProperties.containsKey("PROTOCOL_TYPE")){
var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString
protocolMap.foreach((t: (String, Long)) => {
protocolMap.foreach(t => {
if (t._2 > 0 && !protocolType.contains(t._1)){
protocolType = protocolType.concat(","+ t._1)
}
val cntTotalName = t._1.concat("_CNT_TOTAL")
val cntRecentName = t._1.concat("_CNT_RECENT")
val cntRecent = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[Long]]
val cntRecent: Array[lang.Long] = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[java.lang.Long]]
cntRecent.update(0,t._2)
updateSumAttribute(hisDoc,t._2,cntTotalName)
hisDoc.addAttribute(cntRecentName,cntRecent)
})
hisDoc.addAttribute("PROTOCOL_TYPE",protocolType)
}
}

def putProtocolAttritube(doc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
val protocolTypeBuilder = new mutable.StringBuilder()

@@ -104,30 +93,16 @@ object UpdateDocHandler {
doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",",""))
}

def updateProtocolDocument(doc: BaseEdgeDocument): Unit = {
if (doc.getProperties.containsKey("PROTOCOL_TYPE")) {
for (protocol <- PROTOCOL_SET) {
val protocolRecent = protocol + "_CNT_RECENT"
val cntRecent: util.ArrayList[Long] = doc.getAttribute(protocolRecent).asInstanceOf[util.ArrayList[Long]]
val cntRecentsSrc = cntRecent.toArray().map(_.toString.toLong)
val cntRecentsDst = new Array[Long](24)
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1)
cntRecentsDst(0) = 0L
doc.addAttribute(protocolRecent, cntRecentsDst)
}
}
}

def mergeDistinctIp(distCipRecent:ofRef[String]): Array[String] ={
distCipRecent.flatMap(str => {
str.replaceAll("\\[","")
.replaceAll("\\]","")
.replaceAll("\\'","")
str.replaceAll("\\[", "")
.replaceAll("\\]", "")
.replaceAll("'", "")
.split(",")
}).distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}).distinct.toArray
// distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}

def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val map = newDistinctIp.map(ip => {
(ip, ReadHistoryArangoData.currentHour)

@@ -137,9 +112,8 @@ object UpdateDocHandler {
}

def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
if (hisDoc.getProperties.containsKey("DIST_CIP") && hisDoc.getProperties.containsKey("DIST_CIP_TS")){
val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[util.ArrayList[Long]]
val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[Array[Long]]
if (hisDistCip.length == hisDistCipTs.length){
val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*)

@@ -151,6 +125,5 @@ object UpdateDocHandler {
hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray)
}
}
}

}
@@ -1,12 +1,17 @@
|
||||
package cn.ac.iie.service.update
|
||||
|
||||
import java.util
|
||||
import java.util.concurrent.ConcurrentHashMap
|
||||
|
||||
import cn.ac.iie.config.ApplicationConfig
|
||||
import cn.ac.iie.dao.BaseArangoData
|
||||
import cn.ac.iie.dao.BaseArangoData._
|
||||
import cn.ac.iie.service.transform.MergeDataFrame._
|
||||
import cn.ac.iie.service.update.UpdateDocHandler._
|
||||
import cn.ac.iie.utils.{ArangoDBConnect, SparkSessionUtil}
|
||||
import cn.ac.iie.utils.{ArangoDBConnect, ExecutorThreadPool, SparkSessionUtil}
|
||||
import cn.ac.iie.utils.SparkSessionUtil.spark
|
||||
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
|
||||
import org.apache.spark.TaskContext
|
||||
import org.apache.spark.rdd.RDD
|
||||
import org.apache.spark.sql.Row
|
||||
import org.slf4j.LoggerFactory
|
||||
@@ -14,47 +19,42 @@ import org.slf4j.LoggerFactory
|
||||
import scala.collection.mutable.WrappedArray.ofRef
|
||||
|
||||
object UpdateDocument {
private val pool = ExecutorThreadPool.getInstance
private val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance()
private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
private val baseArangoData = new BaseArangoData()

def update(): Unit = {
try {
updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)

updateDocument("SUBSCRIBER",getVertexSubidRow,mergeVertexSubid)

insertFrameIp()

updateDocument("R_LOCATE_SUBSCRIBER2IP",getRelationSubidLocateIpRow,mergeRelationSubidLocateIp)

updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, mergeRelationFqdnLocateIp)

updateDocument("IP", getVertexIpRow, mergeVertexIp)

updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, classOf[BaseEdgeDocument], mergeRelationFqdnLocateIp)
} catch {
case e: Exception => e.printStackTrace()
} finally {
pool.shutdown()
arangoManger.clean()
SparkSessionUtil.closeSpark()
System.exit(0)
}
}

private def updateDocument[T <: BaseDocument](collName: String,
getDocumentRow: ((String, (Option[T], Option[Row]))) => T,
getJoinRdd: () => RDD[(String, (Option[T], Option[Row]))]
getDocumentRow: (Row, ConcurrentHashMap[String, T]) => T,
clazz: Class[T],
getNewDataRdd: () => RDD[Row]
): Unit = {
val historyMap = baseArangoData.readHistoryData(collName, clazz)
val hisBc = spark.sparkContext.broadcast(historyMap)
LOG.warn("Broadcast variable sent")
try {
val start = System.currentTimeMillis()
val joinRdd = getJoinRdd()
joinRdd.foreachPartition(iter => {
val newDataRdd = getNewDataRdd()
newDataRdd.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val dictionaryMap: ConcurrentHashMap[String, T] = hisBc.value.get(partitionId)
val resultDocumentList = new util.ArrayList[T]
var i = 0
iter.foreach(row => {
val document = getDocumentRow(row)
if (document != null){
val document = getDocumentRow(row, dictionaryMap)
resultDocumentList.add(document)
}
i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, collName)
@@ -71,238 +71,89 @@ object UpdateDocument {
LOG.warn(s"Updated $collName in ${last - start} ms")
} catch {
case e: Exception => e.printStackTrace()
} finally {
hisBc.destroy()
}
}

private def insertFrameIp(): Unit ={
mergeVertexFrameIp.foreachPartition(iter => {
val resultDocumentList = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val document = getVertexFrameipRow(row)
resultDocumentList.add(document)
i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn("Updated IP: " + i)
i = 0
private def getVertexFqdnRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = {
val fqdn = row.getAs[String]("FQDN")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
var document: BaseDocument = dictionaryMap.getOrDefault(fqdn, null)
if (document != null) {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
} else {
document = new BaseDocument
document.setKey(fqdn)
document.addAttribute("FQDN_NAME", fqdn)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn("Updated IP: " + i)
}
})
}

private def getVertexFrameipRow(row: Row): BaseDocument ={
val ip = row.getAs[String]("radius_framed_ip")
val document = new BaseDocument()
document.setKey(ip)
document.addAttribute("IP",ip)
document
}

private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument ={

val subidLocIpDocOpt = joinRow._2._1
var subidLocIpDoc = subidLocIpDocOpt match {
case Some(doc) => doc
case None => null
}

val subidLocIpRowOpt = joinRow._2._2

val subidLocIpRow = subidLocIpRowOpt match {
case Some(r) => r
case None => null
}

if (subidLocIpRow != null){
val subId = subidLocIpRow.getAs[String]("common_subscriber_id")
val ip = subidLocIpRow.getAs[String]("radius_framed_ip")
val lastFoundTime = subidLocIpRow.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = subidLocIpRow.getAs[Long]("FIRST_FOUND_TIME")

val key = subId.concat("-"+ip)
if (subidLocIpDoc != null){
updateMaxAttribute(subidLocIpDoc,lastFoundTime,"LAST_FOUND_TIME")
} else {
subidLocIpDoc = new BaseEdgeDocument()
subidLocIpDoc.setKey(key)
subidLocIpDoc.setFrom("SUBSCRIBER/" + subId)
subidLocIpDoc.setTo("IP/" + ip)
subidLocIpDoc.addAttribute("SUBSCRIBER",subId)
subidLocIpDoc.addAttribute("IP",ip)
subidLocIpDoc.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
subidLocIpDoc.addAttribute("LAST_FOUND_TIME",lastFoundTime)
}
}
subidLocIpDoc
}

private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument ={
val subidDocOpt = joinRow._2._1
var subidDoc = subidDocOpt match {
case Some(doc) => doc
case None => null
}

val subidRowOpt = joinRow._2._2

val subidRow = subidRowOpt match {
case Some(r) => r
case None => null
}

if (subidRow != null){
val subId = subidRow.getAs[String]("common_subscriber_id")
val subLastFoundTime = subidRow.getAs[Long]("LAST_FOUND_TIME")
val subFirstFoundTime = subidRow.getAs[Long]("FIRST_FOUND_TIME")
if (subidDoc != null){
updateMaxAttribute(subidDoc,subLastFoundTime,"LAST_FOUND_TIME")
} else {
subidDoc = new BaseDocument()
subidDoc.setKey(subId)
subidDoc.addAttribute("SUBSCRIBER",subId)
subidDoc.addAttribute("FIRST_FOUND_TIME",subFirstFoundTime)
subidDoc.addAttribute("LAST_FOUND_TIME",subLastFoundTime)
}
}

subidDoc
}

private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
val fqdnDocOpt = joinRow._2._1
var fqdnDoc = fqdnDocOpt match {
case Some(doc) => doc
case None => null
}

val fqdnRowOpt = joinRow._2._2

val fqdnRow = fqdnRowOpt match {
case Some(r) => r
case None => null
}

if (fqdnRow != null){
val fqdn = fqdnRow.getAs[String]("FQDN")
val lastFoundTime = fqdnRow.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = fqdnRow.getAs[Long]("FIRST_FOUND_TIME")
if (fqdnDoc != null) {
updateMaxAttribute(fqdnDoc, lastFoundTime, "LAST_FOUND_TIME")
} else {
fqdnDoc = new BaseDocument
fqdnDoc.setKey(fqdn)
fqdnDoc.addAttribute("FQDN_NAME", fqdn)
fqdnDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
fqdnDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
}
}

fqdnDoc
}

private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
val ipDocOpt = joinRow._2._1
var ipDoc = ipDocOpt match {
case Some(doc) => doc
case None => null
}

val ipRowOpt = joinRow._2._2

val ipRow = ipRowOpt match {
case Some(r) => r
case None => null
}

if (ipRow != null){
val ip = ipRow.getAs[String]("IP")
val firstFoundTime = ipRow.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = ipRow.getAs[Long]("LAST_FOUND_TIME")
val sessionCountList = ipRow.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = ipRow.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = ipRow.getAs[ofRef[String]]("ip_type_list")
val linkInfo = ipRow.getAs[String]("common_link_info")
private def getVertexIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseDocument]): BaseDocument = {
val ip = row.getAs[String]("IP")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList, sessionCountList, bytesSumList)

if (ipDoc != null) {
updateMaxAttribute(ipDoc, lastFoundTime, "LAST_FOUND_TIME")
updateSumAttribute(ipDoc, sepAttributeTuple._1, "SERVER_SESSION_COUNT")
updateSumAttribute(ipDoc, sepAttributeTuple._2, "SERVER_BYTES_SUM")
updateSumAttribute(ipDoc, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
updateSumAttribute(ipDoc, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
replaceAttribute(ipDoc,linkInfo,"COMMON_LINK_INFO")
var document = dictionaryMap.getOrDefault(ip, null)
if (document != null) {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
updateSumAttribute(document, sepAttributeTuple._1, "SERVER_SESSION_COUNT")
updateSumAttribute(document, sepAttributeTuple._2, "SERVER_BYTES_SUM")
updateSumAttribute(document, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
updateSumAttribute(document, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
} else {
ipDoc = new BaseDocument
ipDoc.setKey(ip)
ipDoc.addAttribute("IP", ip)
ipDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
ipDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
ipDoc.addAttribute("SERVER_SESSION_COUNT", sepAttributeTuple._1)
ipDoc.addAttribute("SERVER_BYTES_SUM", sepAttributeTuple._2)
ipDoc.addAttribute("CLIENT_SESSION_COUNT", sepAttributeTuple._3)
ipDoc.addAttribute("CLIENT_BYTES_SUM", sepAttributeTuple._4)
ipDoc.addAttribute("COMMON_LINK_INFO", "")
document = new BaseDocument
document.setKey(ip)
document.addAttribute("IP", ip)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
document.addAttribute("SERVER_SESSION_COUNT", sepAttributeTuple._1)
document.addAttribute("SERVER_BYTES_SUM", sepAttributeTuple._2)
document.addAttribute("CLIENT_SESSION_COUNT", sepAttributeTuple._3)
document.addAttribute("CLIENT_BYTES_SUM", sepAttributeTuple._4)
document.addAttribute("COMMON_LINK_INFO", "")
}
document
}

ipDoc
}
private def getRelationFqdnLocateIpRow(row: Row, dictionaryMap: ConcurrentHashMap[String, BaseEdgeDocument]): BaseEdgeDocument = {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")

private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument = {

val fqdnLocIpDocOpt = joinRow._2._1
var fqdnLocIpDoc = fqdnLocIpDocOpt match {
case Some(doc) => doc
case None => null
}

val fqdnLocIpRowOpt = joinRow._2._2

val fqdnLocIpRow = fqdnLocIpRowOpt match {
case Some(r) => r
case None => null
}

if (fqdnLocIpDoc != null){
updateProtocolDocument(fqdnLocIpDoc)
}

if (fqdnLocIpRow != null){
val fqdn = fqdnLocIpRow.getAs[String]("FQDN")
val serverIp = fqdnLocIpRow.getAs[String]("common_server_ip")
val firstFoundTime = fqdnLocIpRow.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = fqdnLocIpRow.getAs[Long]("LAST_FOUND_TIME")
val countTotalList = fqdnLocIpRow.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = fqdnLocIpRow.getAs[ofRef[AnyRef]]("schema_type_list")
val distCipRecent = fqdnLocIpRow.getAs[ofRef[String]]("DIST_CIP_RECENT")
val distCipRecent = row.getAs[ofRef[String]]("DIST_CIP_RECENT")
val disCips = mergeDistinctIp(distCipRecent)

val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)

val key = fqdn.concat("-" + serverIp)

if (fqdnLocIpDoc != null) {
updateMaxAttribute(fqdnLocIpDoc, lastFoundTime, "LAST_FOUND_TIME")
updateProtocolAttritube(fqdnLocIpDoc, sepAttritubeMap)
updateDistinctIp(fqdnLocIpDoc, distinctIp)
var document = dictionaryMap.getOrDefault(key, null)
if (document != null) {
updateMaxAttribute(document, lastFoundTime, "LAST_FOUND_TIME")
updateProtocolAttritube(document, sepAttritubeMap)
updateDistinctIp(document,disCips)
} else {
fqdnLocIpDoc = new BaseEdgeDocument()
fqdnLocIpDoc.setKey(key)
fqdnLocIpDoc.setFrom("FQDN/" + fqdn)
fqdnLocIpDoc.setTo("IP/" + serverIp)
fqdnLocIpDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
fqdnLocIpDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putProtocolAttritube(fqdnLocIpDoc, sepAttritubeMap)
putDistinctIp(fqdnLocIpDoc, distinctIp)
document = new BaseEdgeDocument()
document.setKey(key)
document.setFrom("FQDN/" + fqdn)
document.setTo("IP/" + serverIp)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putDistinctIp(document,disCips)
putProtocolAttritube(document, sepAttritubeMap)
}
}

fqdnLocIpDoc
document
}

}

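The rewritten `updateDocument` above drops the RDD full-outer-join in favour of a broadcast history map keyed by partition id, then writes in batches of `UPDATE_ARANGO_BATCH` through `overwrite`. A minimal sketch of that flush-every-N pattern, with `write` standing in for `arangoManger.overwrite` (note that the buffer itself must be cleared on each flush, not only a counter):

```scala
import java.util

object BatchFlushSketch {
  // Drain an iterator, flushing every `batchSize` documents plus the final remainder.
  def processAll[T](rows: Iterator[T], batchSize: Int)(write: util.ArrayList[T] => Unit): Unit = {
    val buffer = new util.ArrayList[T]
    rows.foreach { row =>
      buffer.add(row)
      if (buffer.size() >= batchSize) {
        write(buffer)  // e.g. arangoManger.overwrite(buffer, collName)
        buffer.clear() // reset the batch before accumulating the next one
      }
    }
    if (!buffer.isEmpty) write(buffer) // flush the partial last batch
  }
}
```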
@@ -1,136 +0,0 @@
/*
 * DISCLAIMER
 *
 * Copyright 2016 ArangoDB GmbH, Cologne, Germany
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright holder is ArangoDB GmbH, Cologne, Germany
 *
 * author Mark - mark at arangodb.com
 */

package cn.ac.iie.spark

import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions, WriteOptions}
import cn.ac.iie.spark.vpack.VPackUtils
import com.arangodb.model.DocumentCreateOptions
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}

import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.reflect.ClassTag

object ArangoSpark {

/**
 * Save data from rdd into ArangoDB
 *
 * @param rdd the rdd with the data to save
 * @param collection the collection to save in
 */
def save[T](rdd: RDD[T], collection: String): Unit =
save(rdd, collection, WriteOptions())

/**
 * Save data from rdd into ArangoDB
 *
 * @param rdd the rdd with the data to save
 * @param collection the collection to save in
 * @param options additional write options
 */
def save[T](rdd: RDD[T], collection: String, options: WriteOptions): Unit =
saveRDD(rdd, collection, options, (x: Iterator[T]) => x)

/**
 * Save data from dataset into ArangoDB
 *
 * @param dataset the dataset with data to save
 * @param collection the collection to save in
 */
def save[T](dataset: Dataset[T], collection: String): Unit =
saveRDD(dataset.rdd, collection, WriteOptions(), (x: Iterator[T]) => x)

/**
 * Save data from dataset into ArangoDB
 *
 * @param dataset the dataset with data to save
 * @param collection the collection to save in
 * @param options additional write options
 */
def save[T](dataset: Dataset[T], collection: String, options: WriteOptions): Unit =
saveRDD(dataset.rdd, collection, options, (x: Iterator[T]) => x)

/**
 * Save data from dataframe into ArangoDB
 *
 * @param dataframe the dataframe with data to save
 * @param collection the collection to save in
 */
def saveDF(dataframe: DataFrame, collection: String): Unit =
saveRDD[Row](dataframe.rdd, collection, WriteOptions(), (x: Iterator[Row]) => x.map { y => VPackUtils.rowToVPack(y) })

/**
 * Save data from dataframe into ArangoDB
 *
 * @param dataframe the dataframe with data to save
 * @param collection the collection to save in
 * @param options additional write options
 */
def saveDF(dataframe: DataFrame, collection: String, options: WriteOptions): Unit =
saveRDD[Row](dataframe.rdd, collection, options, (x: Iterator[Row]) => x.map { y => VPackUtils.rowToVPack(y) })

private def saveRDD[T](rdd: RDD[T], collection: String, options: WriteOptions, map: Iterator[T] => Iterator[Any]): Unit = {
val writeOptions = createWriteOptions(options, rdd.sparkContext.getConf)
rdd.foreachPartition { p =>
if (p.nonEmpty) {
val arangoDB = createArangoBuilder(writeOptions).build()
val col = arangoDB.db(writeOptions.database).collection(collection)
val docs = map(p).toList.asJava
writeOptions.method match {
case WriteOptions.INSERT => col.insertDocuments(docs)
case WriteOptions.UPDATE => col.updateDocuments(docs)
case WriteOptions.REPLACE => col.replaceDocuments(docs)
case WriteOptions.OVERWRITE =>
val documentCreateOptions = new DocumentCreateOptions
documentCreateOptions.overwrite(true)
documentCreateOptions.silent(true)
col.insertDocuments(docs, documentCreateOptions)
}

arangoDB.shutdown()
}
}
}

/**
 * Load data from ArangoDB into rdd
 *
 * @param sparkContext the sparkContext containing the ArangoDB configuration
 * @param collection the collection to load data from
 */
def load[T: ClassTag](sparkContext: SparkContext, collection: String): ArangoRdd[T] =
load(sparkContext, collection, ReadOptions())

/**
 * Load data from ArangoDB into rdd
 *
 * @param sparkContext the sparkContext containing the ArangoDB configuration
 * @param collection the collection to load data from
 * @param options read options
 */
def load[T: ClassTag](sparkContext: SparkContext, collection: String, options: ReadOptions): ArangoRdd[T] =
new ArangoRdd[T](sparkContext, createReadOptions(options, sparkContext.getConf).copy(collection = collection))

}
@@ -1,7 +0,0 @@
package cn.ac.iie.spark.partition

import org.apache.spark.Partition

class QueryArangoPartition(idx: Int, val offset: Long, val separate: Long) extends Partition{
override def index: Int = idx
}
@@ -1,34 +0,0 @@
package cn.ac.iie.spark.rdd

import com.arangodb.Protocol
import com.arangodb.entity.LoadBalancingStrategy

trait ArangoOptions {

def database: String = "_system"

def hosts: Option[String] = None

def user: Option[String] = None

def password: Option[String] = None

def useSsl: Option[Boolean] = None

def sslKeyStoreFile: Option[String] = None

def sslPassPhrase: Option[String] = None

def sslProtocol: Option[String] = None

def protocol: Option[Protocol] = None

def maxConnections: Option[Int] = None

def acquireHostList: Option[Boolean] = None

def acquireHostListInterval: Option[Int] = None

def loadBalancingStrategy: Option[LoadBalancingStrategy] = None

}
@@ -1,81 +0,0 @@
package cn.ac.iie.spark.rdd

import scala.collection.JavaConverters.asScalaIteratorConverter
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.update.UpdateDocument
import cn.ac.iie.spark
import cn.ac.iie.spark.partition.QueryArangoPartition
import com.arangodb.ArangoCursor
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.slf4j.LoggerFactory

import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

class ArangoRdd[T: ClassTag](@transient override val sparkContext: SparkContext,
val options: ReadOptions
) extends RDD[T](sparkContext, Nil) {

private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)

override def compute(split: Partition, context: TaskContext): Iterator[T] = {

createCursor(split.asInstanceOf[QueryArangoPartition]).asScala
}

override protected def getPartitions: Array[Partition] = {
val partitions = ArrayBuffer[Partition]()
val total = getCountTotal
for (i <- 0 until ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS) {
val partition = getPartition(i, total)
partitions += partition
}
partitions.toArray
}

private def createCursor(split: QueryArangoPartition)(implicit clazz: ClassTag[T]): ArangoCursor[T] = {

var arangoCursor:ArangoCursor[T] = null
val arangoDB = spark.createArangoBuilder(options).build()
try {
val offset = split.offset
val separate = split.separate
val collection = options.collection
val sql = s"FOR doc IN $collection limit $offset,$separate RETURN doc"
LOG.info(sql)
arangoCursor = arangoDB.db(options.database).query(sql,clazz.runtimeClass.asInstanceOf[Class[T]])
}catch {
case e: Exception => LOG.error(s"Failed to create cursor: ${e.getMessage}")
}finally {
arangoDB.shutdown()
}
arangoCursor
}

override def repartition(numPartitions: Int)(implicit ord: Ordering[T]): RDD[T] = super.repartition(numPartitions)

private def getPartition(idx: Int, countTotal: Long): QueryArangoPartition = {
val sepNum = countTotal / ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS + 1
val offsetNum = idx * sepNum
new QueryArangoPartition(idx, offsetNum, sepNum)
}

override def count(): Long = getCountTotal

private def getCountTotal: Long = {
val arangoDB = spark.createArangoBuilder(options).build()
var cnt = 0L
val sql = s"RETURN LENGTH(${options.collection})"
LOG.info(sql)
try {
val longs = arangoDB.db(options.database).query(sql, classOf[Long])
while (longs.hasNext) cnt = longs.next
} catch {
case e: Exception => LOG.error(sql + s" failed: ${e.getMessage}")
}finally {
arangoDB.shutdown()
}
cnt
}
}
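`ArangoRdd` above slices a collection purely by offset: `sepNum = countTotal / SPARK_SQL_SHUFFLE_PARTITIONS + 1`, and partition `i` issues `LIMIT i*sepNum, sepNum`. A worked example of the limits this produces (values illustrative):

```scala
object PartitionMathSketch {
  // (offset, limit) pairs exactly as getPartition computes them.
  def slices(countTotal: Long, partitions: Int): Seq[(Long, Long)] = {
    val sepNum = countTotal / partitions + 1
    (0 until partitions).map(i => (i * sepNum, sepNum))
  }

  // With countTotal = 10 and 3 partitions, sepNum = 10/3 + 1 = 4, so the
  // generated AQL is LIMIT 0,4 / LIMIT 4,4 / LIMIT 8,4 (the last slice
  // simply returns the two remaining documents).
  def main(args: Array[String]): Unit =
    slices(10, 3).foreach { case (off, sep) => println(s"FOR doc IN coll LIMIT $off,$sep RETURN doc") }
}
```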
@@ -1,93 +0,0 @@
/*
 * DISCLAIMER
 *
 * Copyright 2016 ArangoDB GmbH, Cologne, Germany
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright holder is ArangoDB GmbH, Cologne, Germany
 *
 * author Mark - mark at arangodb.com
 */

package cn.ac.iie.spark.rdd

import cn.ac.iie.spark.partition.QueryArangoPartition
import com.arangodb.Protocol
import com.arangodb.entity.LoadBalancingStrategy

case class ReadOptions(override val database: String = "_system",
val collection: String = null,
partitioner: QueryArangoPartition = new QueryArangoPartition(0,0,0),
override val hosts: Option[String] = None,
override val user: Option[String] = None,
override val password: Option[String] = None,
override val useSsl: Option[Boolean] = None,
override val sslKeyStoreFile: Option[String] = None,
override val sslPassPhrase: Option[String] = None,
override val sslProtocol: Option[String] = None,
override val protocol: Option[Protocol] = None,
override val maxConnections: Option[Int] = None,
override val acquireHostList: Option[Boolean] = None,
override val acquireHostListInterval: Option[Int] = None,
override val loadBalancingStrategy: Option[LoadBalancingStrategy] = None) extends ArangoOptions {

def this() = this(database = "_system")

def database(database: String): ReadOptions = copy(database = database)

def collection(collection: String): ReadOptions = copy(collection = collection)

def hosts(hosts: String): ReadOptions = copy(hosts = Some(hosts))

def user(user: String): ReadOptions = copy(user = Some(user))

def password(password: String): ReadOptions = copy(password = Some(password))

def useSsl(useSsl: Boolean): ReadOptions = copy(useSsl = Some(useSsl))

def sslKeyStoreFile(sslKeyStoreFile: String): ReadOptions = copy(sslKeyStoreFile = Some(sslKeyStoreFile))

def sslPassPhrase(sslPassPhrase: String): ReadOptions = copy(sslPassPhrase = Some(sslPassPhrase))

def sslProtocol(sslProtocol: String): ReadOptions = copy(sslProtocol = Some(sslProtocol))

def protocol(protocol: Protocol): ReadOptions = copy(protocol = Some(protocol))

def maxConnections(maxConnections: Int): ReadOptions = copy(maxConnections = Some(maxConnections))

def acquireHostList(acquireHostList: Boolean): ReadOptions = copy(acquireHostList = Some(acquireHostList))

def acquireHostListInterval(acquireHostListInterval: Int): ReadOptions = copy(acquireHostListInterval = Some(acquireHostListInterval))

def loadBalancingStrategy(loadBalancingStrategy: LoadBalancingStrategy): ReadOptions = copy(loadBalancingStrategy = Some(loadBalancingStrategy))

def copy(database: String = database,
collection: String = collection,
partitioner: QueryArangoPartition = partitioner,
hosts: Option[String] = hosts,
user: Option[String] = user,
password: Option[String] = password,
useSsl: Option[Boolean] = useSsl,
sslKeyStoreFile: Option[String] = sslKeyStoreFile,
sslPassPhrase: Option[String] = sslPassPhrase,
sslProtocol: Option[String] = sslProtocol,
protocol: Option[Protocol] = protocol,
maxConnections: Option[Int] = maxConnections,
acquireHostList: Option[Boolean] = acquireHostList,
acquireHostListInterval: Option[Int] = acquireHostListInterval,
loadBalancingStrategy: Option[LoadBalancingStrategy] = loadBalancingStrategy): ReadOptions = {
ReadOptions(database, collection, partitioner, hosts, user, password, useSsl, sslKeyStoreFile, sslPassPhrase, sslProtocol, protocol, maxConnections, acquireHostList, acquireHostListInterval, loadBalancingStrategy)
}

}
@@ -1,124 +0,0 @@
/*
 * DISCLAIMER
 *
 * Copyright 2016 ArangoDB GmbH, Cologne, Germany
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright holder is ArangoDB GmbH, Cologne, Germany
 *
 * author Mark - mark at arangodb.com
 */

package cn.ac.iie.spark.rdd

import javax.net.ssl.SSLContext
import com.arangodb.Protocol
import com.arangodb.entity.LoadBalancingStrategy

case class WriteOptions(override val database: String = "_system",
val method: WriteOptions.Method = WriteOptions.INSERT,
override val hosts: Option[String] = None,
override val user: Option[String] = None,
override val password: Option[String] = None,
override val useSsl: Option[Boolean] = None,
override val sslKeyStoreFile: Option[String] = None,
override val sslPassPhrase: Option[String] = None,
override val sslProtocol: Option[String] = None,
override val protocol: Option[Protocol] = None,
override val maxConnections: Option[Int] = None,
override val acquireHostList: Option[Boolean] = None,
override val acquireHostListInterval: Option[Int] = None,
override val loadBalancingStrategy: Option[LoadBalancingStrategy] = None) extends ArangoOptions {
import WriteOptions._

def this() = this(database = "_system")

def database(database: String): WriteOptions = copy(database = database)

def method(method: Method): WriteOptions = copy(method = method)

def hosts(hosts: String): WriteOptions = copy(hosts = Some(hosts))

def user(user: String): WriteOptions = copy(user = Some(user))

def password(password: String): WriteOptions = copy(password = Some(password))

def useSsl(useSsl: Boolean): WriteOptions = copy(useSsl = Some(useSsl))

def sslKeyStoreFile(sslKeyStoreFile: String): WriteOptions = copy(sslKeyStoreFile = Some(sslKeyStoreFile))

def sslPassPhrase(sslPassPhrase: String): WriteOptions = copy(sslPassPhrase = Some(sslPassPhrase))

def sslProtocol(sslProtocol: String): WriteOptions = copy(sslProtocol = Some(sslProtocol))

def protocol(protocol: Protocol): WriteOptions = copy(protocol = Some(protocol))

def maxConnections(maxConnections: Int): WriteOptions = copy(maxConnections = Some(maxConnections))

def acquireHostList(acquireHostList: Boolean): WriteOptions = copy(acquireHostList = Some(acquireHostList))

def acquireHostListInterval(acquireHostListInterval: Int): WriteOptions = copy(acquireHostListInterval = Some(acquireHostListInterval))

def loadBalancingStrategy(loadBalancingStrategy: LoadBalancingStrategy): WriteOptions = copy(loadBalancingStrategy = Some(loadBalancingStrategy))

def copy(database: String = database,
method: Method = method,
hosts: Option[String] = hosts,
user: Option[String] = user,
password: Option[String] = password,
useSsl: Option[Boolean] = useSsl,
sslKeyStoreFile: Option[String] = sslKeyStoreFile,
sslPassPhrase: Option[String] = sslPassPhrase,
sslProtocol: Option[String] = sslProtocol,
protocol: Option[Protocol] = protocol,
maxConnections: Option[Int] = maxConnections,
acquireHostList: Option[Boolean] = acquireHostList,
acquireHostListInterval: Option[Int] = acquireHostListInterval,
loadBalancingStrategy: Option[LoadBalancingStrategy] = loadBalancingStrategy): WriteOptions = {
WriteOptions(database, method, hosts, user, password, useSsl, sslKeyStoreFile, sslPassPhrase, sslProtocol, protocol, maxConnections, acquireHostList, acquireHostListInterval, loadBalancingStrategy)
}

}

object WriteOptions {

/**
 * method to save documents to arangodb
 */
sealed trait Method

/**
 * save documents by inserting
 * @see [[com.arangodb.ArangoCollection#insertDocuments(java.util.Collection)]]
 */
case object INSERT extends Method

/**
 * save documents by updating
 * @see [[com.arangodb.ArangoCollection#updateDocuments(java.util.Collection)]]
 */
case object UPDATE extends Method

/**
 * save documents by replacing
 * @see [[com.arangodb.ArangoCollection#replaceDocuments(java.util.Collection)]]
 */
case object REPLACE extends Method

/**
 * save documents by overwrite
 */
case object OVERWRITE extends Method

}
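For reference, a hedged usage sketch of the options API deleted above (host, credentials and database name are placeholders); `OVERWRITE` routes `saveRDD` to `insertDocuments` with `overwrite(true)`, i.e. upsert-by-key semantics:

```scala
import cn.ac.iie.spark.rdd.WriteOptions

// Placeholder values; each method returns a modified copy, as defined above.
val opts = WriteOptions(database = "iplearn")
  .hosts("127.0.0.1:8529")
  .user("root")
  .method(WriteOptions.OVERWRITE) // upsert via DocumentCreateOptions.overwrite(true)
```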
@@ -1,139 +0,0 @@
/*
 * DISCLAIMER
 *
 * Copyright 2016 ArangoDB GmbH, Cologne, Germany
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright holder is ArangoDB GmbH, Cologne, Germany
 *
 * author Mark - mark at arangodb.com
 */

package cn.ac.iie

import java.io.FileInputStream
import java.security.KeyStore

import cn.ac.iie.spark.rdd.{ArangoOptions, ReadOptions, WriteOptions}
import com.arangodb.{ArangoDB, ArangoDBException, Protocol}
import com.arangodb.entity.LoadBalancingStrategy
import com.arangodb.velocypack.module.jdk8.VPackJdk8Module
import com.arangodb.velocypack.module.scala.VPackScalaModule
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
import org.apache.spark.SparkConf

import scala.util.Try

package object spark {

val PropertyHosts = "arangodb.hosts"
val PropertyUser = "arangodb.user"
val PropertyPassword = "arangodb.password"
val PropertyUseSsl = "arangodb.useSsl"
val PropertySslKeyStoreFile = "arangodb.ssl.keyStoreFile"
val PropertySslPassPhrase = "arangodb.ssl.passPhrase"
val PropertySslProtocol = "arangodb.ssl.protocol"
val PropertyProtocol = "arangodb.protocol"
val PropertyMaxConnections = "arangodb.maxConnections"
val PropertyAcquireHostList = "arangodb.acquireHostList"
val PropertyAcquireHostListInterval = "arangodb.acquireHostListInterval"
val PropertyLoadBalancingStrategy = "arangodb.loadBalancingStrategy"

private[spark] def createReadOptions(options: ReadOptions, sc: SparkConf): ReadOptions = {
options.copy(
hosts = options.hosts.orElse(some(sc.get(PropertyHosts, null))),
user = options.user.orElse(some(sc.get(PropertyUser, null))),
password = options.password.orElse(some(sc.get(PropertyPassword, null))),
useSsl = options.useSsl.orElse(some(Try(sc.get(PropertyUseSsl, null).toBoolean).getOrElse(false))),
sslKeyStoreFile = options.sslKeyStoreFile.orElse(some(sc.get(PropertySslKeyStoreFile, null))),
sslPassPhrase = options.sslPassPhrase.orElse(some(sc.get(PropertySslPassPhrase, null))),
sslProtocol = options.sslProtocol.orElse(some(sc.get(PropertySslProtocol, null))),
protocol = options.protocol.orElse(some(Protocol.valueOf(sc.get(PropertyProtocol, "VST")))),
maxConnections = options.maxConnections.orElse(some(Try(sc.get(PropertyMaxConnections, null).toInt).getOrElse(1))),
acquireHostList = options.acquireHostList.orElse(some(Try(sc.get(PropertyAcquireHostList, null).toBoolean).getOrElse(false))),
acquireHostListInterval = options.acquireHostListInterval.orElse(some(Try(sc.get(PropertyAcquireHostListInterval, null).toInt).getOrElse(60000))),
loadBalancingStrategy = options.loadBalancingStrategy.orElse(some(LoadBalancingStrategy.valueOf(sc.get(PropertyLoadBalancingStrategy, "NONE")))))
}

private[spark] def createWriteOptions(options: WriteOptions, sc: SparkConf): WriteOptions = {
options.copy(
hosts = options.hosts.orElse(some(sc.get(PropertyHosts, null))),
user = options.user.orElse(some(sc.get(PropertyUser, null))),
password = options.password.orElse(some(sc.get(PropertyPassword, null))),
useSsl = options.useSsl.orElse(some(Try(sc.get(PropertyUseSsl, null).toBoolean).getOrElse(false))),
sslKeyStoreFile = options.sslKeyStoreFile.orElse(some(sc.get(PropertySslKeyStoreFile, null))),
sslPassPhrase = options.sslPassPhrase.orElse(some(sc.get(PropertySslPassPhrase, null))),
sslProtocol = options.sslProtocol.orElse(some(sc.get(PropertySslProtocol, null))),
protocol = options.protocol.orElse(some(Protocol.valueOf(sc.get(PropertyProtocol, "VST")))),
maxConnections = options.maxConnections.orElse(some(Try(sc.get(PropertyMaxConnections, null).toInt).getOrElse(1))),
acquireHostList = options.acquireHostList.orElse(some(Try(sc.get(PropertyAcquireHostList, null).toBoolean).getOrElse(false))),
acquireHostListInterval = options.acquireHostListInterval.orElse(some(Try(sc.get(PropertyAcquireHostListInterval, null).toInt).getOrElse(60000))),
loadBalancingStrategy = options.loadBalancingStrategy.orElse(some(LoadBalancingStrategy.valueOf(sc.get(PropertyLoadBalancingStrategy, "NONE")))))
}

private[spark] def createArangoBuilder(options: ArangoOptions): ArangoDB.Builder = {
val builder = new ArangoDB.Builder()
builder.registerModules(new VPackJdk8Module, new VPackScalaModule)
options.hosts.foreach { hosts(_).foreach(host => builder.host(host._1, host._2)) }
options.user.foreach { builder.user(_) }
options.password.foreach { builder.password(_) }
options.useSsl.foreach { builder.useSsl(_) }
if (options.sslKeyStoreFile.isDefined && options.sslPassPhrase.isDefined) {
builder.sslContext(createSslContext(options.sslKeyStoreFile.get, options.sslPassPhrase.get, options.sslProtocol.getOrElse("TLS")))
}
options.protocol.foreach { builder.useProtocol(_) }
options.maxConnections.foreach { builder.maxConnections(_) }
options.acquireHostList.foreach { builder.acquireHostList(_) }
options.acquireHostListInterval.foreach { builder.acquireHostListInterval(_) }
options.loadBalancingStrategy.foreach { builder.loadBalancingStrategy(_) }
builder
}

private def createSslContext(keyStoreFile: String, passPhrase: String, protocol: String): SSLContext = {
val ks = KeyStore.getInstance(KeyStore.getDefaultType());
val kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
ks.load(new FileInputStream(keyStoreFile), passPhrase.toCharArray());
kmf.init(ks, passPhrase.toCharArray());
val tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init(ks);
val sc = SSLContext.getInstance(protocol);
sc.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
sc
}

private def some(value: String): Option[String] =
if (value != null) Some(value) else None

private def some(value: Int): Option[Int] =
Some(value)

private def some(value: Boolean): Option[Boolean] =
Some(value)

private def some(value: Protocol): Option[Protocol] =
Some(value)

private def some(value: LoadBalancingStrategy): Option[LoadBalancingStrategy] =
Some(value)

private def hosts(hosts: String): List[(String, Int)] =
hosts.split(",").map({ x =>
val s = x.split(":")
if (s.length != 2 || !s(1).matches("[0-9]+"))
throw new ArangoDBException(s"Could not load property-value arangodb.hosts=${s}. Expected format ip:port,ip:port,...");
else
(s(0), s(1).toInt)
}).toList

}
@@ -1,160 +0,0 @@
/*
 * DISCLAIMER
 *
 * Copyright 2016 ArangoDB GmbH, Cologne, Germany
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright holder is ArangoDB GmbH, Cologne, Germany
 *
 * author Mark - mark at arangodb.com
 */

package cn.ac.iie.spark.vpack

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{
ArrayType,
BooleanType,
DataType,
DateType,
DecimalType,
DoubleType,
FloatType,
IntegerType,
LongType,
MapType,
NullType,
ShortType,
StringType,
StructField,
StructType,
TimestampType
}
import com.arangodb.velocypack.VPackBuilder
import com.arangodb.velocypack.VPackSlice
import com.arangodb.velocypack.ValueType

private[spark] object VPackUtils {

def rowToVPack(row: Row): VPackSlice = {
val builder = new VPackBuilder()
if (row == null) {
builder.add(ValueType.NULL)
} else {
builder.add(ValueType.OBJECT)
row.schema.fields.zipWithIndex.foreach { addField(_, row, builder) }
builder.close()
}
builder.slice()
}

private def addField(field: (StructField, Int), row: Row, builder: VPackBuilder): Unit = {
val name = field._1.name
val index = field._2
if (row.isNullAt(index)) {
builder.add(name, ValueType.NULL)
} else {
field._1.dataType match {
case BooleanType => builder.add(name, java.lang.Boolean.valueOf(row.getBoolean(index)))
case DoubleType => builder.add(name, java.lang.Double.valueOf(row.getDouble(index)))
case FloatType => builder.add(name, java.lang.Float.valueOf(row.getFloat(index)))
case LongType => builder.add(name, java.lang.Long.valueOf(row.getLong(index)))
case IntegerType => builder.add(name, java.lang.Integer.valueOf(row.getInt(index)))
case ShortType => builder.add(name, java.lang.Short.valueOf(row.getShort(index)))
case StringType => builder.add(name, java.lang.String.valueOf(row.getString(index)));
case DateType => builder.add(name, row.getDate(index))
case TimestampType => builder.add(name, row.getTimestamp(index))
case t: DecimalType => builder.add(name, row.getDecimal(index))
case t: MapType => {
builder.add(name, ValueType.OBJECT)
row.getMap[String, Any](index).foreach { case (name, value) => addValue(name, value, builder) }
builder.close()
}
case t: ArrayType => {
builder.add(name, ValueType.ARRAY)
addValues(row, index, builder, t.elementType)
builder.close()
}
case NullType => builder.add(name, ValueType.NULL)
case struct: StructType => builder.add(name, rowToVPack(row.getStruct(index)))
case _ => // TODO
}
}
}

private def addValues(row: Row, index: Int, builder: VPackBuilder, itemType: DataType): Unit = {
itemType match {
case BooleanType =>
row.getSeq[Boolean](index).foreach { value =>
addValue(null, value, builder)
}
case DoubleType =>
row.getSeq[Double](index).foreach { value =>
addValue(null, value, builder)
}
case FloatType =>
row.getSeq[Float](index).foreach { value =>
addValue(null, value, builder)
}
case LongType =>
row.getSeq[Long](index).foreach { value =>
addValue(null, value, builder)
}
case IntegerType =>
row.getSeq[Int](index).foreach { value =>
addValue(null, value, builder)
}
case ShortType =>
row.getSeq[Short](index).foreach { value =>
addValue(null, value, builder)
}
case StringType =>
row.getSeq[String](index).foreach { value =>
addValue(null, value, builder)
}
case DateType =>
row.getSeq[java.sql.Date](index).foreach { value =>
addValue(null, value, builder)
}
case TimestampType =>
row.getSeq[java.sql.Timestamp](index).foreach { value =>
addValue(null, value, builder)
}
case s: StructType => {
row.getSeq[Row](index).foreach { value =>
builder.add(null, rowToVPack(value))
}
}
case t: MapType => // TODO
case t: ArrayType => // TODO
case _ => // TODO
}
}

private def addValue(name: String, value: Any, builder: VPackBuilder): Unit = {
value match {
case value: Boolean => builder.add(name, java.lang.Boolean.valueOf(value))
case value: Double => builder.add(name, java.lang.Double.valueOf(value))
case value: Float => builder.add(name, java.lang.Float.valueOf(value))
case value: Long => builder.add(name, java.lang.Long.valueOf(value))
case value: Int => builder.add(name, java.lang.Integer.valueOf(value))
case value: Short => builder.add(name, java.lang.Short.valueOf(value))
case value: String => builder.add(name, java.lang.String.valueOf(value))
case value: java.sql.Date => builder.add(name, value)
case value: java.sql.Timestamp => builder.add(name, value)
case _ => // TODO
}
}

}
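A small demo of the deleted converter; since `VPackUtils` is `private[spark]`, the sketch assumes it lives in the same package (the DataFrame contents are illustrative):

```scala
package cn.ac.iie.spark // VPackUtils is private[spark], so the demo sits in-package

import cn.ac.iie.spark.vpack.VPackUtils
import org.apache.spark.sql.SparkSession

object VPackDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("vpack-demo").getOrCreate()
    import spark.implicits._
    // One row with a string and a long column, exercising the typed branches in addField.
    val df = Seq(("1.2.3.4", 42L)).toDF("IP", "SESSION_COUNT")
    df.collect().foreach(row => println(VPackUtils.rowToVPack(row))) // prints each VPack OBJECT slice
    spark.stop()
  }
}
```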
@@ -1,9 +1,7 @@
package cn.ac.iie.utils

import cn.ac.iie.config.ApplicationConfig
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.LongAccumulator
import org.slf4j.LoggerFactory

object SparkSessionUtil {
@@ -11,8 +9,6 @@ object SparkSessionUtil {

val spark: SparkSession = getSparkSession

var sparkContext: SparkContext = getContext

private def getSparkSession: SparkSession ={
val spark: SparkSession = SparkSession
.builder()
@@ -21,36 +17,16 @@ object SparkSessionUtil {
.config("spark.network.timeout", ApplicationConfig.SPARK_NETWORK_TIMEOUT)
.config("spark.sql.shuffle.partitions", ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.config("spark.executor.memory", ApplicationConfig.SPARK_EXECUTOR_MEMORY)
.config("arangodb.hosts", s"${ApplicationConfig.ARANGODB_HOST}:${ApplicationConfig.ARANGODB_PORT}")
.config("arangodb.user", ApplicationConfig.ARANGODB_USER)
.config("arangodb.password", ApplicationConfig.ARANGODB_PASSWORD)
.master(ApplicationConfig.MASTER)
.getOrCreate()
LOG.warn("SparkSession created successfully")
spark
}

def getContext: SparkContext = {
@transient var sc: SparkContext = null
if (sparkContext == null) sc = spark.sparkContext
sc
}

def getLongAccumulator(name: String): LongAccumulator ={
if (sparkContext == null){
sparkContext = getContext
}
sparkContext.longAccumulator(name)

}

def closeSpark(): Unit ={
if (spark != null){
spark.stop()
}
if (sparkContext != null){
sparkContext.stop()
}
}

}

@@ -7,7 +7,7 @@ import org.apache.spark.sql.SparkSession
object BaseClickhouseDataTest {
private val spark: SparkSession = SparkSessionUtil.spark
def main(args: Array[String]): Unit = {
// BaseClickhouseData loadConnectionDataFromCk()
BaseClickhouseData loadConnectionDataFromCk()
val sql =
"""
|SELECT

@@ -1,75 +0,0 @@
package cn.ac.iie.spark

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.partition.CustomPartitioner
import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions}
import cn.ac.iie.utils.SparkSessionUtil
import com.arangodb.entity.BaseDocument
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.{collect_list, max, min}
import org.apache.spark.storage.StorageLevel

object RDDTest {
def main(args: Array[String]): Unit = {

val sparkContext = SparkSessionUtil.spark.sparkContext

println(sparkContext.getConf.get("arangodb.hosts"))

// val options = ReadOptions("iplearn_media_domain").copy(collection = "R_LOCATE_FQDN2IP")
val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)

val ipOptions = options.copy(collection = "IP")

val rdd: ArangoRdd[BaseDocument] = ArangoSpark.load[BaseDocument](sparkContext,"IP",options)

println(rdd.count())
println(rdd.getNumPartitions)

val ipRDD = mergeVertexIp()
val value: RDD[(String, (Option[BaseDocument], Option[Row]))] = rdd.map(doc => {
(doc.getKey, doc)
}).fullOuterJoin(ipRDD)
value.foreach((row: (String, (Option[BaseDocument], Option[Row]))) => {
val value = row._2._2
val str: String = value match {
case Some(r) => r.getAs[String]("IP")
// case None => null
case _ => null
}
println(str)
})

/*
val value: RDD[BaseDocument] = rdd.filter(doc => doc.getAttribute("CLIENT_SESSION_COUNT").asInstanceOf[Long] > 100).map(doc => {
doc.addAttribute("abc", 1)
doc
})
value.map(doc => {(doc.getKey,doc)})
value.persist(StorageLevel.MEMORY_AND_DISK)
value.foreach(fqdnRow => println(fqdnRow.toString))
println(value.count())
*/

SparkSessionUtil.spark.close()
System.exit(0)

}

def mergeVertexIp(): RDD[(String,Row)]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list")
)
val values = frame.rdd.map(row => (row.getAs[String]("IP"), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
values
}

}