Compare commits

6 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 264afdaa3e | |
| | 423e9d9b44 | |
| | d61fbee61a | |
| | 2f7cceb826 | |
| | 51d2549902 | |
| | f0cebd8e1c | |
@@ -14,7 +14,7 @@ public class ApplicationConfig {
     public static final Integer ARANGODB_BATCH = ConfigUtils.getIntProperty("arangoDB.batch");

     public static final Integer UPDATE_ARANGO_BATCH = ConfigUtils.getIntProperty("update.arango.batch");
-    public static final String ARANGODB_READ_LIMIT = ConfigUtils.getStringProperty("arangoDB.read.limit");
+    public static final Long ARANGODB_READ_LIMIT = ConfigUtils.getLongProperty("arangoDB.read.limit");

     public static final Integer THREAD_POOL_NUMBER = ConfigUtils.getIntProperty("thread.pool.number");
     public static final Integer THREAD_AWAIT_TERMINATION_TIME = ConfigUtils.getIntProperty("thread.await.termination.time");
@@ -129,6 +129,9 @@ public class BaseArangoData {
     private String getQuerySql(Long cnt, int threadNumber, String table) {
         long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER + 1;
         long offsetNum = threadNumber * sepNum;
+        if (sepNum > ApplicationConfig.ARANGODB_READ_LIMIT) {
+            sepNum = ApplicationConfig.ARANGODB_READ_LIMIT;
+        }
         return "FOR doc IN " + table + " limit " + offsetNum + "," + sepNum + " RETURN doc";
     }

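The guard added here only type-checks because the first hunk retyped `ARANGODB_READ_LIMIT` from `String` to `Long`. A standalone Java sketch of the paging arithmetic — not the project's class; the constants mirror `thread.pool.number=10` and `arangoDB.read.limit=10000000` from the properties file further down in this compare:

```java
public class AqlPagingSketch {
    static final long THREAD_POOL_NUMBER = 10;           // thread.pool.number
    static final long ARANGODB_READ_LIMIT = 10_000_000L; // arangoDB.read.limit

    static String querySql(long cnt, int threadNumber, String table) {
        long sepNum = cnt / THREAD_POOL_NUMBER + 1; // per-thread slice size
        long offsetNum = threadNumber * sepNum;     // this thread's starting offset
        if (sepNum > ARANGODB_READ_LIMIT) {
            sepNum = ARANGODB_READ_LIMIT;           // cap what a single cursor may read
        }
        return "FOR doc IN " + table + " limit " + offsetNum + "," + sepNum + " RETURN doc";
    }

    public static void main(String[] args) {
        // 25M documents over 10 threads: slice = 2,500,001; thread 3 starts at 7,500,003.
        System.out.println(querySql(25_000_000L, 3, "FQDN"));
        // -> FOR doc IN FQDN limit 7500003,2500001 RETURN doc
    }
}
```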
@@ -44,8 +44,8 @@ public class UpdateGraphData {
         long start = System.currentTimeMillis();
         try {

-            updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class, BaseDocument.class,
-                    ReadClickhouseData::getVertexFqdnSql, ReadClickhouseData::getVertexFqdnDocument);
+//            updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class, BaseDocument.class,
+//                    ReadClickhouseData::getVertexFqdnSql, ReadClickhouseData::getVertexFqdnDocument);

             updateDocument(newVertexIpMap, historyVertexIpMap, "IP", Ip.class, BaseDocument.class,
                     ReadClickhouseData::getVertexIpSql, ReadClickhouseData::getVertexIpDocument);
@@ -79,8 +79,8 @@ public class UpdateGraphData {
         long start = System.currentTimeMillis();
         try {

-            updateDocument("FQDN", Fqdn.class, BaseDocument.class,
-                    ReadClickhouseData::getVertexFqdnSql, ReadClickhouseData::getVertexFqdnDocument);
+//            updateDocument("FQDN", Fqdn.class, BaseDocument.class,
+//                    ReadClickhouseData::getVertexFqdnSql, ReadClickhouseData::getVertexFqdnDocument);

             updateDocument("IP", Ip.class, BaseDocument.class,
                     ReadClickhouseData::getVertexIpSql, ReadClickhouseData::getVertexIpDocument);
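With both `updateDocument` calls for FQDN commented out in the two hunks above, this branch of `UpdateGraphData` now refreshes only IP vertices; the FQDN merge resurfaces in Spark form in the `BaseClickhouseData` and `MergeDataFrame` hunks at the end of this compare.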
@@ -257,40 +257,40 @@ public class ReadClickhouseData {

     public static String getVertexFqdnSql() {
         String where = "common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
-        String sslSql = "SELECT ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni";
-        String httpSql = "SELECT http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host";
+        String sslSql = "SELECT ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni";
+        String httpSql = "SELECT http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host";
         return "SELECT FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME FROM ((" + sslSql + ") UNION ALL (" + httpSql + ")) GROUP BY FQDN HAVING FQDN != ''";
     }

     public static String getVertexIpSql() {
         String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
-        String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
-        String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
+        String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_c2s) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.session_record where " + where + " group by IP";
+        String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_s2c) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.session_record where " + where + " group by IP";
         return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
     }

     public static String getRelationshipFqdnAddressIpSql() {
         String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
-        String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
-        String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
+        String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
+        String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
         return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
     }

     public static String getRelationshipIpVisitFqdnSql() {
         String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
-        String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
-        String sslSql = "SELECT ssl_sni AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE common_schema_type = 'SSL' GROUP BY ssl_sni,common_client_ip";
+        String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
+        String sslSql = "SELECT ssl_sni AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'TLS' AS schema_type FROM tsg_galaxy_v3.session_record WHERE common_schema_type = 'SSL' GROUP BY ssl_sni,common_client_ip";
         return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
     }

     public static String getVertexSubscriberSql() {
         String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_subscriber_id != '' AND radius_packet_type = 4 AND radius_acct_status_type = 1";
-        return "SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log WHERE" + where + " GROUP BY common_subscriber_id";
+        return "SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record WHERE" + where + " GROUP BY common_subscriber_id";
     }

     public static String getRelationshipSubsciberLocateIpSql() {
         String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_subscriber_id != '' AND radius_framed_ip != '' AND radius_packet_type = 4 AND radius_acct_status_type = 1";
-        return "SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME,COUNT(*) as COUNT_TOTAL FROM radius_record_log WHERE" + where + " GROUP BY common_subscriber_id,radius_framed_ip";
+        return "SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME,COUNT(*) as COUNT_TOTAL FROM radius_record WHERE" + where + " GROUP BY common_subscriber_id,radius_framed_ip";
     }

     private static long[] getTimeLimit() {
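The net effect of this hunk: every query in `ReadClickhouseData` now reads the consolidated tables (`tsg_galaxy_v3.connection_record_log` becomes `tsg_galaxy_v3.session_record`, `radius_record_log` becomes `radius_record`), and the IP vertex query switches to the direction-specific link columns, `common_link_info_c2s` on the client side and `common_link_info_s2c` on the server side.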
@@ -33,10 +33,10 @@ public class Ip extends Vertex {
     }

     private void updateIpByType(BaseDocument newDocument, BaseDocument historyDocument) {
-        putSumAttribute(newDocument, historyDocument, "CLIENT_SESSION_COUNT");
-        putSumAttribute(newDocument, historyDocument, "CLIENT_BYTES_SUM");
-        putSumAttribute(newDocument, historyDocument, "SERVER_SESSION_COUNT");
-        putSumAttribute(newDocument, historyDocument, "SERVER_BYTES_SUM");
+//        putSumAttribute(newDocument, historyDocument, "CLIENT_SESSION_COUNT");
+//        putSumAttribute(newDocument, historyDocument, "CLIENT_BYTES_SUM");
+//        putSumAttribute(newDocument, historyDocument, "SERVER_SESSION_COUNT");
+//        putSumAttribute(newDocument, historyDocument, "SERVER_BYTES_SUM");
     }

 }
@@ -6,7 +6,6 @@ public class IpLearningApplicationTest {

     public static void main(String[] args) {
         UpdateGraphData updateGraphData = new UpdateGraphData();
-//        updateGraphData.updateArango();
         updateGraphData.updateArango2();

     }
@@ -1,15 +1,13 @@
 #arangoDB parameter configuration
-arangoDB.host=192.168.40.182
-#arangoDB.host=192.168.40.224
+arangoDB.host=192.168.44.12
 arangoDB.port=8529
-arangoDB.user=upsert
-arangoDB.password=ceiec2018
-arangoDB.DB.name=ip-learning-test
-#arangoDB.DB.name=tsg_galaxy_v3
+arangoDB.user=root
+arangoDB.password=ceiec2019
+arangoDB.DB.name=tsg_galaxy_v3
 arangoDB.batch=100000
 arangoDB.ttl=3600

-arangoDB.read.limit=
+arangoDB.read.limit=10000000
 update.arango.batch=10000

 thread.pool.number=10
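Note that `arangoDB.read.limit` was previously left empty; with the constant now read through `ConfigUtils.getLongProperty`, the value has to parse as a number, hence the explicit `10000000`. `ConfigUtils` itself is not part of this compare; a minimal, hypothetical sketch of what such a helper might look like:

```java
import java.util.Properties;

// Hypothetical stand-in for the project's ConfigUtils (not shown in this compare).
final class ConfigSketch {
    private static final Properties PROPS = new Properties(); // assume loaded at startup

    static Long getLongProperty(String key) {
        String raw = PROPS.getProperty(key);
        // An empty value such as "arangoDB.read.limit=" would fail to parse,
        // which is presumably why the property now carries an explicit number.
        return (raw == null || raw.trim().isEmpty()) ? null : Long.valueOf(raw.trim());
    }
}
```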
@@ -1,9 +1,7 @@
 drivers=ru.yandex.clickhouse.ClickHouseDriver
 mdb.user=default
-db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
-mdb.password=111111
-#db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=300000
-#mdb.password=ceiec2019
+db.id=192.168.44.67:8123/tsg_galaxy_v3?socket_timeout=3600000
+mdb.password=ceiec2019
 initialsize=1
 minidle=1
 maxactive=50
@@ -1,53 +0,0 @@
-package cn.ac.iie;
-
-import cn.ac.iie.dao.BaseArangoData;
-import cn.ac.iie.utils.ArangoDBConnect;
-import cn.ac.iie.utils.ExecutorThreadPool;
-import com.arangodb.entity.BaseDocument;
-import com.arangodb.entity.BaseEdgeDocument;
-import org.junit.After;
-import org.junit.Test;
-
-import java.util.Enumeration;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class TestReadArango {
-    private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
-    private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();
-
-    private static BaseArangoData baseArangoData = new BaseArangoData();
-
-
-    @Test
-    public void testReadFqdnFromArango() {
-        ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyData =
-                baseArangoData.readHistoryData("FQDN", BaseDocument.class);
-        printMap(historyData);
-    }
-
-    @Test
-    public void testReadFqdnLocIpFromArango() {
-        ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> ip =
-                baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", BaseEdgeDocument.class);
-        printMap(ip);
-    }
-
-    private <T extends BaseDocument> void printMap(ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyData) {
-        ConcurrentHashMap<String, T> map = historyData.get(2);
-        Enumeration<String> keys = map.keys();
-        while (keys.hasMoreElements()) {
-            String key = keys.nextElement();
-            T document = map.get(key);
-            System.out.println(document.toString());
-        }
-    }
-
-
-    @After
-    public void clearSource() {
-        pool.shutdown();
-        arangoManger.clean();
-    }
-
-
-}
@@ -1,75 +0,0 @@
-package cn.ac.iie.dao;
-
-import cn.ac.iie.config.ApplicationConfig;
-import cn.ac.iie.service.read.ReadHistoryArangoData;
-import cn.ac.iie.utils.ArangoDBConnect;
-import cn.ac.iie.utils.ExecutorThreadPool;
-import com.arangodb.ArangoCursor;
-import com.arangodb.entity.BaseDocument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-
-/**
- * Fetch ArangoDB history data
- *
- * @author wlh
- */
-public class BaseArangoData {
-    private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
-    private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
-
-    private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
-
-    public <T extends BaseDocument> void readHistoryData(String table,
-                                                         ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
-                                                         Class<T> type) {
-        try {
-            LOG.warn("start updating " + table);
-            long start = System.currentTimeMillis();
-            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
-                historyMap.put(i, new ConcurrentHashMap<>());
-            }
-            CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
-//            long[] timeRange = getTimeRange(table);
-            Long countTotal = getCountTotal(table);
-            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
-//                String sql = getQuerySql(timeRange, i, table);
-                String sql = getQuerySql(countTotal, i, table);
-                ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
-                threadPool.executor(readHistoryArangoData);
-            }
-            countDownLatch.await();
-            long last = System.currentTimeMillis();
-            LOG.warn("reading " + table + " from arangoDB took: " + (last - start));
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-    }
-
-    private Long getCountTotal(String table) {
-        long start = System.currentTimeMillis();
-        Long cnt = 0L;
-        String sql = "RETURN LENGTH(" + table + ")";
-        try {
-            ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
-            while (longs.hasNext()) {
-                cnt = longs.next();
-            }
-        } catch (Exception e) {
-            LOG.error(sql + " execution failed");
-        }
-        long last = System.currentTimeMillis();
-        LOG.info(sql + " result: " + cnt + " elapsed: " + (last - start));
-        return cnt;
-    }
-
-    private String getQuerySql(Long cnt, int threadNumber, String table) {
-        long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER() + 1;
-        long offsetNum = threadNumber * sepNum;
-        return "FOR doc IN " + table + " limit " + offsetNum + "," + sepNum + " RETURN doc";
-    }
-
-}
@@ -1,125 +0,0 @@
-package cn.ac.iie.service.read;
-
-import cn.ac.iie.config.ApplicationConfig;
-import cn.ac.iie.utils.ArangoDBConnect;
-import com.arangodb.ArangoCursor;
-import com.arangodb.entity.BaseDocument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-
-
-/**
- * @author wlh
- * Read ArangoDB history data in full with multiple threads and pack it into a map
- */
-public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
-    public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
-    private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
-    static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
-
-    public static final HashSet<String> PROTOCOL_SET;
-
-    static {
-        PROTOCOL_SET = new HashSet<>();
-        PROTOCOL_SET.add("HTTP");
-        PROTOCOL_SET.add("TLS");
-        PROTOCOL_SET.add("DNS");
-    }
-
-    private ArangoDBConnect arangoConnect;
-    private String query;
-    private ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map;
-    private Class<T> type;
-    private String table;
-    private CountDownLatch countDownLatch;
-
-    public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
-                                 String query,
-                                 ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map,
-                                 Class<T> type,
-                                 String table,
-                                 CountDownLatch countDownLatch) {
-        this.arangoConnect = arangoConnect;
-        this.query = query;
-        this.map = map;
-        this.type = type;
-        this.table = table;
-        this.countDownLatch = countDownLatch;
-    }
-
-    @Override
-    public void run() {
-        try {
-            long s = System.currentTimeMillis();
-            ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
-            if (docs != null) {
-                List<T> baseDocuments = docs.asListRemaining();
-                int i = 0;
-                for (T doc : baseDocuments) {
-                    String key = doc.getKey();
-                    switch (table) {
-                        case "R_LOCATE_FQDN2IP":
-                            updateProtocolDocument(doc);
-                            deleteDistinctClientIpByTime(doc);
-                            break;
-                        case "R_VISIT_IP2FQDN":
-                            updateProtocolDocument(doc);
-                            break;
-                        default:
-                    }
-                    int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER();
-                    ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
-                    tmpMap.put(key, doc);
-                    i++;
-                }
-                long l = System.currentTimeMillis();
-                LOG.warn(query + "\nread " + i + " documents, elapsed: " + (l - s));
-            }
-        } catch (Exception e) {
-            e.printStackTrace();
-        } finally {
-            countDownLatch.countDown();
-            LOG.warn("this thread finished reading, remaining threads: " + countDownLatch.getCount());
-        }
-    }
-
-    private void updateProtocolDocument(T doc) {
-        if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
-            for (String protocol : PROTOCOL_SET) {
-                String protocolRecent = protocol + "_CNT_RECENT";
-                ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
-                Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
-                Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
-                System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
-                cntRecentsDst[0] = 0L;
-                doc.addAttribute(protocolRecent, cntRecentsDst);
-            }
-        }
-    }
-
-    private void deleteDistinctClientIpByTime(T doc) {
-        ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
-        ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
-        distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
-        Collections.sort(distCipTs);
-        int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
-        String[] distCipArr = new String[index];
-        long[] disCipTsArr = new long[index];
-        if (distCip.size() + 1 == distCipTs.size()) {
-            for (int i = 0; i < index; i++) {
-                distCipArr[i] = distCip.get(i);
-                disCipTsArr[i] = distCipTs.get(i);
-            }
-        }
-        doc.updateAttribute("DIST_CIP", distCipArr);
-        doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
-    }
-
-}
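The deleted `ReadHistoryArangoData` carried the per-protocol "recent hours" bookkeeping: on every run, each `HTTP`/`TLS`/`DNS` `_CNT_RECENT` array was shifted one slot so the window stays `RECENT_COUNT_HOUR` entries long. A standalone sketch of that `System.arraycopy` idiom, assuming the configured `recent.count.hour=24`:

```java
// Shift hourly counters one slot toward the past: slot 0 becomes the new,
// empty current hour, and the oldest hour falls off the end of the array.
public class HourlyShiftSketch {
    static Long[] shiftHourlyCounters(Long[] src, int recentCountHour) {
        Long[] dst = new Long[recentCountHour];        // e.g. 24 slots
        System.arraycopy(src, 0, dst, 1, src.length - 1);
        dst[0] = 0L;                                   // fresh counter for the new hour
        return dst;
    }
}
```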
@@ -22,11 +22,12 @@ public class ArangoDBConnect {
     private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
     private static ArangoDB arangoDB = null;
     private static ArangoDBConnect conn = null;

     static {
         getArangoDatabase();
     }

-    private static void getArangoDatabase(){
+    private static void getArangoDatabase() {
         arangoDB = new ArangoDB.Builder()
                 .maxConnections(ApplicationConfig.THREAD_POOL_NUMBER())
                 .host(ApplicationConfig.ARANGODB_HOST(), ApplicationConfig.ARANGODB_PORT())
@@ -35,82 +36,81 @@ public class ArangoDBConnect {
                 .build();
     }

-    public static synchronized ArangoDBConnect getInstance(){
-        if (null == conn){
+    public static synchronized ArangoDBConnect getInstance() {
+        if (null == conn) {
             conn = new ArangoDBConnect();
         }
         return conn;
     }

-    private ArangoDatabase getDatabase(){
+    private ArangoDatabase getDatabase() {
         return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME());
     }

-    public void clean(){
+    public void clean() {
         try {
-            if (arangoDB != null){
+            if (arangoDB != null) {
                 arangoDB.shutdown();
             }
-        }catch (Exception e){
+        } catch (Exception e) {
            e.printStackTrace();
         }
     }

-    public <T> ArangoCursor<T> executorQuery(String query,Class<T> type){
+    public <T> ArangoCursor<T> executorQuery(String query, Class<T> type) {
         ArangoDatabase database = getDatabase();
         Map<String, Object> bindVars = new MapBuilder().get();
         AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL());
         try {
             return database.query(query, bindVars, options, type);
-        }catch (Exception e){
+        } catch (Exception e) {
             e.printStackTrace();
             return null;
-        }finally {
+        } finally {
             bindVars.clear();
         }
     }

     @Deprecated
-    public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
+    public <T> void insertAndUpdate(ArrayList<T> docInsert, ArrayList<T> docUpdate, String collectionName) {
         ArangoDatabase database = getDatabase();
         try {
             ArangoCollection collection = database.collection(collectionName);
-            if (!docInsert.isEmpty()){
+            if (!docInsert.isEmpty()) {
                 collection.importDocuments(docInsert);
             }
-            if (!docUpdate.isEmpty()){
+            if (!docUpdate.isEmpty()) {
                 collection.replaceDocuments(docUpdate);
             }
-        }catch (Exception e){
+        } catch (Exception e) {
             System.out.println("update failed");
             e.printStackTrace();
-        }finally {
+        } finally {
             docInsert.clear();
             docInsert.clear();
         }
     }

-    public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
+    public <T> void overwrite(ArrayList<T> docOverwrite, String collectionName) {
         ArangoDatabase database = getDatabase();
         try {
             ArangoCollection collection = database.collection(collectionName);
-            if (!docOverwrite.isEmpty()){
+            if (!docOverwrite.isEmpty()) {
                 DocumentCreateOptions documentCreateOptions = new DocumentCreateOptions();
                 documentCreateOptions.overwrite(true);
                 documentCreateOptions.silent(true);
                 MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
                 Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
-                for (ErrorEntity errorEntity:errors){
-                    LOG.warn("arangoDB write error:"+errorEntity.getErrorMessage());
+                for (ErrorEntity errorEntity : errors) {
+                    LOG.warn("arangoDB write error: " + errorEntity.getErrorMessage());
                 }
             }
-        }catch (Exception e){
-            System.out.println("update failed:"+e.toString());
-        }finally {
+        } catch (Exception e) {
+            LOG.error("update failed: " + e.toString());
+        } finally {
             docOverwrite.clear();
         }
     }



 }
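Aside from the brace-spacing cleanup, the substantive details in this file: `overwrite(true)` on `DocumentCreateOptions` makes `insertDocuments` behave as an upsert (existing keys are replaced rather than raising a conflict), `silent(true)` suppresses the per-document result entities, and the failure path in `overwrite` now goes through `LOG.error` instead of `System.out.println`. The duplicated `docInsert.clear()` in the deprecated `insertAndUpdate` — where the second call was presumably meant to clear `docUpdate` — is untouched context.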
@@ -13,27 +13,32 @@ spark.read.clickhouse.password=ceiec2019
 spark.read.clickhouse.numPartitions=5
 spark.read.clickhouse.fetchsize=10000
 spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
+spark.read.clickhouse.session.table=session_record
+spark.read.clickhouse.radius.table=radius_record
 clickhouse.socket.timeout=300000
 #arangoDB configuration
-arangoDB.host=192.168.40.182
+#arangoDB.host=192.168.40.223
+arangoDB.host=192.168.44.12
 arangoDB.port=8529
-arangoDB.user=upsert
-arangoDB.password=ceiec2018
-arangoDB.DB.name=ip-learning-test-0
+arangoDB.user=root
+#arangoDB.password=galaxy_2019
+arangoDB.password=ceiec2019
+arangoDB.DB.name=tsg_galaxy_v3_test
 #arangoDB.DB.name=iplearn_media_domain
 arangoDB.ttl=3600

 thread.pool.number=10

 #clickhouse time-range mode: 0 = read the past hour, 1 = explicit time range
-clickhouse.time.limit.type=1
-read.clickhouse.max.time=1603785961
-read.clickhouse.min.time=1603354682
+clickhouse.time.limit.type=0
+read.clickhouse.max.time=1608518990
+read.clickhouse.min.time=1604851201

-arangoDB.read.limit=1
 update.arango.batch=10000

 distinct.client.ip.num=10000
 recent.count.hour=24

 update.interval=3600

+arangodb.total.num=20000000
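`clickhouse.time.limit.type` flips from the fixed test window (mode 1, with explicit min/max epoch seconds) back to "past hour" mode 0. The mode-0 branch of `getTimeLimit` is not shown in this compare, but judging from the `currentHour` arithmetic elsewhere in this diff the window is presumably hour-aligned epoch seconds, along these lines:

```java
public class HourWindowSketch {
    public static void main(String[] args) {
        // Assumed mode-0 window (the actual getTimeLimit branch is not in this compare):
        long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
        long minTime = currentHour - 3600; // start of the previous full hour, inclusive
        long maxTime = currentHour;        // top of the current hour, exclusive
        System.out.println(minTime + " .. " + maxTime);
    }
}
```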
@@ -4,20 +4,15 @@ log4j.logger.org.apache.http.wire=OFF

 #Log4j
 log4j.rootLogger=info,console,file
-# console log output
 log4j.appender.console=org.apache.log4j.ConsoleAppender
 log4j.appender.console.Threshold=warn
 log4j.appender.console.layout=org.apache.log4j.PatternLayout
 log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n

-# file log output
 log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
 log4j.appender.file.Threshold=warn
 log4j.appender.file.encoding=UTF-8
 log4j.appender.file.Append=true
-# log file path
-#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
-#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
 log4j.appender.file.file=./logs/ip-learning-application.log
 log4j.appender.file.DatePattern='.'yyyy-MM-dd
 log4j.appender.file.layout=org.apache.log4j.PatternLayout
@@ -20,6 +20,8 @@ object ApplicationConfig {
   val SPARK_READ_CLICKHOUSE_PASSWORD: String = config.getString("spark.read.clickhouse.password")
   val SPARK_READ_CLICKHOUSE_FETCHSIZE: String = config.getString("spark.read.clickhouse.fetchsize")
   val SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN: String = config.getString("spark.read.clickhouse.partitionColumn")
+  val SPARK_READ_CLICKHOUSE_SESSION_TABLE: String = config.getString("spark.read.clickhouse.session.table")
+  val SPARK_READ_CLICKHOUSE_RADIUS_TABLE: String = config.getString("spark.read.clickhouse.radius.table")

   val ARANGODB_HOST: String = config.getString("arangoDB.host")
   val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
@@ -36,11 +38,12 @@ object ApplicationConfig {
   val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
   val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")

-  val ARANGODB_READ_LIMIT: Int = config.getInt("arangoDB.read.limit")
   val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
   val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
   val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")

   val UPDATE_INTERVAL: Int = config.getInt("update.interval")

+  val ARANGODB_TOTAL_NUM: Long = config.getLong("arangodb.total.num")

 }
@@ -0,0 +1,22 @@
+package cn.ac.iie.dao
+
+import cn.ac.iie.config.ApplicationConfig
+import cn.ac.iie.spark.ArangoSpark
+import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions}
+import cn.ac.iie.utils.SparkSessionUtil.sparkContext
+import org.slf4j.LoggerFactory
+
+import scala.reflect.ClassTag
+
+object BaseArangoData {
+  private val LOG = LoggerFactory.getLogger(BaseArangoData.getClass)
+  private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)
+
+  def loadArangoRdd[T: ClassTag](name: String): ArangoRdd[T] = {
+    val value = ArangoSpark.load[T](sparkContext, name, options)
+
+    LOG.warn(s"read $name from arangoDb: ${value.count()}")
+    value
+  }
+
+}
@@ -11,7 +11,7 @@ object BaseClickhouseData {
   val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
   private val timeLimit: (Long, Long) = getTimeLimit

-  private def initClickhouseData(sql:String): DataFrame ={
+  private def initClickhouseData(sql: String): DataFrame = {

     val dataFrame: DataFrame = spark.read.format("jdbc")
       .option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
@@ -24,7 +24,7 @@ object BaseClickhouseData {
       .option("lowerBound", timeLimit._2)
       .option("upperBound", timeLimit._1)
       .option("fetchsize", ApplicationConfig.SPARK_READ_CLICKHOUSE_FETCHSIZE)
-      .option("socket_timeout",ApplicationConfig.CLICKHOUSE_SOCKET_TIMEOUT)
+      .option("socket_timeout", ApplicationConfig.CLICKHOUSE_SOCKET_TIMEOUT)
       .load()
     dataFrame.printSchema()
    dataFrame.createOrReplaceGlobalTempView("dbtable")
@@ -32,149 +32,22 @@ object BaseClickhouseData {
     dataFrame
   }

-  def loadConnectionDataFromCk(): Unit ={
+  def getVertexFqdnDf: DataFrame = {
     val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
     val sql =
       s"""
-        |(SELECT
-        | ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
-        |FROM
-        | connection_record_log
-        |WHERE $where) as dbtable
-      """.stripMargin
-
-    LOG.warn(sql)
-    initClickhouseData(sql)
-  }
-
-  private def loadRadiusDataFromCk(): Unit ={
-    val where =
-      s"""
-        | common_recv_time >= ${timeLimit._2}
-        | AND common_recv_time < ${timeLimit._1}
-        | AND common_subscriber_id != ''
-        | AND radius_framed_ip != ''
-        | AND radius_packet_type = 4
-        | AND radius_acct_status_type = 1
-      """.stripMargin
-    val sql =
-      s"""
-        |(SELECT
-        | common_subscriber_id,radius_framed_ip,common_recv_time
-        |FROM
-        | tsg_galaxy_v3.radius_record_log
-        |WHERE
-        | $where) as dbtable
-      """.stripMargin
-    LOG.warn(sql)
-    initClickhouseData(sql)
-  }
-
-  /*
-  def getVertexIpDf: DataFrame ={
-    loadConnectionDataFromCk()
-    val sql =
-      """
-        |SELECT
-        | *
-        |FROM
-        | (
-        | (
-        | SELECT
-        | common_client_ip AS IP,
-        | MIN(common_recv_time) AS FIRST_FOUND_TIME,
-        | MAX(common_recv_time) AS LAST_FOUND_TIME,
-        | count(*) as SESSION_COUNT,
-        | sum(common_c2s_byte_num) as BYTES_SUM,
-        | 'client' as ip_type
-        | FROM
-        | global_temp.dbtable
-        | GROUP BY
-        | IP
-        | )
-        | UNION ALL
-        | (
-        | SELECT
-        | common_server_ip AS IP,
-        | MIN(common_recv_time) AS FIRST_FOUND_TIME,
-        | MAX(common_recv_time) AS LAST_FOUND_TIME,
-        | count(*) as SESSION_COUNT,
-        | sum(common_s2c_byte_num) as BYTES_SUM,
-        | 'server' as ip_type
-        | FROM
-        | global_temp.dbtable
-        | GROUP BY
-        | IP
-        | )
-        | )
-      """.stripMargin
-    LOG.warn(sql)
-    val vertexIpDf = spark.sql(sql)
-    vertexIpDf.printSchema()
-    vertexIpDf
-  }
-
-  def getRelationFqdnLocateIpDf: DataFrame ={
-    loadConnectionDataFromCk()
-    val sslSql =
-      """
-        |SELECT
-        | ssl_sni AS FQDN,
-        | common_server_ip,
-        | MAX(common_recv_time) AS LAST_FOUND_TIME,
-        | MIN(common_recv_time) AS FIRST_FOUND_TIME,
-        | COUNT(*) AS COUNT_TOTAL,
-        | collect_set(common_client_ip) AS DIST_CIP_RECENT,
-        | 'TLS' AS schema_type
-        |FROM
-        | global_temp.dbtable
-        |WHERE
-        | common_schema_type = 'SSL'
-        |GROUP BY
-        | ssl_sni,common_server_ip
-      """.stripMargin
-
-    val httpSql =
-      """
-        |SELECT
-        | http_host AS FQDN,
-        | common_server_ip,
-        | MAX(common_recv_time) AS LAST_FOUND_TIME,
-        | MIN(common_recv_time) AS FIRST_FOUND_TIME,
-        | COUNT(*) AS COUNT_TOTAL,
-        | collect_set(common_client_ip) AS DIST_CIP_RECENT,
-        | 'HTTP' AS schema_type
-        |FROM
-        | global_temp.dbtable
-        |WHERE
-        | common_schema_type = 'HTTP'
-        |GROUP BY
-        | http_host,common_server_ip
-      """.stripMargin
-    val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"
-
-    LOG.warn(sql)
-    val relationFqdnLocateIpDf = spark.sql(sql)
-    relationFqdnLocateIpDf.printSchema()
-    relationFqdnLocateIpDf
-  }
-  */
-
-  def getVertexFqdnDf: DataFrame ={
-    val sql =
-      """
        |(SELECT
        | FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
        |FROM
        | ((SELECT
        | ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
-       | FROM tsg_galaxy_v3.connection_record_log
-       | WHERE common_schema_type = 'SSL' GROUP BY ssl_sni
+       | FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
+       | WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni
        | )UNION ALL
        | (SELECT
        | http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
-       | FROM tsg_galaxy_v3.connection_record_log
-       | WHERE common_schema_type = 'HTTP' GROUP BY http_host))
+       | FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
+       | WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host))
        |GROUP BY FQDN HAVING FQDN != '') as dbtable
      """.stripMargin
     LOG.warn(sql)
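This rewrite drops the global-temp-view round trip and pushes the aggregation into ClickHouse by handing Spark's JDBC source a parenthesized subquery as `dbtable`. A minimal Java sketch of the same pattern (URL and query values are illustrative, not taken from this compare):

```java
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class JdbcPushdownSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().appName("pushdown").getOrCreate();
        // A parenthesized subquery aliased as a table: ClickHouse executes the
        // GROUP BY itself and Spark only receives the aggregated rows.
        String dbtable = "(SELECT http_host AS FQDN, MAX(common_recv_time) AS LAST_FOUND_TIME "
                       + "FROM session_record WHERE common_schema_type = 'HTTP' "
                       + "GROUP BY http_host) as dbtable";
        Dataset<Row> df = spark.read().format("jdbc")
                .option("url", "jdbc:clickhouse://example-host:8123/tsg_galaxy_v3")
                .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
                .option("dbtable", dbtable)
                .load();
        df.printSchema();
    }
}
```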
@@ -183,7 +56,7 @@ object BaseClickhouseData {
     frame
   }

-  def getVertexIpDf: DataFrame ={
+  def getVertexIpDf: DataFrame = {
     val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
     val sql =
       s"""
@@ -194,7 +67,7 @@ object BaseClickhouseData {
        |SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
        |groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
        |'client' as ip_type
-       |FROM tsg_galaxy_v3.connection_record_log
+       |FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
        |where $where
        |group by common_client_ip)
        |UNION ALL
@@ -205,7 +78,7 @@ object BaseClickhouseData {
        |SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
        |groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
        |'server' as ip_type
-       |FROM tsg_galaxy_v3.connection_record_log
+       |FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
        |where $where
        |group by common_server_ip))) as dbtable
      """.stripMargin
@@ -216,19 +89,19 @@ object BaseClickhouseData {
   }


-  def getRelationFqdnLocateIpDf: DataFrame ={
+  def getRelationFqdnLocateIpDf: DataFrame = {
     val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
     val sql =
       s"""
        |(SELECT * FROM
        |((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
        |toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
-       |FROM tsg_galaxy_v3.connection_record_log
+       |FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
        |WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
        |UNION ALL
        |(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
        |toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
-       |FROM tsg_galaxy_v3.connection_record_log
+       |FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
        |WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
        |WHERE FQDN != '') as dbtable
      """.stripMargin
@@ -238,7 +111,7 @@ object BaseClickhouseData {
     frame
   }

-  def getRelationSubidLocateIpDf: DataFrame ={
+  def getRelationSubidLocateIpDf: DataFrame = {
     val where =
       s"""
        | common_recv_time >= ${timeLimit._2}
@@ -250,7 +123,7 @@ object BaseClickhouseData {
       s"""
        |(
        |SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME
-       |FROM radius_record_log
+       |FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_RADIUS_TABLE}
        |WHERE $where GROUP BY common_subscriber_id,radius_framed_ip
        |) as dbtable
      """.stripMargin
@@ -260,18 +133,20 @@ object BaseClickhouseData {
     frame
   }

-  def getVertexSubidDf: DataFrame ={
+  def getVertexSubidDf: DataFrame = {
     val where =
       s"""
        | common_recv_time >= ${timeLimit._2}
        | AND common_recv_time < ${timeLimit._1}
        | AND common_subscriber_id != ''
        | AND radius_framed_ip != ''
+       | AND radius_packet_type = 4
+       | AND radius_acct_status_type = 1
      """.stripMargin
     val sql =
       s"""
        |(
-       |SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log
+       |SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_RADIUS_TABLE}
        |WHERE $where GROUP BY common_subscriber_id
        |)as dbtable
      """.stripMargin
@@ -281,18 +156,22 @@ object BaseClickhouseData {
     frame
   }

-  def getVertexFramedIpDf: DataFrame ={
+  def getVertexFramedIpDf: DataFrame = {
     val where =
       s"""
        | common_recv_time >= ${timeLimit._2}
        | AND common_recv_time < ${timeLimit._1}
        | AND common_subscriber_id != ''
        | AND radius_framed_ip != ''
+       | AND radius_packet_type = 4
+       | AND radius_acct_status_type = 1

      """.stripMargin
     val sql =
       s"""
        |(
-       |SELECT DISTINCT radius_framed_ip,common_recv_time as LAST_FOUND_TIME FROM radius_record_log WHERE $where
+       |SELECT radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_RADIUS_TABLE} WHERE $where
+       |GROUP BY radius_framed_ip
        |)as dbtable
      """.stripMargin
     LOG.warn(sql)
@@ -302,7 +181,7 @@ object BaseClickhouseData {
   }


-  private def getTimeLimit: (Long,Long) ={
+  private def getTimeLimit: (Long, Long) = {
     var maxTime = 0L
     var minTime = 0L
     ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE match {
@@ -3,39 +3,33 @@ package cn.ac.iie.service.transform
 import java.util.regex.Pattern

 import cn.ac.iie.config.ApplicationConfig
-import cn.ac.iie.dao.BaseClickhouseData
-import cn.ac.iie.spark.ArangoSpark
+import cn.ac.iie.dao.{BaseArangoData, BaseClickhouseData}
 import cn.ac.iie.spark.partition.CustomPartitioner
-import cn.ac.iie.spark.rdd.ReadOptions
+import cn.ac.iie.spark.rdd.ArangoRdd
 import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.functions._
 import org.slf4j.LoggerFactory
-import cn.ac.iie.utils.SparkSessionUtil._

 object MergeDataFrame {
   private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
   private val pattern = Pattern.compile("^[\\d]*$")
-  private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)

-  def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
-    val fqdnAccmu = getLongAccumulator("FQDN Accumulator")
-    val fqdnRddRow = BaseClickhouseData.getVertexFqdnDf
-      .rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
-      fqdnAccmu.add(1)
+  def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Row))] = {
+    val fqdnRddRow: RDD[(String, Row)] = BaseClickhouseData.getVertexFqdnDf
+      .repartition().rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
       (row.getAs[String]("FQDN"), row)
-    }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
-    fqdnRddRow.cache()
-    val fqdnRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"FQDN",options)
-
-    fqdnRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnRddRow)
+    })/*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
+
+    val fqdnRddDoc: ArangoRdd[BaseDocument] = BaseArangoData.loadArangoRdd[BaseDocument]("FQDN")
+
+    fqdnRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnRddRow)
   }

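Note: the return-type change from (Option[BaseDocument], Option[Row]) to (Option[BaseDocument], Row) falls directly out of swapping fullOuterJoin for rightOuterJoin: every ClickHouse row now survives the join, and only the ArangoDB document side can be absent. A toy sketch of the typing, independent of this repository's data:

    import org.apache.spark.SparkContext
    import org.apache.spark.rdd.RDD

    object JoinTypingSketch {
      def joinTyping(sc: SparkContext): Unit = {
        val docs: RDD[(String, Int)] = sc.parallelize(Seq("a" -> 1))
        val rows: RDD[(String, String)] = sc.parallelize(Seq("a" -> "x", "b" -> "y"))

        // right outer: every right-side key kept, left side optional
        val right: RDD[(String, (Option[Int], String))] = docs.rightOuterJoin(rows)

        // full outer: keys from either side kept, both sides optional
        val full: RDD[(String, (Option[Int], Option[String]))] = docs.fullOuterJoin(rows)
      }
    }
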
-  def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Option[Row]))]={
-    val ipAccum = getLongAccumulator("IP Accumulator")
+  def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Row))] = {
     val vertexIpDf = BaseClickhouseData.getVertexIpDf
-    val frame = vertexIpDf.groupBy("IP").agg(
+    val frame = vertexIpDf.repartition().groupBy("IP").agg(
       min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
       max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
       collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
@@ -44,18 +38,16 @@ object MergeDataFrame {
       last("common_link_info").alias("common_link_info")
     )
     val ipRddRow = frame.rdd.map(row => {
-      ipAccum.add(1)
       (row.getAs[String]("IP"), row)
-    }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
-    val ipRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"IP",options)
-
-    ipRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(ipRddRow)
+    })/*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
+    val ipRddDoc = BaseArangoData.loadArangoRdd[BaseDocument]("IP")
+
+    ipRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(ipRddRow)
   }

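Note: the IP aggregation collapses the per-source rows into one row per IP, folding counters into lists with collect_list so they can be split again by IP type downstream. A self-contained sketch with made-up data:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.{collect_list, max, min}

    object IpAggSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder.master("local[*]").appName("ip-agg").getOrCreate()
        import spark.implicits._

        val df = Seq(
          ("1.2.3.4", 10L, 20L, 5L),
          ("1.2.3.4", 12L, 18L, 7L)
        ).toDF("IP", "FIRST_FOUND_TIME", "LAST_FOUND_TIME", "SESSION_COUNT")

        df.groupBy("IP").agg(
          min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),        // 10
          max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),          // 20
          collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST") // [5, 7]
        ).show()
      }
    }
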
-  def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
-    val fqdnLocIpAccum = getLongAccumulator("R_LOCATE_FQDN2IP Accumulator")
-    val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
+  def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
+    val frame = BaseClickhouseData.getRelationFqdnLocateIpDf
+      .repartition().filter(row => isDomain(row.getAs[String]("FQDN")))
       .groupBy("FQDN", "common_server_ip")
       .agg(
         min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
@@ -68,54 +60,46 @@ object MergeDataFrame {
       val fqdn = row.getAs[String]("FQDN")
       val serverIp = row.getAs[String]("common_server_ip")
       val key = fqdn.concat("-" + serverIp)
-      fqdnLocIpAccum.add(1)
       (key, row)
-    }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
-    val fqdnLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_FQDN2IP",options)
-
-    fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnLocIpRddRow)
+    })/*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
+    val fqdnLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_FQDN2IP")
+
+    fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnLocIpRddRow)
   }

-  def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
-    val subidLocIpAccum = getLongAccumulator("R_LOCATE_SUBSCRIBER2IP Accumulator")
+  def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
     val subidLocIpRddRow = BaseClickhouseData.getRelationSubidLocateIpDf
       .repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
       .rdd.map(row => {
       val commonSubscriberId = row.getAs[String]("common_subscriber_id")
       val ip = row.getAs[String]("radius_framed_ip")
       val key = commonSubscriberId.concat("-" + ip)
-      subidLocIpAccum.add(1)
       (key, row)
     }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
-    val subidLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_SUBSCRIBER2IP",options)
+    val subidLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_SUBSCRIBER2IP")

-    subidLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidLocIpRddRow)
+    subidLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(subidLocIpRddRow)
   }

-  def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
-    val subidAccum = getLongAccumulator("SUBSCRIBER Accumulator")
+  def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Row))] = {
     val subidRddRow = BaseClickhouseData.getVertexSubidDf
       .repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
       .rdd.map(row => {
       val commonSubscriberId = row.getAs[String]("common_subscriber_id")
-      subidAccum.add(1)
       (commonSubscriberId, row)
     }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))

-    val subidRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"SUBSCRIBER",options)
+    val subidRddDoc = BaseArangoData.loadArangoRdd[BaseDocument]("SUBSCRIBER")

-    subidRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidRddRow)
+    subidRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(subidRddRow)
   }

-  def mergeVertexFrameIp: RDD[Row] ={
-    val framedIpAccum = getLongAccumulator("framed ip Accumulator")
+  def mergeVertexFrameIp: RDD[Row] = {
     val values = BaseClickhouseData.getVertexFramedIpDf
       .repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
       .rdd.map(row => {
       val ip = row.getAs[String]("radius_framed_ip")
-      framedIpAccum.add(1)
       (ip, row)
     }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
     values
@@ -129,7 +113,7 @@ object MergeDataFrame {

     val fqdnArr = fqdn.split(":")(0).split("\\.")

-    if (fqdnArr.length != 4){
+    if (fqdnArr.length != 4) {
       return true
     }
     for (f <- fqdnArr) {

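Note: isDomain (only partially visible in this hunk) treats a name as a domain unless its colon-stripped form is four dot-separated numeric parts, i.e. an IPv4 literal; the ^[\d]*$ pattern does the per-part check. A hedged reconstruction of the full heuristic, since the loop body is cut off above:

    object DomainHeuristicSketch {
      private val pattern = java.util.regex.Pattern.compile("^[\\d]*$")

      // Assumed completion: any non-numeric dot-part means "not an IPv4 literal",
      // so the name is treated as a domain.
      def isDomain(fqdn: String): Boolean = {
        val parts = fqdn.split(":")(0).split("\\.")
        if (parts.length != 4) return true
        parts.exists(p => !pattern.matcher(p).matches())
      }
    }
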
@@ -2,47 +2,49 @@ package cn.ac.iie.service.update

 import java.util
-import scala.collection.JavaConversions._

+import scala.collection.JavaConversions._
 import cn.ac.iie.config.ApplicationConfig
-import cn.ac.iie.service.read.ReadHistoryArangoData
+import cn.ac.iie.dao.BaseClickhouseData
 import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}

 import scala.collection.mutable
 import scala.collection.mutable.WrappedArray.ofRef

 object UpdateDocHandler {
-  val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS")
+  val PROTOCOL_SET: Set[String] = Set("HTTP", "TLS", "DNS")

-  def updateMaxAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
-    if(hisDoc.getProperties.containsKey(attributeName)){
+  def updateMaxAttribute(hisDoc: BaseDocument, newAttribute: Long, attributeName: String): Unit = {
+    if (hisDoc.getProperties.containsKey(attributeName)) {
       var hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
-      if (newAttribute > hisAttritube){
+      if (newAttribute > hisAttritube) {
         hisAttritube = newAttribute
       }
-      hisDoc.addAttribute(attributeName,hisAttritube)
+      hisDoc.addAttribute(attributeName, hisAttritube)
     }
   }

-  def updateSumAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
-    if (hisDoc.getProperties.containsKey(attributeName)){
+  def updateSumAttribute(hisDoc: BaseDocument, newAttribute: Long, attributeName: String): Unit = {
+    if (hisDoc.getProperties.containsKey(attributeName)) {
       val hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
-      hisDoc.addAttribute(attributeName,newAttribute+hisAttritube)
+      hisDoc.addAttribute(attributeName, newAttribute + hisAttritube)
     }
   }

-  def replaceAttribute(hisDoc: BaseDocument,newAttribute:String,attributeName:String): Unit ={
-    hisDoc.addAttribute(attributeName,newAttribute)
+  def replaceAttribute(hisDoc: BaseDocument, newAttribute: String, attributeName: String): Unit = {
+    // hisDoc.addAttribute(attributeName,newAttribute)
+    hisDoc.updateAttribute(attributeName, newAttribute)
   }

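Note: BaseDocument in the ArangoDB Java driver keeps its attributes in a properties map; addAttribute puts the key unconditionally, while updateAttribute is the overwrite-style call, which matches what replaceAttribute intends. A usage sketch (behavior should be verified against the driver version actually in use):

    import com.arangodb.entity.BaseDocument

    object ReplaceAttributeSketch {
      def main(args: Array[String]): Unit = {
        val doc = new BaseDocument()
        doc.setKey("1.2.3.4")
        doc.addAttribute("COMMON_LINK_INFO", "link-a")    // initial value
        doc.updateAttribute("COMMON_LINK_INFO", "link-b") // overwrite on the update path
        println(doc.getAttribute("COMMON_LINK_INFO"))     // link-b
      }
    }
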
-  def separateAttributeByIpType(ipTypeList:ofRef[String],
-                                sessionCountList:ofRef[AnyRef],
-                                bytesSumList:ofRef[AnyRef]): (Long,Long,Long,Long) ={
+  def separateAttributeByIpType(ipTypeList: ofRef[String],
+                                sessionCountList: ofRef[AnyRef],
+                                bytesSumList: ofRef[AnyRef]): (Long, Long, Long, Long) = {
     var serverSessionCount = 0L
     var serverBytesSum = 0L
     var clientSessionCount = 0L
     var clientBytesSum = 0L
-    if (ipTypeList.length == sessionCountList.length && ipTypeList.length == bytesSumList.length){
+    if (ipTypeList.length == sessionCountList.length && ipTypeList.length == bytesSumList.length) {
       sessionCountList.zip(bytesSumList).zip(ipTypeList).foreach(t => {
         t._2 match {
           case "server" =>
@@ -57,51 +59,51 @@ object UpdateDocHandler {
     (serverSessionCount, serverBytesSum, clientSessionCount, clientBytesSum)
   }

-  def separateAttributeByProtocol(schemaTypeList:ofRef[AnyRef],countTotalList:ofRef[AnyRef]): Map[String, Long] ={
+  def separateAttributeByProtocol(schemaTypeList: ofRef[AnyRef], countTotalList: ofRef[AnyRef]): Map[String, Long] = {
     var protocolMap: Map[String, Long] = Map()
-    if (schemaTypeList.length == countTotalList.length){
-      protocolMap = schemaTypeList.zip(countTotalList).map(t => (t._1.toString,t._2.toString.toLong)).toMap
+    if (schemaTypeList.length == countTotalList.length) {
+      protocolMap = schemaTypeList.zip(countTotalList).map(t => (t._1.toString, t._2.toString.toLong)).toMap
     }
     PROTOCOL_SET.foreach(protocol => {
-      if (!protocolMap.contains(protocol)){
+      if (!protocolMap.contains(protocol)) {
        protocolMap += (protocol -> 0L)
      }
    })
    protocolMap
  }

-  def updateProtocolAttritube(hisDoc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
-    if (hisDoc.getProperties.containsKey("PROTOCOL_TYPE")){
+  def updateProtocolAttritube(hisDoc: BaseEdgeDocument, protocolMap: Map[String, Long]): Unit = {
+    if (hisDoc.getProperties.containsKey("PROTOCOL_TYPE")) {
       var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString
       protocolMap.foreach((t: (String, Long)) => {
-        if (t._2 > 0 && !protocolType.contains(t._1)){
-          protocolType = protocolType.concat(","+ t._1)
+        if (t._2 > 0 && !protocolType.contains(t._1)) {
+          protocolType = protocolType.concat("," + t._1)
         }
         val cntTotalName = t._1.concat("_CNT_TOTAL")
         val cntRecentName = t._1.concat("_CNT_RECENT")
         val cntRecent = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[Long]]
-        cntRecent.update(0,t._2)
-        updateSumAttribute(hisDoc,t._2,cntTotalName)
-        hisDoc.addAttribute(cntRecentName,cntRecent)
+        cntRecent.update(0, t._2)
+        updateSumAttribute(hisDoc, t._2, cntTotalName)
+        hisDoc.addAttribute(cntRecentName, cntRecent)
       })
-      hisDoc.addAttribute("PROTOCOL_TYPE",protocolType)
+      hisDoc.addAttribute("PROTOCOL_TYPE", protocolType)
     }
   }

-  def putProtocolAttritube(doc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
+  def putProtocolAttritube(doc: BaseEdgeDocument, protocolMap: Map[String, Long]): Unit = {
     val protocolTypeBuilder = new mutable.StringBuilder()
     protocolMap.foreach(t => {
-      if (t._2 > 0){
-        protocolTypeBuilder.append(","+t._1)
+      if (t._2 > 0) {
+        protocolTypeBuilder.append("," + t._1)
       }
       val cntTotalName = t._1.concat("_CNT_TOTAL")
       val cntRecentName = t._1.concat("_CNT_RECENT")
       val cntRecent: Array[Long] = new Array[Long](ApplicationConfig.RECENT_COUNT_HOUR)
-      cntRecent.update(0,t._2)
-      doc.addAttribute(cntTotalName,t._2)
-      doc.addAttribute(cntRecentName,cntRecent)
+      cntRecent.update(0, t._2)
+      doc.addAttribute(cntTotalName, t._2)
+      doc.addAttribute(cntRecentName, cntRecent)
     })
-    doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",",""))
+    doc.addAttribute("PROTOCOL_TYPE", protocolTypeBuilder.toString().replaceFirst(",", ""))
   }

   def updateProtocolDocument(doc: BaseEdgeDocument): Unit = {
@@ -118,37 +120,37 @@ object UpdateDocHandler {
     }
   }

-  def mergeDistinctIp(distCipRecent:ofRef[String]): Array[String] ={
+  def mergeDistinctIp(distCipRecent: ofRef[String]): Array[String] = {
     distCipRecent.flatMap(str => {
-      str.replaceAll("\\[","")
-        .replaceAll("\\]","")
-        .replaceAll("\\'","")
+      str.replaceAll("\\[", "")
+        .replaceAll("\\]", "")
+        .replaceAll("\\'", "")
         .split(",")
     }).distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
   }

-  def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
+  def putDistinctIp(doc: BaseEdgeDocument, newDistinctIp: Array[String]): Unit = {
     val map = newDistinctIp.map(ip => {
-      (ip, ReadHistoryArangoData.currentHour)
+      (ip, BaseClickhouseData.currentHour)
     }).toMap
-    doc.addAttribute("DIST_CIP",map.keys.toArray)
-    doc.addAttribute("DIST_CIP_TS",map.values.toArray)
+    doc.addAttribute("DIST_CIP", map.keys.toArray)
+    doc.addAttribute("DIST_CIP_TS", map.values.toArray)
   }

-  def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
-    if (hisDoc.getProperties.containsKey("DIST_CIP") && hisDoc.getProperties.containsKey("DIST_CIP_TS")){
+  def updateDistinctIp(hisDoc: BaseEdgeDocument, newDistinctIp: Array[String]): Unit = {
+    if (hisDoc.getProperties.containsKey("DIST_CIP") && hisDoc.getProperties.containsKey("DIST_CIP_TS")) {
       val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
       val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[util.ArrayList[Long]]
-      if (hisDistCip.length == hisDistCipTs.length){
+      if (hisDistCip.length == hisDistCipTs.length) {
         val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
-        val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*)
+        val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq: _*)
         newDistinctIp.foreach(cip => {
-          muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour)
+          muDistCipToTsMap.put(cip, BaseClickhouseData.currentHour)
         })
         val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
-        hisDoc.addAttribute("DIST_CIP",resultMap.keys.toArray)
-        hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray)
+        hisDoc.addAttribute("DIST_CIP", resultMap.keys.toArray)
+        hisDoc.addAttribute("DIST_CIP_TS", resultMap.values.toArray)
       }
     }
   }

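Note: updateDistinctIp implements a "keep the N most recently seen client IPs" policy: fold new sightings into the historical ip -> last-seen-hour map, then sort by timestamp descending and truncate. The same logic in isolation:

    import scala.collection.mutable

    object RecentIpSketch {
      def mergeRecentIps(history: Map[String, Long],
                         fresh: Array[String],
                         nowHour: Long,
                         keep: Int): Map[String, Long] = {
        val merged = mutable.Map(history.toSeq: _*)
        fresh.foreach(ip => merged.put(ip, nowHour)) // a new sighting wins the timestamp
        merged.toList.sortBy(-_._2).take(keep).toMap // newest `keep` entries survive
      }
    }
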
@@ -19,13 +19,13 @@ object UpdateDocument {
   def update(): Unit = {
     try {
-      updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)
+      // updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)

-      updateDocument("SUBSCRIBER",getVertexSubidRow,mergeVertexSubid)
+      updateDocument("SUBSCRIBER", getVertexSubidRow, mergeVertexSubid)

       insertFrameIp()

-      updateDocument("R_LOCATE_SUBSCRIBER2IP",getRelationSubidLocateIpRow,mergeRelationSubidLocateIp)
+      updateDocument("R_LOCATE_SUBSCRIBER2IP", getRelationSubidLocateIpRow, mergeRelationSubidLocateIp)

       updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, mergeRelationFqdnLocateIp)

@@ -41,18 +41,23 @@ object UpdateDocument {
   }

   private def updateDocument[T <: BaseDocument](collName: String,
-                                                getDocumentRow: ((String, (Option[T], Option[Row]))) => T,
-                                                getJoinRdd: () => RDD[(String, (Option[T], Option[Row]))]
+                                                getDocumentRow: ((String, (Option[T], Row))) => T,
+                                                getJoinRdd: () => RDD[(String, (Option[T], Row))]
                                                ): Unit = {
     try {
       val start = System.currentTimeMillis()
       val joinRdd = getJoinRdd()
+      val fqdnAccmu = SparkSessionUtil.getLongAccumulator(s"$collName Accumulator")

       joinRdd.foreachPartition(iter => {
         val resultDocumentList = new util.ArrayList[T]
         var i = 0
         iter.foreach(row => {
-          val document = getDocumentRow(row)
-          if (document != null){
+          val document: T = getDocumentRow(row)
+          if (document != null) {
+            fqdnAccmu.add(1)
             resultDocumentList.add(document)
           }
           i += 1
@@ -67,14 +72,17 @@ object UpdateDocument {
           LOG.warn(s"updated $collName: " + i)
         }
       })
+      LOG.warn(s"updated $collName count: ${fqdnAccmu.value}")

       val last = System.currentTimeMillis()
       LOG.warn(s"updated $collName elapsed ms: ${last - start}")
     } catch {
       case e: Exception => e.printStackTrace()
     }
   }

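Note: updateDocument drains each partition into a local ArrayList and writes it in batches, while the new long accumulator lets the driver log a global count once the action finishes. The pattern in miniature (the flush function is a stand-in for this repository's Arango writer, not its actual API):

    import java.util
    import org.apache.spark.rdd.RDD
    import org.apache.spark.sql.SparkSession

    object BatchWriteSketch {
      def writeInBatches[T](spark: SparkSession, rdd: RDD[T], batchSize: Int)
                           (flush: util.ArrayList[T] => Unit): Long = {
        val written = spark.sparkContext.longAccumulator("written")
        rdd.foreachPartition { iter =>
          val buffer = new util.ArrayList[T]
          iter.foreach { doc =>
            buffer.add(doc)
            written.add(1)
            if (buffer.size() >= batchSize) { flush(buffer); buffer.clear() }
          }
          if (!buffer.isEmpty) flush(buffer) // flush the tail of the partition
        }
        written.value // safe to read on the driver after the action completes
      }
    }
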
-  private def insertFrameIp(): Unit ={
+  private def insertFrameIp(): Unit = {
     mergeVertexFrameIp.foreachPartition(iter => {
       val resultDocumentList = new util.ArrayList[BaseDocument]
       var i = 0
@@ -95,15 +103,15 @@ object UpdateDocument {
     })
   }

-  private def getVertexFrameipRow(row: Row): BaseDocument ={
+  private def getVertexFrameipRow(row: Row): BaseDocument = {
     val ip = row.getAs[String]("radius_framed_ip")
     val document = new BaseDocument()
     document.setKey(ip)
-    document.addAttribute("IP",ip)
+    document.addAttribute("IP", ip)
     document
   }

-  private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument ={
+  private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Row))): BaseEdgeDocument = {
     val subidLocIpDocOpt = joinRow._2._1
     var subidLocIpDoc = subidLocIpDocOpt match {
@@ -111,83 +119,83 @@ object UpdateDocument {
       case None => null
     }

-    val subidLocIpRowOpt = joinRow._2._2
-    val subidLocIpRow = subidLocIpRowOpt match {
-      case Some(r) => r
-      case None => null
-    }
+    val subidLocIpRow = joinRow._2._2
+    // val subidLocIpRow = subidLocIpRowOpt match {
+    //   case Some(r) => r
+    //   case None => null
+    // }

-    if (subidLocIpRow != null){
+    if (subidLocIpRow != null) {
       val subId = subidLocIpRow.getAs[String]("common_subscriber_id")
       val ip = subidLocIpRow.getAs[String]("radius_framed_ip")
       val lastFoundTime = subidLocIpRow.getAs[Long]("LAST_FOUND_TIME")
       val firstFoundTime = subidLocIpRow.getAs[Long]("FIRST_FOUND_TIME")

-      val key = subId.concat("-"+ip)
-      if (subidLocIpDoc != null){
-        updateMaxAttribute(subidLocIpDoc,lastFoundTime,"LAST_FOUND_TIME")
+      val key = subId.concat("-" + ip)
+      if (subidLocIpDoc != null) {
+        updateMaxAttribute(subidLocIpDoc, lastFoundTime, "LAST_FOUND_TIME")
       } else {
         subidLocIpDoc = new BaseEdgeDocument()
         subidLocIpDoc.setKey(key)
         subidLocIpDoc.setFrom("SUBSCRIBER/" + subId)
         subidLocIpDoc.setTo("IP/" + ip)
-        subidLocIpDoc.addAttribute("SUBSCRIBER",subId)
-        subidLocIpDoc.addAttribute("IP",ip)
-        subidLocIpDoc.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
-        subidLocIpDoc.addAttribute("LAST_FOUND_TIME",lastFoundTime)
+        subidLocIpDoc.addAttribute("SUBSCRIBER", subId)
+        subidLocIpDoc.addAttribute("IP", ip)
+        subidLocIpDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
+        subidLocIpDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime)
       }
     }
     subidLocIpDoc
   }

-  private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument ={
+  private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
     val subidDocOpt = joinRow._2._1
     var subidDoc = subidDocOpt match {
       case Some(doc) => doc
       case None => null
     }

-    val subidRowOpt = joinRow._2._2
-    val subidRow = subidRowOpt match {
-      case Some(r) => r
-      case None => null
-    }
+    val subidRow = joinRow._2._2
+    // val subidRow = subidRowOpt match {
+    //   case Some(r) => r
+    //   case None => null
+    // }

-    if (subidRow != null){
+    if (subidRow != null) {
       val subId = subidRow.getAs[String]("common_subscriber_id")
       val subLastFoundTime = subidRow.getAs[Long]("LAST_FOUND_TIME")
       val subFirstFoundTime = subidRow.getAs[Long]("FIRST_FOUND_TIME")
-      if (subidDoc != null){
-        updateMaxAttribute(subidDoc,subLastFoundTime,"LAST_FOUND_TIME")
+      if (subidDoc != null) {
+        updateMaxAttribute(subidDoc, subLastFoundTime, "LAST_FOUND_TIME")
       } else {
         subidDoc = new BaseDocument()
         subidDoc.setKey(subId)
-        subidDoc.addAttribute("SUBSCRIBER",subId)
-        subidDoc.addAttribute("FIRST_FOUND_TIME",subFirstFoundTime)
-        subidDoc.addAttribute("LAST_FOUND_TIME",subLastFoundTime)
+        subidDoc.addAttribute("SUBSCRIBER", subId)
+        subidDoc.addAttribute("FIRST_FOUND_TIME", subFirstFoundTime)
+        subidDoc.addAttribute("LAST_FOUND_TIME", subLastFoundTime)
       }
     }

     subidDoc
   }

-  private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
+  private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
     val fqdnDocOpt = joinRow._2._1
     var fqdnDoc = fqdnDocOpt match {
       case Some(doc) => doc
       case None => null
     }

-    val fqdnRowOpt = joinRow._2._2
-    val fqdnRow = fqdnRowOpt match {
-      case Some(r) => r
-      case None => null
-    }
+    val fqdnRow: Row = joinRow._2._2
+    // val fqdnRow = fqdnRowOpt match {
+    //   case Some(r) => r
+    //   case None => null
+    // }

-    if (fqdnRow != null){
+    if (fqdnRow != null) {
       val fqdn = fqdnRow.getAs[String]("FQDN")
       val lastFoundTime = fqdnRow.getAs[Long]("LAST_FOUND_TIME")
       val firstFoundTime = fqdnRow.getAs[Long]("FIRST_FOUND_TIME")
@@ -205,21 +213,21 @@ object UpdateDocument {
     fqdnDoc
   }

-  private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
+  private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
     val ipDocOpt = joinRow._2._1
     var ipDoc = ipDocOpt match {
       case Some(doc) => doc
       case None => null
     }

-    val ipRowOpt = joinRow._2._2
-    val ipRow = ipRowOpt match {
-      case Some(r) => r
-      case None => null
-    }
+    val ipRow = joinRow._2._2
+    // val ipRow = ipRowOpt match {
+    //   case Some(r) => r
+    //   case None => null
+    // }

-    if (ipRow != null){
+    if (ipRow != null) {
       val ip = ipRow.getAs[String]("IP")
       val firstFoundTime = ipRow.getAs[Long]("FIRST_FOUND_TIME")
       val lastFoundTime = ipRow.getAs[Long]("LAST_FOUND_TIME")
@@ -235,7 +243,7 @@ object UpdateDocument {
       updateSumAttribute(ipDoc, sepAttributeTuple._2, "SERVER_BYTES_SUM")
       updateSumAttribute(ipDoc, sepAttributeTuple._3, "CLIENT_SESSION_COUNT")
       updateSumAttribute(ipDoc, sepAttributeTuple._4, "CLIENT_BYTES_SUM")
-      replaceAttribute(ipDoc,linkInfo,"COMMON_LINK_INFO")
+      replaceAttribute(ipDoc, linkInfo, "COMMON_LINK_INFO")
     } else {
       ipDoc = new BaseDocument
       ipDoc.setKey(ip)
@@ -253,7 +261,7 @@ object UpdateDocument {
     ipDoc
   }

-  private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument = {
+  private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Row))): BaseEdgeDocument = {
     val fqdnLocIpDocOpt = joinRow._2._1
     var fqdnLocIpDoc = fqdnLocIpDocOpt match {
@@ -261,18 +269,18 @@ object UpdateDocument {
       case None => null
     }

-    val fqdnLocIpRowOpt = joinRow._2._2
-    val fqdnLocIpRow = fqdnLocIpRowOpt match {
-      case Some(r) => r
-      case None => null
-    }
+    val fqdnLocIpRow = joinRow._2._2
+    // val fqdnLocIpRow = fqdnLocIpRowOpt match {
+    //   case Some(r) => r
+    //   case None => null
+    // }

-    if (fqdnLocIpDoc != null){
+    if (fqdnLocIpDoc != null) {
       updateProtocolDocument(fqdnLocIpDoc)
     }

-    if (fqdnLocIpRow != null){
+    if (fqdnLocIpRow != null) {
       val fqdn = fqdnLocIpRow.getAs[String]("FQDN")
       val serverIp = fqdnLocIpRow.getAs[String]("common_server_ip")
       val firstFoundTime = fqdnLocIpRow.getAs[Long]("FIRST_FOUND_TIME")

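Note: the relation builders above create ArangoDB edge documents, whose _from/_to fields reference vertices by "collection/key" handles; the edge key itself is derived deterministically from the two endpoint keys. The same construction in isolation:

    import com.arangodb.entity.BaseEdgeDocument

    object EdgeSketch {
      def subscriberLocatesIp(subId: String, ip: String,
                              firstSeen: Long, lastSeen: Long): BaseEdgeDocument = {
        val edge = new BaseEdgeDocument()
        edge.setKey(subId + "-" + ip)       // deterministic key, as in the diff
        edge.setFrom("SUBSCRIBER/" + subId) // _from vertex handle
        edge.setTo("IP/" + ip)              // _to vertex handle
        edge.addAttribute("FIRST_FOUND_TIME", firstSeen)
        edge.addAttribute("LAST_FOUND_TIME", lastSeen)
        edge
      }
    }
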
@@ -1,11 +1,15 @@
 package cn.ac.iie.spark.rdd

+import java.util
+
 import scala.collection.JavaConverters.asScalaIteratorConverter
 import cn.ac.iie.config.ApplicationConfig
 import cn.ac.iie.service.update.UpdateDocument
 import cn.ac.iie.spark
 import cn.ac.iie.spark.partition.QueryArangoPartition
 import com.arangodb.ArangoCursor
+import com.arangodb.model.AqlQueryOptions
+import com.arangodb.util.MapBuilder
 import org.apache.spark.{Partition, SparkContext, TaskContext}
 import org.apache.spark.rdd.RDD
 import org.slf4j.LoggerFactory
@@ -38,13 +42,15 @@ class ArangoRdd[T: ClassTag](@transient override val sparkContext: SparkContext,
     var arangoCursor:ArangoCursor[T] = null
     val arangoDB = spark.createArangoBuilder(options).build()
+    val bindVars: util.Map[String, AnyRef] = new MapBuilder().get
+    val queryOptions: AqlQueryOptions = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL)
     try {
       val offset = split.offset
       val separate = split.separate
       val collection = options.collection
       val sql = s"FOR doc IN $collection limit $offset,$separate RETURN doc"
       LOG.info(sql)
-      arangoCursor = arangoDB.db(options.database).query(sql,clazz.runtimeClass.asInstanceOf[Class[T]])
+      arangoCursor = arangoDB.db(options.database).query(sql,bindVars,queryOptions,clazz.runtimeClass.asInstanceOf[Class[T]])
     }catch {
       case e: Exception => LOG.error(s"failed to create cursor: ${e.getMessage}")
     }finally {
@@ -71,6 +77,9 @@ class ArangoRdd[T: ClassTag](@transient override val sparkContext: SparkContext,
     try {
       val longs = arangoDB.db(options.database).query(sql, classOf[Long])
       while (longs.hasNext) cnt = longs.next
+      if (cnt > ApplicationConfig.ARANGODB_TOTAL_NUM){
+        cnt = ApplicationConfig.ARANGODB_TOTAL_NUM
+      }
     } catch {
       case e: Exception => LOG.error(sql + s" failed to execute: ${e.getMessage}")
     }finally {

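Note: passing bindVars and queryOptions routes the partition query through the driver overload that takes AqlQueryOptions, and ttl keeps the server-side cursor alive long enough for slow partition reads instead of letting it be garbage-collected mid-iteration. A hedged driver-level sketch (host, database, collection, and the TTL value are placeholders):

    import com.arangodb.ArangoDB
    import com.arangodb.entity.BaseDocument
    import com.arangodb.model.AqlQueryOptions
    import com.arangodb.util.MapBuilder

    object CursorTtlSketch {
      def main(args: Array[String]): Unit = {
        val arango = new ArangoDB.Builder().host("127.0.0.1", 8529).build()
        val bindVars = new MapBuilder().get
        val opts = new AqlQueryOptions().ttl(600) // cursor TTL in seconds
        val cursor = arango.db("graph_db")
          .query("FOR doc IN FQDN LIMIT 0,1000 RETURN doc", bindVars, opts, classOf[BaseDocument])
        while (cursor.hasNext) cursor.next()
        arango.shutdown()
      }
    }
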
@@ -30,7 +30,7 @@ object SparkSessionUtil {
     spark
   }

-  def getContext: SparkContext = {
+  private def getContext: SparkContext = {
     @transient var sc: SparkContext = null
     if (sparkContext == null) sc = spark.sparkContext
     sc