6 Commits

Author      SHA1         Message                                      Date
wanglihui   264afdaa3e   Add a check for whether radius is active     2021-10-25 14:47:22 +08:00
wanglihui   423e9d9b44   Unify log terminology; rename tables         2021-09-24 11:06:16 +08:00
wanglihui   d61fbee61a   Unify log terminology; rename tables         2021-09-16 11:08:01 +08:00
wanglihui   2f7cceb826   Format code                                  2021-08-02 18:49:43 +08:00
wanglihui   51d2549902   Fix bug where class tag failed to compile    2021-04-14 14:20:41 +08:00
wanglihui   f0cebd8e1c   Format code                                  2021-03-23 11:26:55 +08:00
22 changed files with 276 additions and 624 deletions

View File

@@ -14,7 +14,7 @@ public class ApplicationConfig {
public static final Integer ARANGODB_BATCH = ConfigUtils.getIntProperty( "arangoDB.batch");
public static final Integer UPDATE_ARANGO_BATCH = ConfigUtils.getIntProperty("update.arango.batch");
public static final String ARANGODB_READ_LIMIT = ConfigUtils.getStringProperty("arangoDB.read.limit");
public static final Long ARANGODB_READ_LIMIT = ConfigUtils.getLongProperty("arangoDB.read.limit");
public static final Integer THREAD_POOL_NUMBER = ConfigUtils.getIntProperty( "thread.pool.number");
public static final Integer THREAD_AWAIT_TERMINATION_TIME = ConfigUtils.getIntProperty( "thread.await.termination.time");

View File

@@ -129,6 +129,9 @@ public class BaseArangoData {
private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER + 1;
long offsetNum = threadNumber * sepNum;
if (sepNum > ApplicationConfig.ARANGODB_READ_LIMIT){
sepNum = ApplicationConfig.ARANGODB_READ_LIMIT;
}
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}
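
The change above caps a single thread's page size at the new numeric arangoDB.read.limit before it is spliced into the AQL LIMIT clause. A minimal Scala transcription with a worked example (names and values illustrative, not from the project):

object ArangoPaging {
  // Each worker reads one contiguous page of the collection; the cap keeps a single
  // AQL query from requesting more than arangoDB.read.limit documents.
  def querySql(cnt: Long, threadNumber: Int, table: String,
               threadPoolNumber: Int, readLimit: Long): String = {
    var sepNum = cnt / threadPoolNumber + 1   // page size per thread
    val offsetNum = threadNumber * sepNum     // this thread's starting offset
    if (sepNum > readLimit) sepNum = readLimit
    s"FOR doc IN $table limit $offsetNum,$sepNum RETURN doc"
  }

  def main(args: Array[String]): Unit =
    // 2,500,000 docs, 10 threads, read limit 10,000,000 -> page size 250,001, thread 3 starts at 750,003
    println(querySql(2500000L, 3, "FQDN", 10, 10000000L))
    // prints: FOR doc IN FQDN limit 750003,250001 RETURN doc
}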

View File

@@ -44,8 +44,8 @@ public class UpdateGraphData {
long start = System.currentTimeMillis();
try {
updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
// updateDocument(newVertexFqdnMap, historyVertexFqdnMap, "FQDN", Fqdn.class,BaseDocument.class,
// ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
updateDocument(newVertexIpMap,historyVertexIpMap,"IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);
@@ -79,8 +79,8 @@ public class UpdateGraphData {
long start = System.currentTimeMillis();
try {
updateDocument("FQDN", Fqdn.class,BaseDocument.class,
ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
// updateDocument("FQDN", Fqdn.class,BaseDocument.class,
// ReadClickhouseData::getVertexFqdnSql,ReadClickhouseData::getVertexFqdnDocument);
updateDocument("IP", Ip.class,BaseDocument.class,
ReadClickhouseData::getVertexIpSql,ReadClickhouseData::getVertexIpDocument);

View File

@@ -257,40 +257,40 @@ public class ReadClickhouseData {
public static String getVertexFqdnSql() {
String where = "common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni";
String httpSql = "SELECT http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host";
String sslSql = "SELECT ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni";
String httpSql = "SELECT http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host";
return "SELECT FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME FROM ((" + sslSql + ") UNION ALL (" + httpSql + ")) GROUP BY FQDN HAVING FQDN != ''";
}
public static String getVertexIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.connection_record_log where " + where + " group by IP";
String clientIpSql = "SELECT common_client_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_c2s) as common_link_info,'client' as ip_type FROM tsg_galaxy_v3.session_record where " + where + " group by IP";
String serverIpSql = "SELECT common_server_ip AS IP, MIN(common_recv_time) AS FIRST_FOUND_TIME,MAX(common_recv_time) AS LAST_FOUND_TIME,count(*) as SESSION_COUNT,sum(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,groupUniqArray(2)(common_link_info_s2c) as common_link_info,'server' as ip_type FROM tsg_galaxy_v3.session_record where " + where + " group by IP";
return "SELECT * FROM((" + clientIpSql + ") UNION ALL (" + serverIpSql + "))";
}
public static String getRelationshipFqdnAddressIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
String sslSql = "SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'TLS' AS schema_type FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip";
String httpSql = "SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,groupUniqArray("+DISTINCT_CLIENT_IP_NUM+")(common_client_ip) AS DIST_CIP_RECENT,'HTTP' AS schema_type FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
}
public static String getRelationshipIpVisitFqdnSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime;
String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
String sslSql = "SELECT ssl_sni AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'TLS' AS schema_type FROM tsg_galaxy_v3.connection_record_log WHERE common_schema_type = 'SSL' GROUP BY ssl_sni,common_client_ip";
String httpSql = "SELECT http_host AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'HTTP' AS schema_type FROM tsg_galaxy_v3.session_record WHERE " + where + " and common_schema_type = 'HTTP' GROUP BY http_host,common_client_ip";
String sslSql = "SELECT ssl_sni AS FQDN,common_client_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,'TLS' AS schema_type FROM tsg_galaxy_v3.session_record WHERE common_schema_type = 'SSL' GROUP BY ssl_sni,common_client_ip";
return "SELECT * FROM ((" + sslSql + ") UNION ALL (" + httpSql + "))WHERE FQDN != ''";
}
public static String getVertexSubscriberSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_subscriber_id != '' AND radius_packet_type = 4 AND radius_acct_status_type = 1";
return "SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log WHERE" + where + " GROUP BY common_subscriber_id";
return "SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record WHERE" + where + " GROUP BY common_subscriber_id";
}
public static String getRelationshipSubsciberLocateIpSql() {
String where = " common_recv_time >= " + minTime + " AND common_recv_time < " + maxTime + " AND common_subscriber_id != '' AND radius_framed_ip != '' AND radius_packet_type = 4 AND radius_acct_status_type = 1";
return "SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME,COUNT(*) as COUNT_TOTAL FROM radius_record_log WHERE" + where + " GROUP BY common_subscriber_id,radius_framed_ip";
return "SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME,COUNT(*) as COUNT_TOTAL FROM radius_record WHERE" + where + " GROUP BY common_subscriber_id,radius_framed_ip";
}
private static long[] getTimeLimit() {

View File

@@ -33,10 +33,10 @@ public class Ip extends Vertex {
}
private void updateIpByType(BaseDocument newDocument, BaseDocument historyDocument) {
putSumAttribute(newDocument, historyDocument, "CLIENT_SESSION_COUNT");
putSumAttribute(newDocument, historyDocument, "CLIENT_BYTES_SUM");
putSumAttribute(newDocument, historyDocument, "SERVER_SESSION_COUNT");
putSumAttribute(newDocument, historyDocument, "SERVER_BYTES_SUM");
// putSumAttribute(newDocument, historyDocument, "CLIENT_SESSION_COUNT");
// putSumAttribute(newDocument, historyDocument, "CLIENT_BYTES_SUM");
// putSumAttribute(newDocument, historyDocument, "SERVER_SESSION_COUNT");
// putSumAttribute(newDocument, historyDocument, "SERVER_BYTES_SUM");
}
}

View File

@@ -6,7 +6,6 @@ public class IpLearningApplicationTest {
public static void main(String[] args) {
UpdateGraphData updateGraphData = new UpdateGraphData();
// updateGraphData.updateArango();
updateGraphData.updateArango2();
}

View File

@@ -1,15 +1,13 @@
#arangoDB参数配置
arangoDB.host=192.168.40.182
#arangoDB.host=192.168.40.224
arangoDB.host=192.168.44.12
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
arangoDB.DB.name=ip-learning-test
#arangoDB.DB.name=tsg_galaxy_v3
arangoDB.user=root
arangoDB.password=ceiec2019
arangoDB.DB.name=tsg_galaxy_v3
arangoDB.batch=100000
arangoDB.ttl=3600
arangoDB.read.limit=
arangoDB.read.limit=10000000
update.arango.batch=10000
thread.pool.number=10

View File

@@ -1,9 +1,7 @@
drivers=ru.yandex.clickhouse.ClickHouseDriver
mdb.user=default
db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
mdb.password=111111
#db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=300000
#mdb.password=ceiec2019
db.id=192.168.44.67:8123/tsg_galaxy_v3?socket_timeout=3600000
mdb.password=ceiec2019
initialsize=1
minidle=1
maxactive=50

View File

@@ -1,53 +0,0 @@
package cn.ac.iie;
import cn.ac.iie.dao.BaseArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.junit.After;
import org.junit.Test;
import java.util.Enumeration;
import java.util.concurrent.ConcurrentHashMap;
public class TestReadArango {
private static ExecutorThreadPool pool = ExecutorThreadPool.getInstance();
private static ArangoDBConnect arangoManger = ArangoDBConnect.getInstance();
private static BaseArangoData baseArangoData = new BaseArangoData();
@Test
public void testReadFqdnFromArango() {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyData =
baseArangoData.readHistoryData("FQDN", BaseDocument.class);
printMap(historyData);
}
@Test
public void testReadFqdnLocIpFromArango() {
ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> ip =
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", BaseEdgeDocument.class);
printMap(ip);
}
private <T extends BaseDocument> void printMap(ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyData) {
ConcurrentHashMap<String, T> map = historyData.get(2);
Enumeration<String> keys = map.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement();
T document = map.get(key);
System.out.println(document.toString());
}
}
@After
public void clearSource() {
pool.shutdown();
arangoManger.clean();
}
}

View File

@@ -1,75 +0,0 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
* 获取arangoDB历史数据
*
* @author wlh
*/
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
public <T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
try {
LOG.warn("开始更新" + table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
historyMap.put(i, new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
// long[] timeRange = getTimeRange(table);
Long countTotal = getCountTotal(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
// String sql = getQuerySql(timeRange, i, table);
String sql = getQuerySql(countTotal, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.warn("读取" + table + " arangoDB 共耗时:" + (last - start));
} catch (Exception e) {
e.printStackTrace();
}
}
private Long getCountTotal(String table){
long start = System.currentTimeMillis();
Long cnt = 0L;
String sql = "RETURN LENGTH("+table+")";
try {
ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
while (longs.hasNext()){
cnt = longs.next();
}
}catch (Exception e){
LOG.error(sql +"执行异常");
}
long last = System.currentTimeMillis();
LOG.info(sql+" 结果:"+cnt+" 执行时间:"+(last-start));
return cnt;
}
private String getQuerySql(Long cnt,int threadNumber, String table){
long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER() + 1;
long offsetNum = threadNumber * sepNum;
return "FOR doc IN " + table + " limit "+offsetNum+","+sepNum+" RETURN doc";
}
}

View File

@@ -1,125 +0,0 @@
package cn.ac.iie.service.read;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
* @author wlh
* 多线程全量读取arangoDb历史数据封装到map
*/
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
public static final HashSet<String> PROTOCOL_SET;
static {
PROTOCOL_SET = new HashSet<>();
PROTOCOL_SET.add("HTTP");
PROTOCOL_SET.add("TLS");
PROTOCOL_SET.add("DNS");
}
private ArangoDBConnect arangoConnect;
private String query;
private ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map;
private Class<T> type;
private String table;
private CountDownLatch countDownLatch;
public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
String query,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
Class<T> type,
String table,
CountDownLatch countDownLatch) {
this.arangoConnect = arangoConnect;
this.query = query;
this.map = map;
this.type = type;
this.table = table;
this.countDownLatch = countDownLatch;
}
@Override
public void run() {
try {
long s = System.currentTimeMillis();
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
List<T> baseDocuments = docs.asListRemaining();
int i = 0;
for (T doc : baseDocuments) {
String key = doc.getKey();
switch (table) {
case "R_LOCATE_FQDN2IP":
updateProtocolDocument(doc);
deleteDistinctClientIpByTime(doc);
break;
case "R_VISIT_IP2FQDN":
updateProtocolDocument(doc);
break;
default:
}
int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER();
ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
tmpMap.put(key, doc);
i++;
}
long l = System.currentTimeMillis();
LOG.warn(query + "\n读取" + i + "条数据,运行时间:" + (l - s));
}
}catch (Exception e){
e.printStackTrace();
}finally {
countDownLatch.countDown();
LOG.warn("本线程读取完毕,剩余线程数量:"+countDownLatch.getCount());
}
}
private void updateProtocolDocument(T doc) {
if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;
doc.addAttribute(protocolRecent, cntRecentsDst);
}
}
}
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
String[] distCipArr = new String[index];
long[] disCipTsArr = new long[index];
if (distCip.size() + 1 == distCipTs.size()){
for (int i = 0; i < index; i++) {
distCipArr[i] = distCip.get(i);
disCipTsArr[i] = distCipTs.get(i);
}
}
doc.updateAttribute("DIST_CIP", distCipArr);
doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
}
}

View File

@@ -22,6 +22,7 @@ public class ArangoDBConnect {
private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
private static ArangoDB arangoDB = null;
private static ArangoDBConnect conn = null;
static {
getArangoDatabase();
}
@@ -105,12 +106,11 @@ public class ArangoDBConnect {
}
}
} catch (Exception e) {
System.out.println("更新失败:"+e.toString());
LOG.error("更新失败:" + e.toString());
} finally {
docOverwrite.clear();
}
}
}

View File

@@ -13,27 +13,32 @@ spark.read.clickhouse.password=ceiec2019
spark.read.clickhouse.numPartitions=5
spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
spark.read.clickhouse.session.table=session_record
spark.read.clickhouse.radius.table=radius_record
clickhouse.socket.timeout=300000
#arangoDB配置
arangoDB.host=192.168.40.182
#arangoDB.host=192.168.40.223
arangoDB.host=192.168.44.12
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
arangoDB.DB.name=ip-learning-test-0
arangoDB.user=root
#arangoDB.password=galaxy_2019
arangoDB.password=ceiec2019
arangoDB.DB.name=tsg_galaxy_v3_test
#arangoDB.DB.name=iplearn_media_domain
arangoDB.ttl=3600
thread.pool.number=10
#读取clickhouse时间范围方式0读取过去一小时1指定时间范围
clickhouse.time.limit.type=1
read.clickhouse.max.time=1603785961
read.clickhouse.min.time=1603354682
clickhouse.time.limit.type=0
read.clickhouse.max.time=1608518990
read.clickhouse.min.time=1604851201
arangoDB.read.limit=1
update.arango.batch=10000
distinct.client.ip.num=10000
recent.count.hour=24
update.interval=3600
arangodb.total.num=20000000
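
The comment above states that clickhouse.time.limit.type = 0 reads the previous hour while 1 uses the explicit read.clickhouse.min.time / max.time range, so this change switches the job from a fixed test window back to hourly reads. A small sketch of that selection logic (assumed behaviour, not the project's getTimeLimit):

object TimeWindowSketch {
  // type 0 -> previous full hour (epoch seconds); type 1 -> configured fixed bounds
  def timeWindow(limitType: Int, minTime: Long, maxTime: Long): (Long, Long) = {
    if (limitType == 0) {
      val hourStart = System.currentTimeMillis() / 1000 / 3600 * 3600
      (hourStart - 3600, hourStart)
    } else (minTime, maxTime)
  }

  def main(args: Array[String]): Unit =
    println(timeWindow(1, 1604851201L, 1608518990L)) // -> (1604851201,1608518990)
}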

View File

@@ -4,20 +4,15 @@ log4j.logger.org.apache.http.wire=OFF
#Log4j
log4j.rootLogger=info,console,file
# Console log output
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=warn
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# File log output
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=warn
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Log file output path; point it at the directory for the current deployment
#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
log4j.appender.file.file=./logs/ip-learning-application.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout

View File

@@ -20,6 +20,8 @@ object ApplicationConfig {
val SPARK_READ_CLICKHOUSE_PASSWORD: String = config.getString("spark.read.clickhouse.password")
val SPARK_READ_CLICKHOUSE_FETCHSIZE: String = config.getString("spark.read.clickhouse.fetchsize")
val SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN: String = config.getString("spark.read.clickhouse.partitionColumn")
val SPARK_READ_CLICKHOUSE_SESSION_TABLE: String = config.getString("spark.read.clickhouse.session.table")
val SPARK_READ_CLICKHOUSE_RADIUS_TABLE: String = config.getString("spark.read.clickhouse.radius.table")
val ARANGODB_HOST: String= config.getString("arangoDB.host")
val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
@@ -36,11 +38,12 @@ object ApplicationConfig {
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
val ARANGODB_READ_LIMIT: Int = config.getInt("arangoDB.read.limit")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")
val UPDATE_INTERVAL: Int = config.getInt("update.interval")
val ARANGODB_TOTAL_NUM: Long = config.getLong("arangodb.total.num")
}
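
The new SESSION_TABLE / RADIUS_TABLE and ARANGODB_TOTAL_NUM values are read through the same config object as the existing keys. A sketch of how those keys could be loaded, assuming `config` comes from a Typesafe Config loader (the actual loader is not shown in this diff):

import com.typesafe.config.{Config, ConfigFactory}

object ConfigSketch {
  // ConfigFactory.load() picks up application.conf/.properties from the classpath
  private val config: Config = ConfigFactory.load()

  val sessionTable: String = config.getString("spark.read.clickhouse.session.table") // session_record
  val radiusTable: String  = config.getString("spark.read.clickhouse.radius.table")  // radius_record
  val arangoTotalNum: Long = config.getLong("arangodb.total.num")                    // cap on docs read per collection
}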

View File

@@ -0,0 +1,22 @@
package cn.ac.iie.dao
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions}
import cn.ac.iie.utils.SparkSessionUtil.sparkContext
import org.slf4j.LoggerFactory
import scala.reflect.ClassTag
object BaseArangoData {
private val LOG = LoggerFactory.getLogger(BaseArangoData.getClass)
private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)
def loadArangoRdd[T: ClassTag](name:String): ArangoRdd[T] ={
val value = ArangoSpark.load[T](sparkContext, name, options)
LOG.warn(s"读取$name arangoDb:${value.count()}")
value
}
}

View File

@@ -32,149 +32,22 @@ object BaseClickhouseData {
dataFrame
}
def loadConnectionDataFromCk(): Unit ={
def getVertexFqdnDf: DataFrame = {
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT
| ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
|FROM
| connection_record_log
|WHERE $where) as dbtable
""".stripMargin
LOG.warn(sql)
initClickhouseData(sql)
}
private def loadRadiusDataFromCk(): Unit ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
| AND radius_acct_status_type = 1
""".stripMargin
val sql =
s"""
|(SELECT
| common_subscriber_id,radius_framed_ip,common_recv_time
|FROM
| tsg_galaxy_v3.radius_record_log
|WHERE
| $where) as dbtable
""".stripMargin
LOG.warn(sql)
initClickhouseData(sql)
}
/*
def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| *
|FROM
| (
| (
| SELECT
| common_client_ip AS IP,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(common_c2s_byte_num) as BYTES_SUM,
| 'client' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| UNION ALL
| (
| SELECT
| common_server_ip AS IP,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(common_s2c_byte_num) as BYTES_SUM,
| 'server' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| )
""".stripMargin
LOG.warn(sql)
val vertexIpDf = spark.sql(sql)
vertexIpDf.printSchema()
vertexIpDf
}
def getRelationFqdnLocateIpDf: DataFrame ={
loadConnectionDataFromCk()
val sslSql =
"""
|SELECT
| ssl_sni AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'TLS' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'SSL'
|GROUP BY
| ssl_sni,common_server_ip
""".stripMargin
val httpSql =
"""
|SELECT
| http_host AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'HTTP' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'HTTP'
|GROUP BY
| http_host,common_server_ip
""".stripMargin
val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"
LOG.warn(sql)
val relationFqdnLocateIpDf = spark.sql(sql)
relationFqdnLocateIpDf.printSchema()
relationFqdnLocateIpDf
}
*/
def getVertexFqdnDf: DataFrame ={
val sql =
"""
|(SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| ((SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM tsg_galaxy_v3.connection_record_log
| WHERE common_schema_type = 'SSL' GROUP BY ssl_sni
| FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
| WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni
| )UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM tsg_galaxy_v3.connection_record_log
| WHERE common_schema_type = 'HTTP' GROUP BY http_host))
| FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
| WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host))
|GROUP BY FQDN HAVING FQDN != '') as dbtable
""".stripMargin
LOG.warn(sql)
@@ -194,7 +67,7 @@ object BaseClickhouseData {
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
|'client' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
|where $where
|group by common_client_ip)
|UNION ALL
@@ -205,7 +78,7 @@ object BaseClickhouseData {
|SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
|groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
|'server' as ip_type
|FROM tsg_galaxy_v3.connection_record_log
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
|where $where
|group by common_server_ip))) as dbtable
""".stripMargin
@@ -223,12 +96,12 @@ object BaseClickhouseData {
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
|WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
|UNION ALL
|(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
|FROM tsg_galaxy_v3.connection_record_log
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
|WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
|WHERE FQDN != '') as dbtable
""".stripMargin
@@ -250,7 +123,7 @@ object BaseClickhouseData {
s"""
|(
|SELECT common_subscriber_id,radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME
|FROM radius_record_log
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_RADIUS_TABLE}
|WHERE $where GROUP BY common_subscriber_id,radius_framed_ip
|) as dbtable
""".stripMargin
@@ -267,11 +140,13 @@ object BaseClickhouseData {
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
| AND radius_acct_status_type = 1
""".stripMargin
val sql =
s"""
|(
|SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM radius_record_log
|SELECT common_subscriber_id,MAX(common_recv_time) as LAST_FOUND_TIME,MIN(common_recv_time) as FIRST_FOUND_TIME FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_RADIUS_TABLE}
|WHERE $where GROUP BY common_subscriber_id
|)as dbtable
""".stripMargin
@@ -288,11 +163,15 @@ object BaseClickhouseData {
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
| AND radius_acct_status_type = 1
|
""".stripMargin
val sql =
s"""
|(
|SELECT DISTINCT radius_framed_ip,common_recv_time as LAST_FOUND_TIME FROM radius_record_log WHERE $where
|SELECT radius_framed_ip,MAX(common_recv_time) as LAST_FOUND_TIME FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_RADIUS_TABLE} WHERE $where
|GROUP BY radius_framed_ip
|)as dbtable
""".stripMargin
LOG.warn(sql)

View File

@@ -3,39 +3,33 @@ package cn.ac.iie.service.transform
import java.util.regex.Pattern
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.dao.{BaseArangoData, BaseClickhouseData}
import cn.ac.iie.spark.partition.CustomPartitioner
import cn.ac.iie.spark.rdd.ReadOptions
import cn.ac.iie.spark.rdd.ArangoRdd
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.slf4j.LoggerFactory
import cn.ac.iie.utils.SparkSessionUtil._
object MergeDataFrame {
private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
private val pattern = Pattern.compile("^[\\d]*$")
private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)
def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
val fqdnAccmu = getLongAccumulator("FQDN Accumulator")
val fqdnRddRow = BaseClickhouseData.getVertexFqdnDf
.rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
fqdnAccmu.add(1)
def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Row))] = {
val fqdnRddRow: RDD[(String, Row)] = BaseClickhouseData.getVertexFqdnDf
.repartition().rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
(row.getAs[String]("FQDN"), row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
fqdnRddRow.cache()
val fqdnRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"FQDN",options)
})/*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
fqdnRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnRddRow)
val fqdnRddDoc: ArangoRdd[BaseDocument] = BaseArangoData.loadArangoRdd[BaseDocument]("FQDN")
fqdnRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnRddRow)
}
def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Option[Row]))]={
val ipAccum = getLongAccumulator("IP Accumulator")
def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Row))] = {
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
val frame = vertexIpDf.repartition().groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
@@ -44,18 +38,16 @@ object MergeDataFrame {
last("common_link_info").alias("common_link_info")
)
val ipRddRow = frame.rdd.map(row => {
ipAccum.add(1)
(row.getAs[String]("IP"), row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val ipRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"IP",options)
ipRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(ipRddRow)
})/*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
val ipRddDoc = BaseArangoData.loadArangoRdd[BaseDocument]("IP")
ipRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(ipRddRow)
}
def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
val fqdnLocIpAccum = getLongAccumulator("R_LOCATE_FQDN2IP Accumulator")
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf
.repartition().filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "common_server_ip")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
@@ -68,54 +60,46 @@ object MergeDataFrame {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-" + serverIp)
fqdnLocIpAccum.add(1)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val fqdnLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_FQDN2IP",options)
fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnLocIpRddRow)
})/*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
val fqdnLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_FQDN2IP")
fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnLocIpRddRow)
}
def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] ={
val subidLocIpAccum = getLongAccumulator("R_LOCATE_SUBSCRIBER2IP Accumulator")
def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
val subidLocIpRddRow = BaseClickhouseData.getRelationSubidLocateIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
val ip = row.getAs[String]("radius_framed_ip")
val key = commonSubscriberId.concat("-" + ip)
subidLocIpAccum.add(1)
(key, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val subidLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext,"R_LOCATE_SUBSCRIBER2IP",options)
val subidLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_SUBSCRIBER2IP")
subidLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidLocIpRddRow)
subidLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(subidLocIpRddRow)
}
def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Option[Row]))] ={
val subidAccum = getLongAccumulator("SUBSCRIBER Accumulator")
def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Row))] = {
val subidRddRow = BaseClickhouseData.getVertexSubidDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val commonSubscriberId = row.getAs[String]("common_subscriber_id")
subidAccum.add(1)
(commonSubscriberId, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
val subidRddDoc = ArangoSpark.load[BaseDocument](sparkContext,"SUBSCRIBER",options)
val subidRddDoc = BaseArangoData.loadArangoRdd[BaseDocument]("SUBSCRIBER")
subidRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidRddRow)
subidRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(subidRddRow)
}
def mergeVertexFrameIp: RDD[Row] = {
val framedIpAccum = getLongAccumulator("framed ip Accumulator")
val values = BaseClickhouseData.getVertexFramedIpDf
.repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.rdd.map(row => {
val ip = row.getAs[String]("radius_framed_ip")
framedIpAccum.add(1)
(ip, row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
values
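
Throughout this file the history documents are now combined with the new ClickHouse rows via rightOuterJoin instead of fullOuterJoin, which is why the return types change from (Option[BaseDocument], Option[Row]) to (Option[BaseDocument], Row): every new row is kept, while history-only keys are dropped. A standalone Spark sketch (hypothetical data) showing the shape difference:

import org.apache.spark.sql.SparkSession

object JoinShapeDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("join-shape").getOrCreate()
    val sc = spark.sparkContext

    val arangoDocs = sc.parallelize(Seq("a.com" -> "doc-a", "b.com" -> "doc-b")) // history side
    val ckRows     = sc.parallelize(Seq("b.com" -> 10L, "c.com" -> 3L))          // new-window side

    // fullOuterJoin keeps keys from either side, both values optional:
    // (a.com,(Some(doc-a),None)), (b.com,(Some(doc-b),Some(10))), (c.com,(None,Some(3)))
    arangoDocs.fullOuterJoin(ckRows).collect().foreach(println)

    // rightOuterJoin keeps only keys present on the right, and the right value is not optional:
    // (b.com,(Some(doc-b),10)), (c.com,(None,3))
    arangoDocs.rightOuterJoin(ckRows).collect().foreach(println)

    spark.stop()
  }
}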

View File

@@ -2,10 +2,10 @@ package cn.ac.iie.service.update
import java.util
import scala.collection.JavaConversions._
import scala.collection.JavaConversions._
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.read.ReadHistoryArangoData
import cn.ac.iie.dao.BaseClickhouseData
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import scala.collection.mutable
@@ -29,10 +29,12 @@ object UpdateDocHandler {
val hisAttritube = hisDoc.getAttribute(attributeName).toString.toLong
hisDoc.addAttribute(attributeName, newAttribute + hisAttritube)
}
}
def replaceAttribute(hisDoc: BaseDocument, newAttribute: String, attributeName: String): Unit = {
hisDoc.addAttribute(attributeName,newAttribute)
// hisDoc.addAttribute(attributeName,newAttribute)
hisDoc.updateAttribute(attributeName, newAttribute)
}
def separateAttributeByIpType(ipTypeList: ofRef[String],
@@ -130,7 +132,7 @@ object UpdateDocHandler {
def putDistinctIp(doc: BaseEdgeDocument, newDistinctIp: Array[String]): Unit = {
val map = newDistinctIp.map(ip => {
(ip, ReadHistoryArangoData.currentHour)
(ip, BaseClickhouseData.currentHour)
}).toMap
doc.addAttribute("DIST_CIP", map.keys.toArray)
doc.addAttribute("DIST_CIP_TS", map.values.toArray)
@@ -144,7 +146,7 @@ object UpdateDocHandler {
val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq: _*)
newDistinctIp.foreach(cip => {
muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour)
muDistCipToTsMap.put(cip, BaseClickhouseData.currentHour)
})
val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
hisDoc.addAttribute("DIST_CIP", resultMap.keys.toArray)

View File

@@ -19,7 +19,7 @@ object UpdateDocument {
def update(): Unit = {
try {
updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)
// updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)
updateDocument("SUBSCRIBER", getVertexSubidRow, mergeVertexSubid)
@@ -41,18 +41,23 @@ object UpdateDocument {
}
private def updateDocument[T <: BaseDocument](collName: String,
getDocumentRow: ((String, (Option[T], Option[Row]))) => T,
getJoinRdd: () => RDD[(String, (Option[T], Option[Row]))]
getDocumentRow: ((String, (Option[T], Row))) => T,
getJoinRdd: () => RDD[(String, (Option[T], Row))]
): Unit = {
try {
val start = System.currentTimeMillis()
val joinRdd = getJoinRdd()
val fqdnAccmu = SparkSessionUtil.getLongAccumulator(s"$collName Accumulator")
joinRdd.foreachPartition(iter => {
val resultDocumentList = new util.ArrayList[T]
var i = 0
iter.foreach(row => {
val document = getDocumentRow(row)
val document: T = getDocumentRow(row)
if (document != null) {
fqdnAccmu.add(1)
resultDocumentList.add(document)
}
i += 1
@@ -67,8 +72,11 @@ object UpdateDocument {
LOG.warn(s"更新$collName:" + i)
}
})
LOG.warn(s"更新$collName 条数:${fqdnAccmu.value}")
val last = System.currentTimeMillis()
LOG.warn(s"更新$collName 时间${last - start}")
LOG.warn(s"更新$collName 时间:${last - start}")
} catch {
case e: Exception => e.printStackTrace()
}
@@ -103,7 +111,7 @@ object UpdateDocument {
document
}
private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument ={
private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Row))): BaseEdgeDocument = {
val subidLocIpDocOpt = joinRow._2._1
var subidLocIpDoc = subidLocIpDocOpt match {
@@ -111,12 +119,12 @@ object UpdateDocument {
case None => null
}
val subidLocIpRowOpt = joinRow._2._2
val subidLocIpRow = joinRow._2._2
val subidLocIpRow = subidLocIpRowOpt match {
case Some(r) => r
case None => null
}
// val subidLocIpRow = subidLocIpRowOpt match {
// case Some(r) => r
// case None => null
// }
if (subidLocIpRow != null) {
val subId = subidLocIpRow.getAs[String]("common_subscriber_id")
@@ -141,19 +149,19 @@ object UpdateDocument {
subidLocIpDoc
}
private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument ={
private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
val subidDocOpt = joinRow._2._1
var subidDoc = subidDocOpt match {
case Some(doc) => doc
case None => null
}
val subidRowOpt = joinRow._2._2
val subidRow = joinRow._2._2
val subidRow = subidRowOpt match {
case Some(r) => r
case None => null
}
// val subidRow = subidRowOpt match {
// case Some(r) => r
// case None => null
// }
if (subidRow != null) {
val subId = subidRow.getAs[String]("common_subscriber_id")
@@ -173,19 +181,19 @@ object UpdateDocument {
subidDoc
}
private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
val fqdnDocOpt = joinRow._2._1
var fqdnDoc = fqdnDocOpt match {
case Some(doc) => doc
case None => null
}
val fqdnRowOpt = joinRow._2._2
val fqdnRow: Row = joinRow._2._2
val fqdnRow = fqdnRowOpt match {
case Some(r) => r
case None => null
}
// val fqdnRow = fqdnRowOpt match {
// case Some(r) => r
// case None => null
// }
if (fqdnRow != null) {
val fqdn = fqdnRow.getAs[String]("FQDN")
@@ -205,19 +213,19 @@ object UpdateDocument {
fqdnDoc
}
private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
val ipDocOpt = joinRow._2._1
var ipDoc = ipDocOpt match {
case Some(doc) => doc
case None => null
}
val ipRowOpt = joinRow._2._2
val ipRow = joinRow._2._2
val ipRow = ipRowOpt match {
case Some(r) => r
case None => null
}
// val ipRow = ipRowOpt match {
// case Some(r) => r
// case None => null
// }
if (ipRow != null) {
val ip = ipRow.getAs[String]("IP")
@@ -253,7 +261,7 @@ object UpdateDocument {
ipDoc
}
private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument = {
private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Row))): BaseEdgeDocument = {
val fqdnLocIpDocOpt = joinRow._2._1
var fqdnLocIpDoc = fqdnLocIpDocOpt match {
@@ -261,12 +269,12 @@ object UpdateDocument {
case None => null
}
val fqdnLocIpRowOpt = joinRow._2._2
val fqdnLocIpRow = joinRow._2._2
val fqdnLocIpRow = fqdnLocIpRowOpt match {
case Some(r) => r
case None => null
}
// val fqdnLocIpRow = fqdnLocIpRowOpt match {
// case Some(r) => r
// case None => null
// }
if (fqdnLocIpDoc != null) {
updateProtocolDocument(fqdnLocIpDoc)

View File

@@ -1,11 +1,15 @@
package cn.ac.iie.spark.rdd
import java.util
import scala.collection.JavaConverters.asScalaIteratorConverter
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.update.UpdateDocument
import cn.ac.iie.spark
import cn.ac.iie.spark.partition.QueryArangoPartition
import com.arangodb.ArangoCursor
import com.arangodb.model.AqlQueryOptions
import com.arangodb.util.MapBuilder
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.slf4j.LoggerFactory
@@ -38,13 +42,15 @@ class ArangoRdd[T: ClassTag](@transient override val sparkContext: SparkContext,
var arangoCursor:ArangoCursor[T] = null
val arangoDB = spark.createArangoBuilder(options).build()
val bindVars: util.Map[String, AnyRef] = new MapBuilder().get
val queryOptions: AqlQueryOptions = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL)
try {
val offset = split.offset
val separate = split.separate
val collection = options.collection
val sql = s"FOR doc IN $collection limit $offset,$separate RETURN doc"
LOG.info(sql)
arangoCursor = arangoDB.db(options.database).query(sql,clazz.runtimeClass.asInstanceOf[Class[T]])
arangoCursor = arangoDB.db(options.database).query(sql,bindVars,queryOptions,clazz.runtimeClass.asInstanceOf[Class[T]])
}catch {
case e: Exception => LOG.error(s"创建Cursor异常:${e.getMessage}")
}finally {
@@ -71,6 +77,9 @@ class ArangoRdd[T: ClassTag](@transient override val sparkContext: SparkContext,
try {
val longs = arangoDB.db(options.database).query(sql, classOf[Long])
while (longs.hasNext) cnt = longs.next
if (cnt > ApplicationConfig.ARANGODB_TOTAL_NUM){
cnt = ApplicationConfig.ARANGODB_TOTAL_NUM
}
} catch {
case e: Exception => LOG.error(sql + s"执行异常:${e.getMessage}")
}finally {
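
The partition reader now passes bind variables and an AqlQueryOptions carrying the configured TTL when opening the cursor, and caps the collection count at arangodb.total.num. A standalone sketch of the driver call (connection values taken from the properties diffs above; collection name and limit are illustrative):

import com.arangodb.ArangoDB
import com.arangodb.entity.BaseDocument
import com.arangodb.model.AqlQueryOptions
import com.arangodb.util.MapBuilder

object CursorTtlSketch {
  def main(args: Array[String]): Unit = {
    val arango = new ArangoDB.Builder()
      .host("192.168.44.12", 8529).user("root").password("ceiec2019").build()
    // ttl keeps the server-side cursor alive while a long-running partition consumes it
    val options = new AqlQueryOptions().ttl(3600)
    val cursor = arango.db("tsg_galaxy_v3")
      .query("FOR doc IN FQDN limit 0,10000 RETURN doc", new MapBuilder().get, options, classOf[BaseDocument])
    while (cursor.hasNext) println(cursor.next().getKey)
    arango.shutdown()
  }
}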

View File

@@ -30,7 +30,7 @@ object SparkSessionUtil {
spark
}
def getContext: SparkContext = {
private def getContext: SparkContext = {
@transient var sc: SparkContext = null
if (sparkContext == null) sc = spark.sparkContext
sc