Format code
@@ -1,75 +0,0 @@
package cn.ac.iie.dao;

import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

/**
 * Fetch ArangoDB historical data.
 *
 * @author wlh
 */
public class BaseArangoData {
    private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
    private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();

    private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();

    public <T extends BaseDocument> void readHistoryData(String table,
                                                         ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
                                                         Class<T> type) {
        try {
            LOG.warn("Start updating " + table);
            long start = System.currentTimeMillis();
            // One sub-map per worker thread, so threads never contend on the same map.
            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
                historyMap.put(i, new ConcurrentHashMap<>());
            }
            CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
            // long[] timeRange = getTimeRange(table);
            Long countTotal = getCountTotal(table);
            for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
                // String sql = getQuerySql(timeRange, i, table);
                String sql = getQuerySql(countTotal, i, table);
                ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
                threadPool.executor(readHistoryArangoData);
            }
            countDownLatch.await();
            long last = System.currentTimeMillis();
            LOG.warn("Reading " + table + " from ArangoDB took " + (last - start) + " ms");
        } catch (Exception e) {
            LOG.error("Failed to read history data for " + table, e);
        }
    }

    private Long getCountTotal(String table) {
        long start = System.currentTimeMillis();
        Long cnt = 0L;
        String sql = "RETURN LENGTH(" + table + ")";
        try {
            ArangoCursor<Long> longs = arangoDBConnect.executorQuery(sql, Long.class);
            while (longs.hasNext()) {
                cnt = longs.next();
            }
        } catch (Exception e) {
            LOG.error(sql + " failed to execute", e);
        }
        long last = System.currentTimeMillis();
        LOG.info(sql + " result: " + cnt + ", took " + (last - start) + " ms");
        return cnt;
    }

    private String getQuerySql(Long cnt, int threadNumber, String table) {
        long sepNum = cnt / ApplicationConfig.THREAD_POOL_NUMBER() + 1;
        long offsetNum = threadNumber * sepNum;
        return "FOR doc IN " + table + " LIMIT " + offsetNum + "," + sepNum + " RETURN doc";
    }

}
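The deleted class above shards a full-collection read: it counts the collection once, hands each worker thread a disjoint AQL LIMIT window, and waits on a CountDownLatch until every shard finishes. A minimal Scala sketch of that pattern (the `runShard` callback is illustrative, not a method from this repo):

```scala
// Shard a collection of `total` documents across `threads` workers using the
// same arithmetic as getQuerySql: perShard = total / threads + 1 rounds up so
// the windows cover every document (the last window may overshoot harmlessly).
import java.util.concurrent.CountDownLatch

def readSharded(table: String, total: Long, threads: Int)(runShard: String => Unit): Unit = {
  val perShard = total / threads + 1
  val latch = new CountDownLatch(threads)
  (0 until threads).foreach { i =>
    val offset = i.toLong * perShard
    val aql = s"FOR doc IN $table LIMIT $offset, $perShard RETURN doc"
    new Thread(() => { try runShard(aql) finally latch.countDown() }).start()
  }
  latch.await() // block until every shard has finished, as readHistoryData does
}
```

The `+ 1` rounding trades a slightly oversized last window for guaranteed coverage of the remainder.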
@@ -22,6 +22,7 @@ public class ArangoDBConnect {
    private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
    private static ArangoDB arangoDB = null;
    private static ArangoDBConnect conn = null;

    static {
        getArangoDatabase();
    }
@@ -105,12 +106,11 @@ public class ArangoDBConnect {
                }
            }
        } catch (Exception e) {
            System.out.println("Update failed: " + e.toString());
            LOG.error("Update failed: " + e.toString());
        } finally {
            docOverwrite.clear();
        }
    }

}

@@ -15,11 +15,11 @@ spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=LAST_FOUND_TIME
clickhouse.socket.timeout=300000
# ArangoDB configuration
arangoDB.host=192.168.40.182
arangoDB.host=192.168.40.223
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
arangoDB.DB.name=ip-learning-test-0
arangoDB.user=root
arangoDB.password=galaxy_2019
arangoDB.DB.name=tsg_galaxy_v3
#arangoDB.DB.name=iplearn_media_domain
arangoDB.ttl=3600
@@ -27,8 +27,8 @@ thread.pool.number=10

# ClickHouse time-range mode: 0 = read the past hour; 1 = use the configured range below
clickhouse.time.limit.type=1
read.clickhouse.max.time=1603785961
read.clickhouse.min.time=1603354682
read.clickhouse.max.time=1608518990
read.clickhouse.min.time=1604851201

arangoDB.read.limit=1
update.arango.batch=10000
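`update.arango.batch` caps how many documents are sent to ArangoDB per write round-trip. The batching code itself is not part of this diff; a hypothetical sketch of how such a cap is typically applied:

```scala
// Hypothetical helper (not from this repo): buffer documents and flush every
// `batchSize` of them, then flush whatever remains at the end.
def flushInBatches[T](docs: Iterator[T], batchSize: Int)(flush: Seq[T] => Unit): Unit = {
  val buf = scala.collection.mutable.ArrayBuffer.empty[T]
  docs.foreach { doc =>
    buf += doc
    if (buf.size >= batchSize) { flush(buf.toSeq); buf.clear() }
  }
  if (buf.nonEmpty) flush(buf.toSeq) // trailing partial batch
}
```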
@@ -4,20 +4,15 @@ log4j.logger.org.apache.http.wire=OFF

#Log4j
log4j.rootLogger=info,console,file
# Console log configuration
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=warn
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n

# File log configuration
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=warn
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Log file path; a relative path is resolved against the run directory
#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
log4j.appender.file.file=./logs/ip-learning-application.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
@@ -0,0 +1,20 @@
package cn.ac.iie.dao

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.spark.rdd.{ArangoRdd, ReadOptions}
import cn.ac.iie.utils.SparkSessionUtil.sparkContext
import org.slf4j.LoggerFactory

object BaseArangoData {
  private val LOG = LoggerFactory.getLogger(BaseArangoData.getClass)
  private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)

  def loadArangoRdd[T](name: String): ArangoRdd[T] = {
    val value = ArangoSpark.load[T](sparkContext, name, options)

    LOG.warn(s"Read $name from arangoDb: ${value.count()} documents")
    value
  }

}
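The new object wraps the forked ArangoSpark connector behind a single entry point. A sketch of the call pattern the rest of this commit uses (collection name "FQDN" as in MergeDataFrame below):

```scala
// Load the FQDN vertex collection as an RDD of BaseDocument and key it by
// document _key, which is the shape the merge joins below expect.
import com.arangodb.entity.BaseDocument

val fqdnDocs = BaseArangoData.loadArangoRdd[BaseDocument]("FQDN")
val docsByKey = fqdnDocs.map(doc => (doc.getKey, doc)) // RDD[(String, BaseDocument)]
```

Note that loadArangoRdd calls count() for its log line, which forces a full evaluation of the RDD; callers pay that cost once per load.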
@@ -32,134 +32,6 @@ object BaseClickhouseData {
    dataFrame
  }

  def loadConnectionDataFromCk(): Unit = {
    val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
    val sql =
      s"""
         |(SELECT
         |  ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
         |FROM
         |  connection_record_log
         |WHERE $where) as dbtable
      """.stripMargin

    LOG.warn(sql)
    initClickhouseData(sql)
  }
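`initClickhouseData` is not shown in this diff, but the string built here has the shape Spark's JDBC source expects for its `dbtable` option: a parenthesized subquery with an alias, so the WHERE clause is evaluated inside ClickHouse rather than after a full table scan. A sketch under that assumption (URL and option values are placeholders):

```scala
// Assumed plumbing: pass the "(SELECT ...) as dbtable" subquery to the JDBC
// reader so filtering happens server-side in ClickHouse.
val df = spark.read
  .format("jdbc")
  .option("url", "jdbc:clickhouse://192.168.40.223:8123/tsg_galaxy_v3") // placeholder
  .option("dbtable", sql)        // the subquery string built above
  .option("fetchsize", "10000")  // cf. spark.read.clickhouse.fetchsize
  .load()
```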
  private def loadRadiusDataFromCk(): Unit = {
    val where =
      s"""
         | common_recv_time >= ${timeLimit._2}
         | AND common_recv_time < ${timeLimit._1}
         | AND common_subscriber_id != ''
         | AND radius_framed_ip != ''
         | AND radius_packet_type = 4
         | AND radius_acct_status_type = 1
      """.stripMargin
    val sql =
      s"""
         |(SELECT
         |  common_subscriber_id,radius_framed_ip,common_recv_time
         |FROM
         |  tsg_galaxy_v3.radius_record_log
         |WHERE
         |  $where) as dbtable
      """.stripMargin
    LOG.warn(sql)
    initClickhouseData(sql)
  }

  /*
  def getVertexIpDf: DataFrame = {
    loadConnectionDataFromCk()
    val sql =
      """
        |SELECT
        |  *
        |FROM
        |  (
        |    (
        |      SELECT
        |        common_client_ip AS IP,
        |        MIN(common_recv_time) AS FIRST_FOUND_TIME,
        |        MAX(common_recv_time) AS LAST_FOUND_TIME,
        |        count(*) as SESSION_COUNT,
        |        sum(common_c2s_byte_num) as BYTES_SUM,
        |        'client' as ip_type
        |      FROM
        |        global_temp.dbtable
        |      GROUP BY
        |        IP
        |    )
        |    UNION ALL
        |    (
        |      SELECT
        |        common_server_ip AS IP,
        |        MIN(common_recv_time) AS FIRST_FOUND_TIME,
        |        MAX(common_recv_time) AS LAST_FOUND_TIME,
        |        count(*) as SESSION_COUNT,
        |        sum(common_s2c_byte_num) as BYTES_SUM,
        |        'server' as ip_type
        |      FROM
        |        global_temp.dbtable
        |      GROUP BY
        |        IP
        |    )
        |  )
      """.stripMargin
    LOG.warn(sql)
    val vertexIpDf = spark.sql(sql)
    vertexIpDf.printSchema()
    vertexIpDf
  }

  def getRelationFqdnLocateIpDf: DataFrame = {
    loadConnectionDataFromCk()
    val sslSql =
      """
        |SELECT
        |  ssl_sni AS FQDN,
        |  common_server_ip,
        |  MAX(common_recv_time) AS LAST_FOUND_TIME,
        |  MIN(common_recv_time) AS FIRST_FOUND_TIME,
        |  COUNT(*) AS COUNT_TOTAL,
        |  collect_set(common_client_ip) AS DIST_CIP_RECENT,
        |  'TLS' AS schema_type
        |FROM
        |  global_temp.dbtable
        |WHERE
        |  common_schema_type = 'SSL'
        |GROUP BY
        |  ssl_sni,common_server_ip
      """.stripMargin

    val httpSql =
      """
        |SELECT
        |  http_host AS FQDN,
        |  common_server_ip,
        |  MAX(common_recv_time) AS LAST_FOUND_TIME,
        |  MIN(common_recv_time) AS FIRST_FOUND_TIME,
        |  COUNT(*) AS COUNT_TOTAL,
        |  collect_set(common_client_ip) AS DIST_CIP_RECENT,
        |  'HTTP' AS schema_type
        |FROM
        |  global_temp.dbtable
        |WHERE
        |  common_schema_type = 'HTTP'
        |GROUP BY
        |  http_host,common_server_ip
      """.stripMargin
    val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"

    LOG.warn(sql)
    val relationFqdnLocateIpDf = spark.sql(sql)
    relationFqdnLocateIpDf.printSchema()
    relationFqdnLocateIpDf
  }
  */

  def getVertexFqdnDf: DataFrame = {
    val sql =
      """
@@ -168,12 +40,12 @@ object BaseClickhouseData {
        |FROM
        |  ((SELECT
        |    ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
        |  FROM tsg_galaxy_v3.connection_record_log
        |  FROM connection_record_log
        |  WHERE common_schema_type = 'SSL' GROUP BY ssl_sni
        |  ) UNION ALL
        |  (SELECT
        |    http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
        |  FROM tsg_galaxy_v3.connection_record_log
        |  FROM connection_record_log
        |  WHERE common_schema_type = 'HTTP' GROUP BY http_host))
        |GROUP BY FQDN HAVING FQDN != '') as dbtable
      """.stripMargin
@@ -194,7 +66,7 @@ object BaseClickhouseData {
        |SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
        |groupUniqArray(2)(common_link_info_c2s)[2] as common_link_info,
        |'client' as ip_type
        |FROM tsg_galaxy_v3.connection_record_log
        |FROM connection_record_log
        |where $where
        |group by common_client_ip)
        |UNION ALL
@@ -205,7 +77,7 @@ object BaseClickhouseData {
        |SUM(common_c2s_byte_num+common_s2c_byte_num) as BYTES_SUM,
        |groupUniqArray(2)(common_link_info_s2c)[2] as common_link_info,
        |'server' as ip_type
        |FROM tsg_galaxy_v3.connection_record_log
        |FROM connection_record_log
        |where $where
        |group by common_server_ip))) as dbtable
      """.stripMargin
@@ -223,12 +95,12 @@ object BaseClickhouseData {
        |(SELECT * FROM
        |((SELECT ssl_sni AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
        |toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'TLS' AS schema_type
        |FROM tsg_galaxy_v3.connection_record_log
        |FROM connection_record_log
        |WHERE $where and common_schema_type = 'SSL' GROUP BY ssl_sni,common_server_ip)
        |UNION ALL
        |(SELECT http_host AS FQDN,common_server_ip,MAX(common_recv_time) AS LAST_FOUND_TIME,MIN(common_recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
        |toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(common_client_ip)) AS DIST_CIP_RECENT,'HTTP' AS schema_type
        |FROM tsg_galaxy_v3.connection_record_log
        |FROM connection_record_log
        |WHERE $where and common_schema_type = 'HTTP' GROUP BY http_host,common_server_ip))
        |WHERE FQDN != '') as dbtable
      """.stripMargin
@@ -3,37 +3,31 @@ package cn.ac.iie.service.transform
import java.util.regex.Pattern

import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.ArangoSpark
import cn.ac.iie.dao.{BaseArangoData, BaseClickhouseData}
import cn.ac.iie.spark.partition.CustomPartitioner
import cn.ac.iie.spark.rdd.ReadOptions
import cn.ac.iie.spark.rdd.ArangoRdd
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.slf4j.LoggerFactory
import cn.ac.iie.utils.SparkSessionUtil._

object MergeDataFrame {
  private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
  private val pattern = Pattern.compile("^[\\d]*$")
  private val options = ReadOptions(ApplicationConfig.ARANGODB_DB_NAME)

  def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Option[Row]))] = {
    val fqdnAccmu = getLongAccumulator("FQDN Accumulator")
    val fqdnRddRow = BaseClickhouseData.getVertexFqdnDf
  def mergeVertexFqdn(): RDD[(String, (Option[BaseDocument], Row))] = {
    val fqdnRddRow: RDD[(String, Row)] = BaseClickhouseData.getVertexFqdnDf
      .rdd.filter(row => isDomain(row.getAs[String](0))).map(row => {
        fqdnAccmu.add(1)
        (row.getAs[String]("FQDN"), row)
      }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
    fqdnRddRow.cache()
    val fqdnRddDoc = ArangoSpark.load[BaseDocument](sparkContext, "FQDN", options)

    fqdnRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnRddRow)
    val fqdnRddDoc: ArangoRdd[BaseDocument] = BaseArangoData.loadArangoRdd[BaseDocument]("FQDN")

    fqdnRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnRddRow)
  }
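The recurring change in this file is `fullOuterJoin` → `rightOuterJoin`, which is what lets the result type tighten from `(Option[BaseDocument], Option[Row])` to `(Option[BaseDocument], Row)`: only keys present in the ClickHouse row RDD survive, so the Row side can never be absent. A self-contained illustration:

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder.master("local[*]").appName("join-demo").getOrCreate()
val sc = spark.sparkContext

val docs = sc.parallelize(Seq("a" -> 1, "b" -> 2))   // stands in for ArangoDB documents
val rows = sc.parallelize(Seq("b" -> 20, "c" -> 30)) // stands in for ClickHouse rows

// fullOuterJoin keeps keys from either side: RDD[(String, (Option[Int], Option[Int]))]
// e.g. (a,(Some(1),None)), (b,(Some(2),Some(20))), (c,(None,Some(30))) -- order may vary
docs.fullOuterJoin(rows).collect()

// rightOuterJoin keeps only keys present on the right: RDD[(String, (Option[Int], Int))]
// e.g. (b,(Some(2),20)), (c,(None,30)) -- documents with no fresh row are dropped
docs.rightOuterJoin(rows).collect()
```

That matches the downstream getXRow helpers in UpdateDocument, which now receive a Row directly and no longer unwrap an Option.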
  def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Option[Row]))] = {
    val ipAccum = getLongAccumulator("IP Accumulator")
  def mergeVertexIp(): RDD[(String, (Option[BaseDocument], Row))] = {
    val vertexIpDf = BaseClickhouseData.getVertexIpDf
    val frame = vertexIpDf.groupBy("IP").agg(
      min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
@@ -44,17 +38,14 @@ object MergeDataFrame {
      last("common_link_info").alias("common_link_info")
    )
    val ipRddRow = frame.rdd.map(row => {
      ipAccum.add(1)
      (row.getAs[String]("IP"), row)
    }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
    val ipRddDoc = ArangoSpark.load[BaseDocument](sparkContext, "IP", options)

    ipRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(ipRddRow)
    val ipRddDoc = BaseArangoData.loadArangoRdd[BaseDocument]("IP")
    ipRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(ipRddRow)
  }

  def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] = {
    val fqdnLocIpAccum = getLongAccumulator("R_LOCATE_FQDN2IP Accumulator")
  def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
    val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
      .groupBy("FQDN", "common_server_ip")
      .agg(
@@ -68,54 +59,46 @@ object MergeDataFrame {
      val fqdn = row.getAs[String]("FQDN")
      val serverIp = row.getAs[String]("common_server_ip")
      val key = fqdn.concat("-" + serverIp)
      fqdnLocIpAccum.add(1)
      (key, row)
    }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
    val fqdnLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext, "R_LOCATE_FQDN2IP", options)

    fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(fqdnLocIpRddRow)
    val fqdnLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_FQDN2IP")
    fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnLocIpRddRow)
  }

  def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Option[Row]))] = {
    val subidLocIpAccum = getLongAccumulator("R_LOCATE_SUBSCRIBER2IP Accumulator")
  def mergeRelationSubidLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
    val subidLocIpRddRow = BaseClickhouseData.getRelationSubidLocateIpDf
      .repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
      .rdd.map(row => {
        val commonSubscriberId = row.getAs[String]("common_subscriber_id")
        val ip = row.getAs[String]("radius_framed_ip")
        val key = commonSubscriberId.concat("-" + ip)
        subidLocIpAccum.add(1)
        (key, row)
      }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))
    val subidLocIpRddDoc = ArangoSpark.load[BaseEdgeDocument](sparkContext, "R_LOCATE_SUBSCRIBER2IP", options)
    val subidLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_SUBSCRIBER2IP")

    subidLocIpRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidLocIpRddRow)
    subidLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(subidLocIpRddRow)
  }

  def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Option[Row]))] = {
    val subidAccum = getLongAccumulator("SUBSCRIBER Accumulator")
  def mergeVertexSubid(): RDD[(String, (Option[BaseDocument], Row))] = {
    val subidRddRow = BaseClickhouseData.getVertexSubidDf
      .repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
      .rdd.map(row => {
        val commonSubscriberId = row.getAs[String]("common_subscriber_id")
        subidAccum.add(1)
        (commonSubscriberId, row)
      }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))

    val subidRddDoc = ArangoSpark.load[BaseDocument](sparkContext, "SUBSCRIBER", options)
    val subidRddDoc = BaseArangoData.loadArangoRdd[BaseDocument]("SUBSCRIBER")

    subidRddDoc.map(doc => (doc.getKey, doc)).fullOuterJoin(subidRddRow)
    subidRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(subidRddRow)
  }

  def mergeVertexFrameIp: RDD[Row] = {
    val framedIpAccum = getLongAccumulator("framed ip Accumulator")
    val values = BaseClickhouseData.getVertexFramedIpDf
      .repartition(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
      .rdd.map(row => {
        val ip = row.getAs[String]("radius_framed_ip")
        framedIpAccum.add(1)
        (ip, row)
      }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
    values
@@ -29,10 +29,12 @@ object UpdateDocHandler {
      val hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
      hisDoc.addAttribute(attributeName, newAttribute + hisAttribute)
    }

  }

  def replaceAttribute(hisDoc: BaseDocument, newAttribute: String, attributeName: String): Unit = {
    hisDoc.addAttribute(attributeName, newAttribute)
    // hisDoc.addAttribute(attributeName, newAttribute)
    hisDoc.updateAttribute(attributeName, newAttribute)
  }

  def separateAttributeByIpType(ipTypeList: ofRef[String],
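replaceAttribute now calls `updateAttribute` instead of `addAttribute`. In the arangodb-java-driver versions we have seen, `addAttribute` puts the key into the document's property map unconditionally, while `updateAttribute` overwrites only when the key already exists; worth verifying against the driver version this project pins. A small sketch of the difference under that assumption:

```scala
import com.arangodb.entity.BaseDocument

val doc = new BaseDocument("k1")
doc.updateAttribute("LAST_FOUND_TIME", 1604851201L) // assumed no-op: key absent
doc.addAttribute("LAST_FOUND_TIME", 1604851201L)    // sets the key unconditionally
doc.updateAttribute("LAST_FOUND_TIME", 1608518990L) // now overwrites the value
```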
@@ -19,13 +19,13 @@ object UpdateDocument {

  def update(): Unit = {
    try {
      updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)
      // updateDocument("FQDN", getVertexFqdnRow, mergeVertexFqdn)

      updateDocument("SUBSCRIBER", getVertexSubidRow, mergeVertexSubid)
      // updateDocument("SUBSCRIBER", getVertexSubidRow, mergeVertexSubid)

      insertFrameIp()
      // insertFrameIp()

      updateDocument("R_LOCATE_SUBSCRIBER2IP", getRelationSubidLocateIpRow, mergeRelationSubidLocateIp)
      // updateDocument("R_LOCATE_SUBSCRIBER2IP", getRelationSubidLocateIpRow, mergeRelationSubidLocateIp)

      updateDocument("R_LOCATE_FQDN2IP", getRelationFqdnLocateIpRow, mergeRelationFqdnLocateIp)

@@ -41,18 +41,23 @@ object UpdateDocument {
  }

  private def updateDocument[T <: BaseDocument](collName: String,
                                                getDocumentRow: ((String, (Option[T], Option[Row]))) => T,
                                                getJoinRdd: () => RDD[(String, (Option[T], Option[Row]))]
                                                getDocumentRow: ((String, (Option[T], Row))) => T,
                                                getJoinRdd: () => RDD[(String, (Option[T], Row))]
                                               ): Unit = {
    try {
      val start = System.currentTimeMillis()
      val joinRdd = getJoinRdd()

      val fqdnAccmu = SparkSessionUtil.getLongAccumulator(s"$collName Accumulator")

      joinRdd.foreachPartition(iter => {
        val resultDocumentList = new util.ArrayList[T]
        var i = 0
        iter.foreach(row => {
          val document = getDocumentRow(row)
          val document: T = getDocumentRow(row)
          if (document != null) {
            fqdnAccmu.add(1)

            resultDocumentList.add(document)
          }
          i += 1
@@ -67,8 +72,11 @@ object UpdateDocument {
          LOG.warn(s"Updated $collName: " + i)
        }
      })

      LOG.warn(s"Updated $collName, count: ${fqdnAccmu.value}")

      val last = System.currentTimeMillis()
      LOG.warn(s"Updating $collName took: ${last - start} ms")
    } catch {
      case e: Exception => LOG.error(s"Failed to update $collName", e)
    }
@@ -103,7 +111,7 @@ object UpdateDocument {
    document
  }

  private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument = {
  private def getRelationSubidLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Row))): BaseEdgeDocument = {

    val subidLocIpDocOpt = joinRow._2._1
    var subidLocIpDoc = subidLocIpDocOpt match {
@@ -111,12 +119,12 @@ object UpdateDocument {
      case None => null
    }

    val subidLocIpRowOpt = joinRow._2._2
    val subidLocIpRow = joinRow._2._2

    val subidLocIpRow = subidLocIpRowOpt match {
      case Some(r) => r
      case None => null
    }
    // val subidLocIpRow = subidLocIpRowOpt match {
    //   case Some(r) => r
    //   case None => null
    // }

    if (subidLocIpRow != null) {
      val subId = subidLocIpRow.getAs[String]("common_subscriber_id")
@@ -141,19 +149,19 @@ object UpdateDocument {
    subidLocIpDoc
  }

  private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
  private def getVertexSubidRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
    val subidDocOpt = joinRow._2._1
    var subidDoc = subidDocOpt match {
      case Some(doc) => doc
      case None => null
    }

    val subidRowOpt = joinRow._2._2
    val subidRow = joinRow._2._2

    val subidRow = subidRowOpt match {
      case Some(r) => r
      case None => null
    }
    // val subidRow = subidRowOpt match {
    //   case Some(r) => r
    //   case None => null
    // }

    if (subidRow != null) {
      val subId = subidRow.getAs[String]("common_subscriber_id")
@@ -173,19 +181,19 @@ object UpdateDocument {
    subidDoc
  }

  private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
  private def getVertexFqdnRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
    val fqdnDocOpt = joinRow._2._1
    var fqdnDoc = fqdnDocOpt match {
      case Some(doc) => doc
      case None => null
    }

    val fqdnRowOpt = joinRow._2._2
    val fqdnRow: Row = joinRow._2._2

    val fqdnRow = fqdnRowOpt match {
      case Some(r) => r
      case None => null
    }
    // val fqdnRow = fqdnRowOpt match {
    //   case Some(r) => r
    //   case None => null
    // }

    if (fqdnRow != null) {
      val fqdn = fqdnRow.getAs[String]("FQDN")
@@ -205,19 +213,19 @@ object UpdateDocument {
    fqdnDoc
  }

  private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Option[Row]))): BaseDocument = {
  private def getVertexIpRow(joinRow: (String, (Option[BaseDocument], Row))): BaseDocument = {
    val ipDocOpt = joinRow._2._1
    var ipDoc = ipDocOpt match {
      case Some(doc) => doc
      case None => null
    }

    val ipRowOpt = joinRow._2._2
    val ipRow = joinRow._2._2

    val ipRow = ipRowOpt match {
      case Some(r) => r
      case None => null
    }
    // val ipRow = ipRowOpt match {
    //   case Some(r) => r
    //   case None => null
    // }

    if (ipRow != null) {
      val ip = ipRow.getAs[String]("IP")
@@ -253,7 +261,7 @@ object UpdateDocument {
    ipDoc
  }

  private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Option[Row]))): BaseEdgeDocument = {
  private def getRelationFqdnLocateIpRow(joinRow: (String, (Option[BaseEdgeDocument], Row))): BaseEdgeDocument = {

    val fqdnLocIpDocOpt = joinRow._2._1
    var fqdnLocIpDoc = fqdnLocIpDocOpt match {
@@ -261,12 +269,12 @@ object UpdateDocument {
      case None => null
    }

    val fqdnLocIpRowOpt = joinRow._2._2
    val fqdnLocIpRow = joinRow._2._2

    val fqdnLocIpRow = fqdnLocIpRowOpt match {
      case Some(r) => r
      case None => null
    }
    // val fqdnLocIpRow = fqdnLocIpRowOpt match {
    //   case Some(r) => r
    //   case None => null
    // }

    if (fqdnLocIpDoc != null) {
      updateProtocolDocument(fqdnLocIpDoc)
@@ -30,7 +30,7 @@ object SparkSessionUtil {
    spark
  }

  def getContext: SparkContext = {
  private def getContext: SparkContext = {
    @transient var sc: SparkContext = null
    if (sparkContext == null) sc = spark.sparkContext
    sc