IP Learning: initial Spark version commit for the tsg project

This commit is contained in:
wanglihui
2020-08-06 16:13:59 +08:00
parent 4e58044a16
commit 0d02f2066c
16 changed files with 862 additions and 69 deletions

View File

@@ -1,5 +1,51 @@
package cn.ac.iie.config
import com.typesafe.config.{Config, ConfigFactory}
object ApplicationConfig {
private lazy val config: Config = ConfigFactory.load()
val SPARK_SQL_SHUFFLE_PARTITIONS: Int = config.getInt("spark.sql.shuffle.partitions")
val SPARK_EXECUTOR_MEMORY: String = config.getString("spark.executor.memory")
val SPARK_APP_NAME: String = config.getString("spark.app.name")
val SPARK_NETWORK_TIMEOUT: String = config.getString("spark.network.timeout")
// val REPARTITION_NUMBER: Int = config.getInt("repartitionNumber")
val MASTER: String = config.getString("master")
val SPARK_SERIALIZER: String = config.getString("spark.serializer")
val NUMPARTITIONS: String = config.getString("spark.read.clickhouse.numPartitions")
val SPARK_READ_CLICKHOUSE_URL: String = config.getString("spark.read.clickhouse.url")
val SPARK_READ_CLICKHOUSE_DRIVER: String = config.getString("spark.read.clickhouse.driver")
val SPARK_READ_CLICKHOUSE_USER: String = config.getString("spark.read.clickhouse.user")
val SPARK_READ_CLICKHOUSE_PASSWORD: String = config.getString("spark.read.clickhouse.password")
val SPARK_READ_CLICKHOUSE_FETCHSIZE: String = config.getString("spark.read.clickhouse.fetchsize")
val SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN: String = config.getString("spark.read.clickhouse.partitionColumn")
val ARANGODB_HOST: String = config.getString("arangoDB.host")
val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
val ARANGODB_USER: String = config.getString("arangoDB.user")
val ARANGODB_PASSWORD: String = config.getString("arangoDB.password")
val ARANGODB_DB_NAME: String = config.getString("arangoDB.DB.name")
val ARANGODB_TTL: Int = config.getInt("arangoDB.ttl")
val CLICKHOUSE_SOCKET_TIMEOUT: Int = config.getInt("clickhouse.socket.timeout")
val THREAD_POOL_NUMBER: Int = config.getInt("thread.pool.number")
val CLICKHOUSE_TIME_LIMIT_TYPE: Int = config.getInt("clickhouse.time.limit.type")
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type")
val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")
val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")
val UPDATE_INTERVAL: Int = config.getInt("update.interval")
}
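
ConfigFactory.load() resolves these keys from an application.conf on the classpath. For reference, a minimal HOCON sketch covering the keys read above — every value below is an illustrative placeholder; only the key names come from the object itself:

spark.app.name = "ip-learning"
spark.sql.shuffle.partitions = 200
spark.executor.memory = "4g"
spark.network.timeout = "600s"
spark.serializer = "org.apache.spark.serializer.KryoSerializer"
master = "local[*]"
spark.read.clickhouse {
  url = "jdbc:clickhouse://127.0.0.1:8123/tsg_galaxy_v3"
  driver = "ru.yandex.clickhouse.ClickHouseDriver"
  user = "default"
  password = ""
  numPartitions = "8"
  partitionColumn = "common_recv_time"
  fetchsize = "10000"
}
arangoDB {
  host = "127.0.0.1"
  port = 8529
  user = "root"
  password = ""
  DB.name = "ip_learning"
  ttl = 86400
  read.limit = "100000"
}
clickhouse.socket.timeout = 300000
clickhouse.time.limit.type = 0
read.clickhouse.max.time = 0
read.clickhouse.min.time = 0
arango.time.limit.type = 0
read.arango.max.time = 0
read.arango.min.time = 0
update.arango.batch = 5000
thread.pool.number = 4
recent.count.hour = 24
distinct.client.ip.num = 1000
update.interval = 3600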

View File

@@ -1,5 +1,209 @@
package cn.ac.iie.dao
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.utils.SparkSessionUtil.spark
import org.apache.spark.sql.DataFrame
import org.slf4j.LoggerFactory
object BaseClickhouseData {
private val LOG = LoggerFactory.getLogger(BaseClickhouseData.getClass)
// Current hour expressed in epoch seconds, truncated to the hour boundary.
val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
private val timeLimit: (Long, Long) = getTimeLimit
private def initClickhouseData(sql:String): Unit ={
val dataFrame: DataFrame = spark.read.format("jdbc")
.option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
.option("dbtable", sql)
.option("driver", ApplicationConfig.SPARK_READ_CLICKHOUSE_DRIVER)
.option("user", ApplicationConfig.SPARK_READ_CLICKHOUSE_USER)
.option("password", ApplicationConfig.SPARK_READ_CLICKHOUSE_PASSWORD)
.option("numPartitions", ApplicationConfig.NUMPARTITIONS)
.option("partitionColumn", ApplicationConfig.SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN)
.option("lowerBound", timeLimit._2)
.option("upperBound", timeLimit._1)
.option("fetchsize", ApplicationConfig.SPARK_READ_CLICKHOUSE_FETCHSIZE)
.option("socket_timeout",ApplicationConfig.CLICKHOUSE_SOCKET_TIMEOUT)
.load()
dataFrame.printSchema()
dataFrame.createOrReplaceGlobalTempView("dbtable")
}
def loadConnectionDataFromCk(): Unit ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT
| ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
|FROM
| connection_record_log
|WHERE $where) as dbtable
""".stripMargin
LOG.warn(sql)
initClickhouseData(sql)
}
private def loadRadiusDataFromCk(): Unit ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
| AND radius_acct_status_type = 1
""".stripMargin
val sql =
s"""
|(SELECT
| common_subscriber_id,radius_framed_ip,common_recv_time
|FROM
| tsg_galaxy_v3.radius_record_log
|WHERE
| $where) as dbtable
""".stripMargin
LOG.warn(sql)
initClickhouseData(sql)
}
def getVertexFqdnDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| )
| UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| )
| )
|GROUP BY
| FQDN
|HAVING
| FQDN != ''
""".stripMargin
LOG.warn(sql)
val vertexFqdnDf = spark.sql(sql)
vertexFqdnDf.printSchema()
vertexFqdnDf
}
def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| *
|FROM
| (
| (
| SELECT
| common_client_ip AS IP,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(common_c2s_byte_num) as BYTES_SUM,
| 'client' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| UNION ALL
| (
| SELECT
| common_server_ip AS IP,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(common_s2c_byte_num) as BYTES_SUM,
| 'server' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| )
""".stripMargin
LOG.warn(sql)
val vertexIpDf = spark.sql(sql)
vertexIpDf.printSchema()
vertexIpDf
}
def getRelationFqdnLocateIpDf: DataFrame ={
loadConnectionDataFromCk()
val sslSql =
"""
|SELECT
| ssl_sni AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'TLS' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'SSL'
|GROUP BY
| ssl_sni,common_server_ip
""".stripMargin
val httpSql =
"""
|SELECT
| http_host AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'HTTP' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'HTTP'
|GROUP BY
| http_host,common_server_ip
""".stripMargin
val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"
LOG.warn(sql)
val relationFqdnLocateIpDf = spark.sql(sql)
relationFqdnLocateIpDf.printSchema()
relationFqdnLocateIpDf
}
// Returns (maxTime, minTime): mode 0 takes the UPDATE_INTERVAL seconds ending at the current hour; mode 1 takes a fixed window from config.
private def getTimeLimit: (Long,Long) ={
var maxTime = 0L
var minTime = 0L
ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE match {
case 0 =>
maxTime = currentHour
minTime = maxTime - ApplicationConfig.UPDATE_INTERVAL
case 1 =>
maxTime = ApplicationConfig.READ_CLICKHOUSE_MAX_TIME
minTime = ApplicationConfig.READ_CLICKHOUSE_MIN_TIME
case _ =>
}
(maxTime, minTime)
}
}
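
Each load registers its DataFrame as a global temp view, so downstream stages query it through Spark's reserved global_temp database. A minimal usage sketch (the count query is illustrative):

import cn.ac.iie.utils.SparkSessionUtil.spark

BaseClickhouseData.loadConnectionDataFromCk()
// Global temp views live under the reserved global_temp database and are shared across sessions.
spark.sql("SELECT COUNT(*) FROM global_temp.dbtable").show()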

View File

@@ -1,5 +1,22 @@
package cn.ac.iie.main
import cn.ac.iie.service.update.UpdateDocument._
import cn.ac.iie.utils.{ExecutorThreadPool, SparkSessionUtil}
object IpLearningApplication {
private val pool = ExecutorThreadPool.getInstance
def main(args: Array[String]): Unit = {
try {
updateVertexFqdn()
updateVertexIp()
updateRelationFqdnLocateIp()
}catch {
case e:Exception => e.printStackTrace()
}finally {
pool.shutdown()
arangoManger.clean()
SparkSessionUtil.closeSpark()
}
}
}
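
A typical launch would look like the following spark-submit invocation — the jar name and master are illustrative; only the main class comes from the code above:

spark-submit \
  --class cn.ac.iie.main.IpLearningApplication \
  --master yarn \
  ip-learning-spark.jar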

View File

@@ -1,5 +1,88 @@
package cn.ac.iie.service.transform
import java.util.regex.Pattern
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.partition.CustomPartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.slf4j.LoggerFactory
object MergeDataFrame {
private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
private val pattern = Pattern.compile("^[\\d]*$")
def mergeVertexFqdn(): RDD[Row] ={
BaseClickhouseData.getVertexFqdnDf
.rdd.filter(row => isDomain(row.getAs[String](0))).map(row => (row.get(0),row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}
def mergeVertexIp(): RDD[Row]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list")
)
val values = frame.rdd.map(row => (row.get(0), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
values
}
def mergeRelationFqdnLocateIp(): RDD[Row] ={
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "common_server_ip")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
collect_list("schema_type").alias("schema_type_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
)
frame.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-"+serverIp)
(key,row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}
// Heuristic: treat the string as a domain unless it is an IPv4 literal (or IPv6-like, containing more than one colon).
private def isDomain(fqdn: String): Boolean = {
try {
if (fqdn == null || fqdn.length == 0) {
return false
}
var host = fqdn
val colonIdx = fqdn.indexOf(':')
if (colonIdx >= 0) {
// A second colon marks an IPv6-style literal, not a domain.
if (fqdn.indexOf(':', colonIdx + 1) >= 0) {
return false
}
// Strip a trailing ":port" before inspecting the host part.
host = fqdn.substring(0, colonIdx)
}
val fqdnArr = host.split("\\.")
if (fqdnArr.length != 4) {
return true
}
// Exactly four dot-separated parts: a domain unless every part is numeric and in [0, 255].
for (f <- fqdnArr) {
if (pattern.matcher(f).matches) {
val i = f.toLong
if (i < 0 || i > 255) {
return true
}
} else {
return true
}
}
} catch {
case e: Exception =>
LOG.error("Failed to parse domain " + fqdn + ":\n" + e.toString)
}
false
}
}
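
For intuition about the isDomain filter: it keeps hostnames and drops bare IP literals. A standalone sketch of the same rule (hypothetical helper, mirroring the private method above, not part of the commit):

object IsDomainDemo {
  private val digits = java.util.regex.Pattern.compile("^[\\d]+$")
  // True when the string is a dotted-quad IPv4 literal; isDomain returns the opposite for such input.
  private def looksLikeIpv4(s: String): Boolean = {
    val parts = s.split("\\.")
    parts.length == 4 && parts.forall(p => digits.matcher(p).matches && p.toLong <= 255)
  }
  def main(args: Array[String]): Unit = {
    println(looksLikeIpv4("1.2.3.4"))     // true  -> isDomain returns false
    println(looksLikeIpv4("example.com")) // false -> isDomain returns true
  }
}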

View File

@@ -1,5 +1,123 @@
package cn.ac.iie.service.update
import java.lang
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.read.ReadHistoryArangoData
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import scala.collection.mutable
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocHandler {
val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS")
def updateMaxAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
var hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
if (newAttribute > hisAttribute){
hisAttribute = newAttribute
}
hisDoc.addAttribute(attributeName,hisAttribute)
}
def updateSumAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
val hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
hisDoc.addAttribute(attributeName,newAttribute+hisAttribute)
}
// After the union-by-type aggregation each IP contributes at most one 'client' and one 'server' row, so plain assignment (not accumulation) is enough.
def separateAttributeByIpType(ipTypeList:ofRef[String],
sessionCountList:ofRef[AnyRef],
bytesSumList:ofRef[AnyRef]): (Long,Long,Long,Long) ={
var serverSessionCount = 0L
var serverBytesSum = 0L
var clientSessionCount = 0L
var clientBytesSum = 0L
if (ipTypeList.length == sessionCountList.length && ipTypeList.length == bytesSumList.length){
sessionCountList.zip(bytesSumList).zip(ipTypeList).foreach(t => {
t._2 match {
case "server" =>
serverSessionCount = t._1._1.toString.toLong
serverBytesSum = t._1._2.toString.toLong
case "client" =>
clientSessionCount = t._1._1.toString.toLong
clientBytesSum = t._1._2.toString.toLong
}
})
}
(serverSessionCount, serverBytesSum, clientSessionCount, clientBytesSum)
}
def separateAttributeByProtocol(schemaTypeList:ofRef[AnyRef],countTotalList:ofRef[AnyRef]): Map[String, Long] ={
var protocolMap: Map[String, Long] = Map()
if (schemaTypeList.length == countTotalList.length){
protocolMap = schemaTypeList.zip(countTotalList).map(t => (t._1.toString,t._2.toString.toLong)).toMap
}
PROTOCOL_SET.foreach(protocol => {
if (!protocolMap.contains(protocol)){
protocolMap += (protocol -> 0L)
}
})
protocolMap
}
def updateProtocolAttritube(hisDoc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString
protocolMap.foreach(t => {
if (t._2 > 0 && !protocolType.contains(t._1)){
protocolType = protocolType.concat(","+ t._1)
}
val cntTotalName = t._1.concat("_CNT_TOTAL")
val cntRecentName = t._1.concat("_CNT_RECENT")
val cntRecent: Array[lang.Long] = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[java.lang.Long]]
cntRecent.update(0,t._2)
updateSumAttribute(hisDoc,t._2,cntTotalName)
hisDoc.addAttribute(cntRecentName,cntRecent)
})
hisDoc.addAttribute("PROTOCOL_TYPE",protocolType)
}
def putProtocolAttritube(doc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
val protocolTypeBuilder = new mutable.StringBuilder()
protocolMap.foreach(t => {
if (t._2 > 0){
protocolTypeBuilder.append(","+t._1)
}
val cntTotalName = t._1.concat("_CNT_TOTAL")
val cntRecentName = t._1.concat("_CNT_RECENT")
val cntRecent: Array[Long] = new Array[Long](ApplicationConfig.RECENT_COUNT_HOUR)
cntRecent.update(0,t._2)
doc.addAttribute(cntTotalName,t._2)
doc.addAttribute(cntRecentName,cntRecent)
})
doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",",""))
}
def mergeDistinctIp(distCipRecent:ofRef[ofRef[String]]): Array[String] ={
distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}
def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val map = newDistinctIp.map(ip => {
(ip, ReadHistoryArangoData.currentHour)
}).toMap
doc.addAttribute("DIST_CIP",map.keys.toArray)
doc.addAttribute("DIST_CIP_TS",map.values.toArray)
}
def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[Array[Long]]
if (hisDistCip.length == hisDistCipTs.length){
val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*)
newDistinctIp.foreach(cip => {
muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour)
})
val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
hisDoc.addAttribute("DIST_CIP",resultMap.keys.toArray)
hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray)
}
}
}
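
A worked example of separateAttributeByProtocol: the two lists are zipped into a map, and any protocol from PROTOCOL_SET missing in the input is padded with 0. REPL-style sketch with illustrative values:

import scala.collection.mutable.WrappedArray.ofRef

val schemaTypes = new ofRef[AnyRef](Array[AnyRef]("TLS", "HTTP"))
val counts = new ofRef[AnyRef](Array[AnyRef](Long.box(10L), Long.box(5L)))
// Expected result: Map("TLS" -> 10, "HTTP" -> 5, "DNS" -> 0)
println(UpdateDocHandler.separateAttributeByProtocol(schemaTypes, counts))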

View File

@@ -1,5 +1,194 @@
package cn.ac.iie.service.update
import java.util
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import cn.ac.iie.service.transform.MergeDataFrame._
import cn.ac.iie.service.update.UpdateDocHandler._
import cn.ac.iie.utils.ArangoDBConnect
import cn.ac.iie.utils.SparkSessionUtil.spark
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.slf4j.LoggerFactory
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocument {
val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance()
private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
private val baseArangoData = new BaseArangoData()
def updateVertexFqdn(): Unit ={
baseArangoData.readHistoryData("FQDN",historyVertexFqdnMap,classOf[BaseDocument])
val hisVerFqdnBc = spark.sparkContext.broadcast(historyVertexFqdnMap)
try {
val start = System.currentTimeMillis()
val mergeVertexFqdnDf: RDD[Row] = mergeVertexFqdn()
mergeVertexFqdnDf.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val hisVerFqdnMapTmp = hisVerFqdnBc.value.get(partitionId)
val resultDocumentList: util.ArrayList[BaseDocument] = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val fqdn = row.getAs[String]("FQDN")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
var document: BaseDocument = hisVerFqdnMapTmp.getOrDefault(fqdn,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
} else{
document = new BaseDocument
document.setKey(fqdn)
document.addAttribute("FQDN_NAME",fqdn)
document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
}
resultDocumentList.add(document)
i+=1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
arangoManger.overwrite(resultDocumentList, "FQDN")
LOG.warn("Updated FQDN: " + i)
resultDocumentList.clear() // drain the flushed batch so documents are not re-written
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "FQDN")
LOG.warn("Updated FQDN: " + i)
}
})
val last = System.currentTimeMillis()
LOG.warn(s"更新FQDN时间${last-start}")
}catch {
case e:Exception => e.printStackTrace()
}finally {
hisVerFqdnBc.destroy()
}
}
def updateVertexIp(): Unit ={
baseArangoData.readHistoryData("IP",historyVertexIpMap,classOf[BaseDocument])
val hisVerIpBc = spark.sparkContext.broadcast(historyVertexIpMap)
try {
val start = System.currentTimeMillis()
val mergeVertexIpDf = mergeVertexIp()
mergeVertexIpDf.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val hisVerIpMapTmp = hisVerIpBc.value.get(partitionId)
val resultDocumentList: util.ArrayList[BaseDocument] = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val ip = row.getAs[String]("IP")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList,sessionCountList,bytesSumList)
var document = hisVerIpMapTmp.getOrDefault(ip,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
updateSumAttribute(document,sepAttributeTuple._1,"SERVER_SESSION_COUNT")
updateSumAttribute(document,sepAttributeTuple._2,"SERVER_BYTES_SUM")
updateSumAttribute(document,sepAttributeTuple._3,"CLIENT_SESSION_COUNT")
updateSumAttribute(document,sepAttributeTuple._4,"CLIENT_BYTES_SUM")
} else {
document = new BaseDocument
document.setKey(ip)
document.addAttribute("IP",ip)
document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
document.addAttribute("SERVER_SESSION_COUNT",sepAttributeTuple._1)
document.addAttribute("SERVER_BYTES_SUM",sepAttributeTuple._2)
document.addAttribute("CLIENT_SESSION_COUNT",sepAttributeTuple._3)
document.addAttribute("CLIENT_BYTES_SUM",sepAttributeTuple._4)
document.addAttribute("COMMON_LINK_INFO","")
}
resultDocumentList.add(document)
i+=1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn("Updated IP: " + i)
resultDocumentList.clear() // drain the flushed batch so documents are not re-written
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn("Updated IP: " + i)
}
})
val last = System.currentTimeMillis()
LOG.warn(s"更新IP时间${last-start}")
}catch {
case e:Exception => e.printStackTrace()
}finally {
hisVerIpBc.destroy()
}
}
def updateRelationFqdnLocateIp(): Unit ={
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
val hisReFqdnLocIpBc = spark.sparkContext.broadcast(historyRelationFqdnAddressIpMap)
try {
val start = System.currentTimeMillis()
val mergeRelationFqdnLocateIpDf = mergeRelationFqdnLocateIp()
mergeRelationFqdnLocateIpDf.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val hisRelaFqdnLocaIpMapTmp = hisReFqdnLocIpBc.value.get(partitionId)
val resultDocumentList: util.ArrayList[BaseEdgeDocument] = new util.ArrayList[BaseEdgeDocument]
var i = 0
iter.foreach(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
val distCipRecent = row.getAs[ofRef[ofRef[String]]]("DIST_CIP_RECENT")
val sepAttritubeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList,countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
val key = fqdn.concat("-"+serverIp)
var document: BaseEdgeDocument = hisRelaFqdnLocaIpMapTmp.getOrDefault(key,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
updateProtocolAttritube(document,sepAttritubeMap)
updateDistinctIp(document,distinctIp)
}else {
document = new BaseEdgeDocument()
document.setKey(key)
document.setFrom("FQDN/" + fqdn)
document.setTo("IP/" + serverIp)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putProtocolAttritube(document,sepAttritubeMap)
putDistinctIp(document,distinctIp)
}
resultDocumentList.add(document)
i+=1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
arangoManger.overwrite(resultDocumentList, "R_LOCATE_FQDN2IP")
LOG.warn("Updated R_LOCATE_FQDN2IP: " + i)
resultDocumentList.clear() // drain the flushed batch so documents are not re-written
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "R_LOCATE_FQDN2IP")
LOG.warn("Updated R_LOCATE_FQDN2IP: " + i)
}
})
val last = System.currentTimeMillis()
LOG.warn(s"更新R_LOCATE_FQDN2IP时间${last-start}")
}catch {
case e:Exception => e.printStackTrace()
}finally {
hisReFqdnLocIpBc.destroy()
}
}
}

View File

@@ -1,4 +1,4 @@
package cn.ac.iie.service.partition
package cn.ac.iie.spark.partition
import org.apache.spark.Partitioner
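
Only the package declaration moves in this file; the partitioner body is not shown in the diff. For context, a minimal sketch of a hash-based Partitioner with the constructor shape used by MergeDataFrame — an assumption about what the real class does, not its actual implementation:

// Hypothetical sketch: route each key to one of `partitions` buckets by hash.
class CustomPartitioner(partitions: Int) extends Partitioner {
  override def numPartitions: Int = partitions
  override def getPartition(key: Any): Int = {
    val h = if (key == null) 0 else key.hashCode % partitions
    if (h < 0) h + partitions else h // keep the bucket index non-negative
  }
}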

View File

@@ -1,5 +1,32 @@
package cn.ac.iie.utils
import cn.ac.iie.config.ApplicationConfig
import org.apache.spark.sql.SparkSession
import org.slf4j.LoggerFactory
object SparkSessionUtil {
private val LOG = LoggerFactory.getLogger(SparkSessionUtil.getClass)
val spark: SparkSession = getSparkSession
private def getSparkSession: SparkSession ={
val spark: SparkSession = SparkSession
.builder()
.appName(ApplicationConfig.SPARK_APP_NAME)
.config("spark.serializer", ApplicationConfig.SPARK_SERIALIZER)
.config("spark.network.timeout", ApplicationConfig.SPARK_NETWORK_TIMEOUT)
.config("spark.sql.shuffle.partitions", ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.config("spark.executor.memory", ApplicationConfig.SPARK_EXECUTOR_MEMORY)
.master(ApplicationConfig.MASTER)
.getOrCreate()
LOG.warn("sparkession获取成功")
spark
}
def closeSpark(): Unit ={
if (spark != null){
spark.stop()
}
}
}