4 Commits

11 changed files with 42 additions and 30 deletions

View File

@@ -20,6 +20,6 @@ read.clickhouse.max.time=1596684142
read.clickhouse.min.time=1596425769
update.interval=3600
distinct.client.ip.num=10000
distinct.client.ip.num=1000
recent.count.hour=24
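These properties are read once at job start-up. A minimal sketch of how they might be loaded, assuming the project's ApplicationConfig wraps Typesafe Config (the getString/getInt calls later in this diff suggest that, but the exact wrapper is not shown here):

    import com.typesafe.config.{Config, ConfigFactory}

    object RecommendConfig {
      // Loads application.properties / application.conf from the classpath.
      private val config: Config = ConfigFactory.load()

      // Epoch-second bounds used when clickhouse.time.limit.type selects an explicit range.
      val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
      val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
      // Cap on how many distinct client IPs are collected per FQDN/server_ip pair (lowered to 1000 here).
      val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")
      val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
    }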

View File

@@ -83,8 +83,7 @@ public class ArangoDBConnect {
collection.replaceDocuments(docUpdate);
}
} catch (Exception e) {
System.out.println("更新失败");
e.printStackTrace();
LOG.error("update failure" + e.toString());
} finally {
docInsert.clear();
docInsert.clear();
@@ -102,11 +101,11 @@ public class ArangoDBConnect {
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity : errors) {
LOG.warn("写入arangoDB异常" + errorEntity.getErrorMessage());
LOG.warn("write arangoDB error" + errorEntity.getErrorMessage());
}
}
} catch (Exception e) {
LOG.error("更新失败" + e.toString());
LOG.error("update failure" + e.toString());
} finally {
docOverwrite.clear();
}
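The two hunks above replace the Chinese console/log messages with English ones while keeping the overwrite-insert flow. A hedged Scala sketch of that flow, assuming the arangodb-java-driver API this class already uses (ArangoCollection.insertDocuments with DocumentCreateOptions; the overwrite(true) flag is an assumption about the driver version in use):

    import com.arangodb.ArangoCollection
    import com.arangodb.entity.BaseEdgeDocument
    import com.arangodb.model.DocumentCreateOptions
    import org.slf4j.LoggerFactory
    import scala.collection.JavaConverters._

    object ArangoOverwriteSketch {
      private val LOG = LoggerFactory.getLogger(getClass)

      // Insert-or-replace a batch and log every per-document error individually.
      def overwrite(collection: ArangoCollection, docs: java.util.List[BaseEdgeDocument]): Unit = {
        try {
          val options = new DocumentCreateOptions().overwrite(true) // replace on _key conflict
          val result = collection.insertDocuments(docs, options)
          result.getErrors.asScala.foreach { err =>
            LOG.warn("write arangoDB error: " + err.getErrorMessage)
          }
        } catch {
          case e: Exception => LOG.error("update failure: " + e.toString)
        } finally {
          docs.clear() // reuse the batch buffer, mirroring the finally block above
        }
      }
    }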

View File

@@ -3,6 +3,7 @@ spark.sql.shuffle.partitions=10
spark.executor.memory=4g
spark.executor.cores=1
spark.cores.max=10
spark.local.dir=./tmp
spark.app.name=test
spark.network.timeout=300s
spark.serializer=org.apache.spark.serializer.KryoSerializer
@@ -19,7 +20,7 @@ spark.read.clickhouse.session.table=session_record
spark.read.clickhouse.radius.table=radius_record
clickhouse.socket.timeout=300000
#arangoDB configuration
arangoDB.host=192.168.44.83
arangoDB.host=192.168.44.12
arangoDB.port=8529
arangoDB.user=root
arangoDB.password=galaxy_2019
@@ -29,7 +30,7 @@ arangoDB.ttl=3600
thread.pool.number=10
#How to bound the clickhouse read time range: 0 = read the past hour, 1 = use the explicit time range below
clickhouse.time.limit.type=1
clickhouse.time.limit.type=0
read.clickhouse.max.time=1634902508
read.clickhouse.min.time=1631759985
@@ -42,4 +43,3 @@ update.interval=3600
arangodb.total.num=20000000
#Time range for reading radius data, matching the radius job's execution period, in minutes
read.radius.granularity=-30
vsys.id=1
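Setting clickhouse.time.limit.type back to 0 switches the job from the fixed window (read.clickhouse.min.time/max.time) to the rolling past hour described in the comment above. A small illustrative helper showing that selection (the function itself is not part of this change):

    // Illustrative only: returns (minTime, maxTime) as epoch seconds.
    def clickhouseTimeWindow(limitType: Int, fixedMin: Long, fixedMax: Long): (Long, Long) = {
      val now = System.currentTimeMillis() / 1000
      limitType match {
        case 0 => (now - 3600, now)    // 0: read the past hour
        case 1 => (fixedMin, fixedMax) // 1: explicit range from the properties file
        case other => throw new IllegalArgumentException(s"unknown clickhouse.time.limit.type: $other")
      }
    }

    // With the values above: clickhouseTimeWindow(0, 1631759985L, 1634902508L) ignores the fixed bounds.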

View File

@@ -14,6 +14,7 @@ object ApplicationConfig {
// val REPARTITION_NUMBER: Int = config.getInt("repartitionNumber")
val MASTER: String = config.getString("master")
val SPARK_SERIALIZER: String = config.getString("spark.serializer")
val SPARK_LOCAL_DIR: String = config.getString("spark.local.dir")
val NUMPARTITIONS: String = config.getString("spark.read.clickhouse.numPartitions")
val SPARK_READ_CLICKHOUSE_URL: String = config.getString("spark.read.clickhouse.url")
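Because spark.local.dir is only now being added to the properties file, older deployments may not define it, and Typesafe Config's getString throws ConfigException.Missing for absent keys. A hedged variant with a fallback (assuming config here is a com.typesafe.config.Config, which the surrounding calls suggest):

    // Fall back to ./tmp when the key is absent instead of failing at start-up.
    val SPARK_LOCAL_DIR: String =
      if (config.hasPath("spark.local.dir")) config.getString("spark.local.dir") else "./tmp"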

View File

@@ -15,7 +15,7 @@ object BaseArangoData {
def loadArangoRdd[T: ClassTag](name:String): ArangoRdd[T] ={
val value = ArangoSpark.load[T](sparkContext, name, options)
LOG.warn(s"读取$name arangoDb:${value.count()}")
LOG.warn(s"read $name arangoDb: ${value.count()}")
value
}
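Note that value.count() in the log line triggers a full read of the ArangoDB collection, and the RDD is read again when it is consumed downstream. A hedged tweak that caches before counting, reusing the sparkContext, options, and LOG members of the surrounding object (whether the extra memory is acceptable depends on collection size):

    import org.apache.spark.storage.StorageLevel
    import scala.reflect.ClassTag

    def loadArangoRdd[T: ClassTag](name: String): ArangoRdd[T] = {
      val value = ArangoSpark.load[T](sparkContext, name, options)
      value.persist(StorageLevel.MEMORY_AND_DISK) // avoid re-reading ArangoDB for the count below
      LOG.warn(s"read $name arangoDb: ${value.count()}")
      value
    }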

View File

@@ -1,13 +1,13 @@
package cn.ac.iie.dao
import java.util.Date
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.utils.SparkSessionUtil.spark
import com.zdjizhi.utils.DateUtils
import org.apache.spark.sql.DataFrame
import org.slf4j.LoggerFactory
import java.util.Date
object BaseClickhouseData {
private val LOG = LoggerFactory.getLogger(BaseClickhouseData.getClass)
@@ -100,12 +100,12 @@ object BaseClickhouseData {
s"""
|(SELECT * FROM
|((SELECT ssl_sni AS FQDN,server_ip,MAX(recv_time) AS LAST_FOUND_TIME,MIN(recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(client_ip)) AS DIST_CIP_RECENT,'TLS' AS decoded_as,vsys_id AS VSYS_ID
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(client_ip)) AS DIST_CIP_RECENT,'TLS' AS decoded_as_list, vsys_id AS VSYS_ID
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
|WHERE $where and decoded_as = 'SSL' GROUP BY ssl_sni,server_ip,vsys_id)
|UNION ALL
|(SELECT http_host AS FQDN,server_ip,MAX(recv_time) AS LAST_FOUND_TIME,MIN(recv_time) AS FIRST_FOUND_TIME,COUNT(*) AS COUNT_TOTAL,
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(client_ip)) AS DIST_CIP_RECENT,'HTTP' AS decoded_as,vsys_id AS VSYS_ID
|toString(groupUniqArray(${ApplicationConfig.DISTINCT_CLIENT_IP_NUM})(client_ip)) AS DIST_CIP_RECENT,'HTTP' AS decoded_as_list,vsys_id AS VSYS_ID
|FROM ${ApplicationConfig.SPARK_READ_CLICKHOUSE_SESSION_TABLE}
|WHERE $where and decoded_as = 'HTTP' GROUP BY http_host,server_ip,vsys_id))
|WHERE FQDN != '') as dbtable
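The string built here is a parenthesized subquery aliased as dbtable, which is how Spark's JDBC source accepts arbitrary SQL. A hedged sketch of how it might be handed to the reader (the driver class and extra options are assumptions; the exact wiring inside BaseClickhouseData is not visible in this diff):

    val relationFqdnLocateIpDf: DataFrame = spark.read
      .format("jdbc")
      .option("driver", "ru.yandex.clickhouse.ClickHouseDriver") // assumed legacy ClickHouse JDBC driver class
      .option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
      .option("dbtable", dbtable) // the "(SELECT ... ) as dbtable" string built above
      .option("socket_timeout", "300000") // extra options are forwarded to the driver; mirrors clickhouse.socket.timeout
      .load()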

View File

@@ -2,6 +2,7 @@ package cn.ac.iie.main
import cn.ac.iie.service.update.UpdateDocument
@Deprecated
object IpRecommendApplication {
def main(args: Array[String]): Unit = {

View File

@@ -2,6 +2,7 @@ package cn.ac.iie.main
import cn.ac.iie.service.update.UpdateDocument
@Deprecated
object SubscriberRecommendApplication {
def main(args: Array[String]): Unit = {
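Both entry points are marked with the Java @Deprecated annotation, which carries no explanation. If a reason is wanted, Scala's own annotation records a message and a version that surface in compiler warnings (the message and version below are illustrative):

    @deprecated("superseded by the UpdateDocument-driven flow", "2021-10")
    object SubscriberRecommendApplication {
      def main(args: Array[String]): Unit = { /* ... */ }
    }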

View File

@@ -50,22 +50,31 @@ object MergeDataFrame {
def mergeRelationFqdnLocateIp(): RDD[(String, (Option[BaseEdgeDocument], Row))] = {
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf
.repartition().filter(row => isDomain(row.getAs[String]("FQDN")))
.repartition()
.filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "server_ip", "VSYS_ID")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
collect_list("decoded_as").alias("decoded_as_list"),
collect_list("decoded_as_list").alias("decoded_as_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
)
val fqdnLocIpRddRow = frame.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("server_ip")
val vsysId = row.getAs[Integer]("VSYS_ID").toLong
val key = fqdn.concat("-" + serverIp + "-" + vsysId)
(key, row)
}) /*.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS))*/
val fqdnLocIpRddRow = frame.rdd
.filter(row => {
// check whether server_ip and VSYS_ID are null
val serverIp = row.getAs[String]("server_ip")
val vsysId = row.getAs[Integer]("VSYS_ID")
serverIp != null && vsysId != null
})
.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("server_ip")
val vsysId = row.getAs[Integer]("VSYS_ID").toLong
val key = fqdn.concat("-" + serverIp + "-" + vsysId)
(key, row)
})
val fqdnLocIpRddDoc = BaseArangoData.loadArangoRdd[BaseEdgeDocument]("R_LOCATE_FQDN2IP")
fqdnLocIpRddDoc.map(doc => (doc.getKey, doc)).rightOuterJoin(fqdnLocIpRddRow)
@@ -133,7 +142,7 @@ object MergeDataFrame {
}
} catch {
case e: Exception =>
LOG.error("解析域名 " + fqdn + " 失败\n" + e.toString)
LOG.error("Domain name resolution " + fqdn + " failure\n" + e.toString)
}
false
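The rightOuterJoin keeps every aggregated ClickHouse row and attaches the existing ArangoDB edge whenever its _key matches the FQDN-server_ip-VSYS_ID key built above, so downstream code receives (Option[BaseEdgeDocument], Row) pairs. A hedged sketch of how such a pair might be folded into a document; the attribute names, value types, and merge rules are illustrative, not taken from this diff:

    import com.arangodb.entity.BaseEdgeDocument
    import org.apache.spark.sql.Row

    // doc is Some(...) only when the edge already exists in the R_LOCATE_FQDN2IP collection.
    def toEdge(key: String, doc: Option[BaseEdgeDocument], row: Row): BaseEdgeDocument = {
      // Assumes the recv_time aggregates come back as epoch-second longs; adjust to the real column types.
      val firstFound: java.lang.Long = row.getAs[Long]("FIRST_FOUND_TIME")
      val lastFound: java.lang.Long = row.getAs[Long]("LAST_FOUND_TIME")
      val edge = doc.getOrElse {
        val d = new BaseEdgeDocument()
        d.setKey(key)
        // A brand-new edge would also need setFrom/setTo vertex handles, omitted here.
        d.addAttribute("FIRST_FOUND_TIME", firstFound)
        d
      }
      // New or existing, refresh the last-seen timestamp from the current batch.
      edge.addAttribute("LAST_FOUND_TIME", lastFound)
      edge
    }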

View File

@@ -78,20 +78,20 @@ object UpdateDocument {
i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, collName)
LOG.warn(s"更新:$collName" + i)
LOG.warn(s"update $collName: " + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, collName)
LOG.warn(s"更新$collName:" + i)
LOG.warn(s"update $collName: " + i)
}
})
LOG.warn(s"更新$collName 条数:${fqdnAccmu.value}")
LOG.warn(s"update $collName rows: ${fqdnAccmu.value}")
val last = System.currentTimeMillis()
LOG.warn(s"更新$collName 时间:${last - start}")
LOG.warn(s"update $collName time: ${last - start}")
} catch {
case e: Exception => e.printStackTrace()
}
@@ -107,13 +107,13 @@ object UpdateDocument {
i += 1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"更新:IP" + i)
LOG.warn(s"update IP: " + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn(s"更新IP:" + i)
LOG.warn(s"update IP: " + i)
}
})
}
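Both loops above implement the same pattern: buffer documents, flush to ArangoDB every UPDATE_ARANGO_BATCH, then flush the remainder. A hedged generic sketch of that flush logic (arangoManger and LOG are the members of UpdateDocument shown above; the exact parameter type of overwrite is not visible in this diff, a java.util.List is assumed):

    import scala.collection.JavaConverters._
    import scala.collection.mutable.ArrayBuffer
    import com.arangodb.entity.BaseDocument

    // Buffer documents and flush in fixed-size batches; always flush the tail.
    def overwriteInBatches(docs: Iterator[BaseDocument], collName: String, batchSize: Int): Unit = {
      val buffer = ArrayBuffer.empty[BaseDocument]
      docs.foreach { doc =>
        buffer += doc
        if (buffer.size >= batchSize) {
          arangoManger.overwrite(buffer.asJava, collName)
          LOG.warn(s"update $collName: " + buffer.size)
          buffer.clear()
        }
      }
      if (buffer.nonEmpty) {
        arangoManger.overwrite(buffer.asJava, collName)
        LOG.warn(s"update $collName: " + buffer.size)
      }
    }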

View File

@@ -23,12 +23,13 @@ object SparkSessionUtil {
.config("spark.executor.memory", ApplicationConfig.SPARK_EXECUTOR_MEMORY)
.config("spark.executor.cores",ApplicationConfig.SPARK_EXECUTOR_CORES)
.config("spark.cores.max",ApplicationConfig.SPARK_CORES_MAX)
.config("spark.local.dir",ApplicationConfig.SPARK_LOCAL_DIR)
.config("arangodb.hosts", s"${ApplicationConfig.ARANGODB_HOST}:${ApplicationConfig.ARANGODB_PORT}")
.config("arangodb.user", ApplicationConfig.ARANGODB_USER)
.config("arangodb.password", ApplicationConfig.ARANGODB_PASSWORD)
.master(ApplicationConfig.MASTER)
.getOrCreate()
LOG.warn("sparkession获取成功")
LOG.warn("spark session start success")
spark
}
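One caveat with spark.local.dir=./tmp is that a relative path is resolved against each JVM's working directory, so driver and executors may end up with different scratch locations. A hedged, illustrative guard that resolves and creates the directory before the builder runs (not part of this change):

    import java.nio.file.{Files, Paths}

    // Resolve ./tmp against the driver's working directory and make sure it exists.
    val localDir = Paths.get(ApplicationConfig.SPARK_LOCAL_DIR).toAbsolutePath
    Files.createDirectories(localDir)
    // then pass localDir.toString to .config("spark.local.dir", ...) in the builder above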