From d07378ef8f5d2e5abafedccc5c424502cd29fbb6 Mon Sep 17 00:00:00 2001
From: wanglihui <949764788@qq.com>
Date: Thu, 6 Aug 2020 11:34:46 +0800
Subject: [PATCH 1/6] Add SUBSCRIBER_ID
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../ac/iie/service/ingestion/ReadClickhouseData.java | 1 +
.../src/main/resources/application.properties | 12 ++++++------
.../src/main/resources/clickhouse.properties | 8 ++++----
.../src/test/java/cn/ac/iie/readHistoryDataTest.java | 10 ++++++++++
4 files changed, 21 insertions(+), 10 deletions(-)
create mode 100644 IP-learning-graph/src/test/java/cn/ac/iie/readHistoryDataTest.java
diff --git a/IP-learning-graph/src/main/java/cn/ac/iie/service/ingestion/ReadClickhouseData.java b/IP-learning-graph/src/main/java/cn/ac/iie/service/ingestion/ReadClickhouseData.java
index 12772bd..b16be3b 100644
--- a/IP-learning-graph/src/main/java/cn/ac/iie/service/ingestion/ReadClickhouseData.java
+++ b/IP-learning-graph/src/main/java/cn/ac/iie/service/ingestion/ReadClickhouseData.java
@@ -105,6 +105,7 @@ public class ReadClickhouseData {
newDoc.setKey(subscriberId);
newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
+ newDoc.addAttribute("SUBSCRIBER_ID",subscriberId);
} catch (Exception e) {
e.printStackTrace();
}
diff --git a/IP-learning-graph/src/main/resources/application.properties b/IP-learning-graph/src/main/resources/application.properties
index ed8bb1f..dd055d9 100644
--- a/IP-learning-graph/src/main/resources/application.properties
+++ b/IP-learning-graph/src/main/resources/application.properties
@@ -1,6 +1,6 @@
#arangoDB configuration
-#arangoDB.host=192.168.40.182
-arangoDB.host=192.168.40.224
+arangoDB.host=192.168.40.182
+#arangoDB.host=192.168.40.224
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
@@ -9,7 +9,7 @@ arangoDB.DB.name=ip-learning-test
arangoDB.batch=100000
arangoDB.ttl=3600
-arangoDB.read.limit=limit 100
+arangoDB.read.limit=
update.arango.batch=10000
thread.pool.number=10
@@ -17,9 +17,9 @@ thread.await.termination.time=10
#ClickHouse read time-range mode, 0: read the past hour, 1: use the specified range below
-time.limit.type=0
-read.clickhouse.max.time=1595833062
-read.clickhouse.min.time=1595833060
+time.limit.type=1
+read.clickhouse.max.time=1596684142
+read.clickhouse.min.time=1596425769
update.interval=3600
distinct.client.ip.num=10000
diff --git a/IP-learning-graph/src/main/resources/clickhouse.properties b/IP-learning-graph/src/main/resources/clickhouse.properties
index f3607e9..3b18aa4 100644
--- a/IP-learning-graph/src/main/resources/clickhouse.properties
+++ b/IP-learning-graph/src/main/resources/clickhouse.properties
@@ -1,9 +1,9 @@
drivers=ru.yandex.clickhouse.ClickHouseDriver
mdb.user=default
-#db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
-#mdb.password=111111
-db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=300000
-mdb.password=ceiec2019
+db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
+mdb.password=111111
+#db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=300000
+#mdb.password=ceiec2019
initialsize=1
minidle=1
maxactive=50
diff --git a/IP-learning-graph/src/test/java/cn/ac/iie/readHistoryDataTest.java b/IP-learning-graph/src/test/java/cn/ac/iie/readHistoryDataTest.java
new file mode 100644
index 0000000..56f9b50
--- /dev/null
+++ b/IP-learning-graph/src/test/java/cn/ac/iie/readHistoryDataTest.java
@@ -0,0 +1,10 @@
+package cn.ac.iie;
+
+import cn.ac.iie.dao.BaseArangoData;
+import com.arangodb.entity.BaseEdgeDocument;
+
+public class readHistoryDataTest {
+ public static void main(String[] args) {
+ BaseArangoData baseArangoData = new BaseArangoData();
+ }
+}
From 4e58044a164304d71fadb243cbe4629985e8bc7c Mon Sep 17 00:00:00 2001
From: wanglihui <949764788@qq.com>
Date: Thu, 6 Aug 2020 16:11:16 +0800
Subject: [PATCH 2/6] Remove the original IP Learning project package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
{ip-learning => ip-learning-spark}/.gitignore | 2 +-
.../.idea/libraries/scala_sdk_2_11_7.xml | 25 +
.../java/cn/ac/iie/dao/BaseArangoData.java | 103 ++++
.../service/read/ReadHistoryArangoData.java | 85 ++++
.../java/cn/ac/iie/utils/ArangoDBConnect.java | 116 +++++
.../cn/ac/iie/utils/ExecutorThreadPool.java | 67 +++
.../src/main/resources/application.properties | 0
.../src/main/resources/log4j.properties | 25 +
.../cn/ac/iie/config/ApplicationConfig.scala | 5 +
.../cn/ac/iie/dao/BaseClickhouseData.scala | 5 +
.../ac/iie/main/IpLearningApplication.scala | 5 +
.../service/transform/MergeDataFrame.scala | 5 +
.../iie/service/update/UpdateDocHandler.scala | 5 +
.../iie/service/update/UpdateDocument.scala | 5 +
.../spark/partition/CustomPartitioner.scala | 11 +
.../cn/ac/iie/utils/SparkSessionUtil.scala | 2 +-
.../ac/iie/dao/BaseClickhouseDataTest.scala | 5 +
.../service/update/UpdateDocumentTest.scala | 5 +
ip-learning/pom.xml | 211 ---------
.../src/main/resources/application.conf | 26 -
.../src/main/resources/log4j.properties | 40 --
ip-learning/src/main/resources/spark-env.sh | 71 ---
.../cn/ac/iie/dao/BaseMediaDataLoad.scala | 125 -----
.../cn/ac/iie/dao/UpdateArangoGraph.scala | 177 -------
.../dao/UpdateArangoGraphByArangoSpark.scala | 237 ----------
.../cn/ac/iie/dao/UpdateArangoGraphByDF.scala | 250 ----------
.../scala/cn/ac/iie/etl/CursorTransform.scala | 33 --
.../ac/iie/main/IPLearningApplication.scala | 29 --
.../cn/ac/iie/pojo/BaseEdgeIPVisitFqdn.scala | 34 --
.../ac/iie/pojo/BaseEgdeFqdnAddressIP.scala | 34 --
.../scala/cn/ac/iie/pojo/BaseVertexFqdn.scala | 30 --
.../scala/cn/ac/iie/pojo/BaseVertexIP.scala | 32 --
.../cn/ac/iie/test/ArangoDBSparkTest.scala | 52 --
.../cn/ac/iie/test/ArangoDbReadV_IPTest.scala | 37 --
.../scala/cn/ac/iie/test/ArangoDbTest.scala | 314 ------------
.../cn/ac/iie/test/ArangoDbTestMemory.scala | 355 --------------
.../iie/test/ArangoDbTestMemoryGroupBy.scala | 40 --
.../main/scala/cn/ac/iie/test/Config.scala | 22 -
.../cn/ac/iie/test/ReadClickhouseTest.scala | 447 ------------------
.../cn/ac/iie/test/TestBaseEdgeDocument.scala | 29 --
.../test/TestBaseEdgeDocumentDataFrame.scala | 35 --
.../scala/cn/ac/iie/test/TestIndices.scala | 219 ---------
.../scala/cn/ac/iie/test/TestSparkJoin.scala | 56 ---
.../scala/cn/ac/iie/utils/ConfigUtils.scala | 34 --
.../cn/ac/iie/utils/InitArangoDBPool.scala | 24 -
.../java/cn/ac/iie/CreateObjectJavaTest.java | 32 --
.../scala/cn/ac/iie/CreateObjectTest.scala | 157 ------
.../test/scala/cn/ac/iie/HiveUnionTest.scala | 78 ---
.../src/test/scala/cn/ac/iie/TestMap.scala | 11 -
49 files changed, 474 insertions(+), 3273 deletions(-)
rename {ip-learning => ip-learning-spark}/.gitignore (90%)
create mode 100644 ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml
create mode 100644 ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java
create mode 100644 ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java
create mode 100644 ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java
create mode 100644 ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java
rename {ip-learning => ip-learning-spark}/src/main/resources/application.properties (100%)
create mode 100644 ip-learning-spark/src/main/resources/log4j.properties
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala
create mode 100644 ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala
rename ip-learning/src/main/scala/cn/ac/iie/utils/DateTimeUtils.scala => ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala (51%)
create mode 100644 ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala
create mode 100644 ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala
delete mode 100644 ip-learning/pom.xml
delete mode 100644 ip-learning/src/main/resources/application.conf
delete mode 100644 ip-learning/src/main/resources/log4j.properties
delete mode 100644 ip-learning/src/main/resources/spark-env.sh
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/dao/BaseMediaDataLoad.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraph.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByArangoSpark.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByDF.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/etl/CursorTransform.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/main/IPLearningApplication.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEdgeIPVisitFqdn.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEgdeFqdnAddressIP.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexFqdn.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexIP.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/ArangoDBSparkTest.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbReadV_IPTest.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTest.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemory.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemoryGroupBy.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/Config.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/ReadClickhouseTest.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocument.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocumentDataFrame.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/TestIndices.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/test/TestSparkJoin.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/utils/ConfigUtils.scala
delete mode 100644 ip-learning/src/main/scala/cn/ac/iie/utils/InitArangoDBPool.scala
delete mode 100644 ip-learning/src/test/java/cn/ac/iie/CreateObjectJavaTest.java
delete mode 100644 ip-learning/src/test/scala/cn/ac/iie/CreateObjectTest.scala
delete mode 100644 ip-learning/src/test/scala/cn/ac/iie/HiveUnionTest.scala
delete mode 100644 ip-learning/src/test/scala/cn/ac/iie/TestMap.scala
diff --git a/ip-learning/.gitignore b/ip-learning-spark/.gitignore
similarity index 90%
rename from ip-learning/.gitignore
rename to ip-learning-spark/.gitignore
index a77ab78..5db5dd3 100644
--- a/ip-learning/.gitignore
+++ b/ip-learning-spark/.gitignore
@@ -6,4 +6,4 @@
.idea
*.iml
target
-spark-warehouse/
+logs/
diff --git a/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml b/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml
new file mode 100644
index 0000000..9a0159e
--- /dev/null
+++ b/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml
@@ -0,0 +1,25 @@
+<component name="libraryTable">
+  <library name="scala-sdk-2.11.7" type="Scala">
+    <properties>
+      <option name="languageLevel" value="Scala_2_11" />
+      <compiler-classpath />
+    </properties>
+    <CLASSES />
+    <JAVADOC />
+    <SOURCES />
+  </library>
+</component>
\ No newline at end of file
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java b/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java
new file mode 100644
index 0000000..af47dcf
--- /dev/null
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java
@@ -0,0 +1,103 @@
+package cn.ac.iie.dao;
+
+import cn.ac.iie.config.ApplicationConfig;
+import cn.ac.iie.service.read.ReadHistoryArangoData;
+import cn.ac.iie.utils.ArangoDBConnect;
+import cn.ac.iie.utils.ExecutorThreadPool;
+import com.arangodb.ArangoCursor;
+import com.arangodb.entity.BaseDocument;
+import com.arangodb.entity.BaseEdgeDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Fetches historical data from arangoDB
+ *
+ * @author wlh
+ */
+public class BaseArangoData {
+ private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
+
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
+ static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
+
+ private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
+
+ private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
+
+ <T extends BaseDocument> void readHistoryData(String table,
+ ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
+ Class<T> type) {
+ try {
+ LOG.info("开始更新" + table);
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
+ historyMap.put(i, new ConcurrentHashMap<>());
+ }
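+ // one latch count per reader thread; countDownLatch.await() below blocks until every slice is loaded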
+ CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
+ long[] timeRange = getTimeRange(table);
+ for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
+ String sql = getQuerySql(timeRange, i, table);
+ ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
+ threadPool.executor(readHistoryArangoData);
+ }
+ countDownLatch.await();
+ long last = System.currentTimeMillis();
+ LOG.info("读取" + table + " arangoDB 共耗时:" + (last - start));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ private long[] getTimeRange(String table) {
+ long minTime = 0L;
+ long maxTime = 0L;
+ long startTime = System.currentTimeMillis();
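+ // the AQL below scans the collection once and returns the MIN/MAX of FIRST_FOUND_TIME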
+ String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
+ switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE) {
+ case 0:
+ ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
+ try {
+ if (timeDoc != null) {
+ while (timeDoc.hasNext()) {
+ BaseDocument doc = timeDoc.next();
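+ // the upper bound is padded by THREAD_POOL_NUMBER, presumably so the integer slicing in getQuerySql still covers the newest documents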
+ maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
+ minTime = Long.parseLong(doc.getAttribute("min_time").toString());
+ }
+ } else {
+ LOG.warn("获取ArangoDb时间范围为空");
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ break;
+ case 1:
+ maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME;
+ minTime = ApplicationConfig.READ_ARANGO_MIN_TIME;
+ break;
+ default:
+ }
+ long lastTime = System.currentTimeMillis();
+ LOG.info(sql + "\n查询最大最小时间用时:" + (lastTime - startTime));
+ return new long[]{minTime, maxTime};
+
+ }
+
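+ /**
+ * Splits [minTime, maxTime] into THREAD_POOL_NUMBER equal slices and
+ * builds the range-filtered AQL for slice number threadNumber.
+ */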
+ private String getQuerySql(long[] timeRange, int threadNumber, String table) {
+ long minTime = timeRange[0];
+ long maxTime = timeRange[1];
+ long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER;
+ long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
+ long minThreadTime = minTime + threadNumber * diffTime;
+ return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT + " RETURN doc";
+ }
+
+}
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java b/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java
new file mode 100644
index 0000000..0b4eda5
--- /dev/null
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java
@@ -0,0 +1,85 @@
+package cn.ac.iie.service.read;
+
+import cn.ac.iie.config.ApplicationConfig;
+import cn.ac.iie.utils.ArangoDBConnect;
+import com.arangodb.ArangoCursor;
+import com.arangodb.entity.BaseDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+
+import static cn.ac.iie.service.read.ReadClickhouseData.RECENT_COUNT_HOUR;
+
+/**
+ * @author wlh
+ * Reads the full arangoDB history with multiple threads and buckets it into maps
+ */
+public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
+ private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
+
+ private ArangoDBConnect arangoConnect;
+ private String query;
+ private ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map;
+ private Class<T> type;
+ private String table;
+ private CountDownLatch countDownLatch;
+
+ public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
+ String query,
+ ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> map,
+ Class<T> type,
+ String table,
+ CountDownLatch countDownLatch) {
+ this.arangoConnect = arangoConnect;
+ this.query = query;
+ this.map = map;
+ this.type = type;
+ this.table = table;
+ this.countDownLatch = countDownLatch;
+ }
+
+ @Override
+ public void run() {
+ try {
+ long s = System.currentTimeMillis();
+ ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
+ if (docs != null) {
+ List<T> baseDocuments = docs.asListRemaining();
+ int i = 0;
+ for (T doc : baseDocuments) {
+ String key = doc.getKey();
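+ // bucket by key hash so documents spread evenly across the THREAD_POOL_NUMBER sub-maps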
+ int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
+ ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
+ tmpMap.put(key, doc);
+ i++;
+ }
+ long l = System.currentTimeMillis();
+ LOG.info(query + "\n读取" + i + "条数据,运行时间:" + (l - s));
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ countDownLatch.countDown();
+ LOG.info("This reader thread is done; remaining threads: " + countDownLatch.getCount());
+ }
+ }
+
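+ /**
+ * Shifts each protocol's *_CNT_RECENT hourly array one slot to the right,
+ * dropping the oldest hour and zeroing slot 0 for the new hour.
+ */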
+ private void updateProtocolDocument(T doc) {
+ if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
+ for (String protocol : ReadClickhouseData.PROTOCOL_SET) {
+ String protocolRecent = protocol + "_CNT_RECENT";
+ ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
+ Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
+ Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
+ System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
+ cntRecentsDst[0] = 0L;
+ doc.addAttribute(protocolRecent, cntRecentsDst);
+ }
+ }
+ }
+
+}
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java
new file mode 100644
index 0000000..fc62f08
--- /dev/null
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java
@@ -0,0 +1,116 @@
+package cn.ac.iie.utils;
+
+import cn.ac.iie.config.ApplicationConfig;
+import com.arangodb.ArangoCollection;
+import com.arangodb.ArangoCursor;
+import com.arangodb.ArangoDB;
+import com.arangodb.ArangoDatabase;
+import com.arangodb.entity.DocumentCreateEntity;
+import com.arangodb.entity.ErrorEntity;
+import com.arangodb.entity.MultiDocumentEntity;
+import com.arangodb.model.AqlQueryOptions;
+import com.arangodb.model.DocumentCreateOptions;
+import com.arangodb.util.MapBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+
+public class ArangoDBConnect {
+ private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
+ private static ArangoDB arangoDB = null;
+ private static ArangoDBConnect conn = null;
+ static {
+ getArangoDatabase();
+ }
+
+ private static void getArangoDatabase(){
+ arangoDB = new ArangoDB.Builder()
+ .maxConnections(ApplicationConfig.THREAD_POOL_NUMBER)
+ .host(ApplicationConfig.ARANGODB_HOST, ApplicationConfig.ARANGODB_PORT)
+ .user(ApplicationConfig.ARANGODB_USER)
+ .password(ApplicationConfig.ARANGODB_PASSWORD)
+ .build();
+ }
+
+ public static synchronized ArangoDBConnect getInstance(){
+ if (null == conn){
+ conn = new ArangoDBConnect();
+ }
+ return conn;
+ }
+
+ private ArangoDatabase getDatabase(){
+ return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME);
+ }
+
+ public void clean(){
+ try {
+ if (arangoDB != null){
+ arangoDB.shutdown();
+ }
+ }catch (Exception e){
+ e.printStackTrace();
+ }
+ }
+
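+ // runs an AQL query with the configured cursor TTL; returns null if the query fails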
+ public <T> ArangoCursor<T> executorQuery(String query, Class<T> type) {
+ ArangoDatabase database = getDatabase();
+ Map<String, Object> bindVars = new MapBuilder().get();
+ AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL);
+ try {
+ return database.query(query, bindVars, options, type);
+ }catch (Exception e){
+ e.printStackTrace();
+ return null;
+ }finally {
+ bindVars.clear();
+ }
+ }
+
+ @Deprecated
+ public <T> void insertAndUpdate(ArrayList<T> docInsert, ArrayList<T> docUpdate, String collectionName) {
+ ArangoDatabase database = getDatabase();
+ try {
+ ArangoCollection collection = database.collection(collectionName);
+ if (!docInsert.isEmpty()){
+ collection.importDocuments(docInsert);
+ }
+ if (!docUpdate.isEmpty()){
+ collection.replaceDocuments(docUpdate);
+ }
+ }catch (Exception e){
+ System.out.println("更新失败");
+ e.printStackTrace();
+ } finally {
+ docInsert.clear();
+ docUpdate.clear();
+ }
+ }
+
+ public <T> void overwrite(ArrayList<T> docOverwrite, String collectionName) {
+ ArangoDatabase database = getDatabase();
+ try {
+ ArangoCollection collection = database.collection(collectionName);
+ if (!docOverwrite.isEmpty()){
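+ // overwrite(true) turns the insert into insert-or-replace on _key;
+ // silent(true) skips returning the written documents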
+ DocumentCreateOptions documentCreateOptions = new DocumentCreateOptions();
+ documentCreateOptions.overwrite(true);
+ documentCreateOptions.silent(true);
+ MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
+ Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
+ for (ErrorEntity errorEntity:errors){
+ LOG.debug("写入arangoDB异常:"+errorEntity.getErrorMessage());
+ }
+ }
+ }catch (Exception e){
+ System.out.println("更新失败:"+e.toString());
+ }finally {
+ docOverwrite.clear();
+ }
+ }
+
+
+
+}
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java
new file mode 100644
index 0000000..e3142ae
--- /dev/null
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java
@@ -0,0 +1,67 @@
+package cn.ac.iie.utils;
+
+import cn.ac.iie.config.ApplicationConfig;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import java.util.concurrent.*;
+
+/**
+ * Thread pool management
+ * @author wlh
+ */
+public class ExecutorThreadPool {
+ private static ExecutorService pool = null;
+ private static ExecutorThreadPool poolExecutor = null;
+
+ static {
+ getThreadPool();
+ }
+
+ private static void getThreadPool(){
+ ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
+ .setNameFormat("iplearning-application-pool-%d").build();
+
+ //Common Thread Pool
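+ // core = THREAD_POOL_NUMBER, max = twice that, bounded queue of 1024 tasks;
+ // AbortPolicy rejects new tasks once the queue is full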
+ pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER*2,
+ 0L, TimeUnit.MILLISECONDS,
+ new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
+
+// pool = Executors.newFixedThreadPool(ApplicationConfig.THREAD_POOL_NUMBER);
+ }
+
+ public static ExecutorThreadPool getInstance(){
+ if (null == poolExecutor){
+ poolExecutor = new ExecutorThreadPool();
+ }
+ return poolExecutor;
+ }
+
+ public void executor(Runnable command){
+ pool.execute(command);
+ }
+
+ @Deprecated
+ public void awaitThreadTask(){
+ try {
+ while (!pool.awaitTermination(ApplicationConfig.THREAD_AWAIT_TERMINATION_TIME, TimeUnit.SECONDS)) {
+ System.out.println("线程池没有关闭");
+ }
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void shutdown(){
+ pool.shutdown();
+ }
+
+ @Deprecated
+ public static Long getThreadNumber(){
+ String name = Thread.currentThread().getName();
+ String[] split = name.split("-");
+ return Long.parseLong(split[3]);
+ }
+
+
+
+}
diff --git a/ip-learning/src/main/resources/application.properties b/ip-learning-spark/src/main/resources/application.properties
similarity index 100%
rename from ip-learning/src/main/resources/application.properties
rename to ip-learning-spark/src/main/resources/application.properties
diff --git a/ip-learning-spark/src/main/resources/log4j.properties b/ip-learning-spark/src/main/resources/log4j.properties
new file mode 100644
index 0000000..ee350e5
--- /dev/null
+++ b/ip-learning-spark/src/main/resources/log4j.properties
@@ -0,0 +1,25 @@
+######################### logger ##############################
+log4j.logger.org.apache.http=OFF
+log4j.logger.org.apache.http.wire=OFF
+
+#Log4j
+log4j.rootLogger=info,console,file
+# ����̨��־����
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.Threshold=info
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
+
+# �ļ���־����
+log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.file.Threshold=info
+log4j.appender.file.encoding=UTF-8
+log4j.appender.file.Append=true
+#·���������·����������ز��������Ӧ��Ŀ��
+#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
+#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
+log4j.appender.file.file=./logs/ip-learning-application.log
+log4j.appender.file.DatePattern='.'yyyy-MM-dd
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
+log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala
new file mode 100644
index 0000000..9e72fac
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.config
+
+object ApplicationConfig {
+
+}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala
new file mode 100644
index 0000000..3a19be9
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.dao
+
+object BaseClickhouseData {
+
+}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala
new file mode 100644
index 0000000..17385f0
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.main
+
+object IpLearningApplication {
+
+}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala
new file mode 100644
index 0000000..c7939fe
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.service.transform
+
+object MergeDataFrame {
+
+}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala
new file mode 100644
index 0000000..64bed4d
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.service.update
+
+object UpdateDocHandler {
+
+}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala
new file mode 100644
index 0000000..c25c31e
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.service.update
+
+object UpdateDocument {
+
+}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala
new file mode 100644
index 0000000..a3c26ae
--- /dev/null
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala
@@ -0,0 +1,11 @@
+package cn.ac.iie.spark.partition
+
+import org.apache.spark.Partitioner
+
+class CustomPartitioner(numPartition: Int) extends Partitioner{
+ override def numPartitions: Int = numPartition
+
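+ // modulo of the absolute hash code, the same bucketing ReadHistoryArangoData uses on the Java side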
+ override def getPartition(key: Any): Int = {
+ Math.abs(key.hashCode()) % numPartition
+ }
+}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/utils/DateTimeUtils.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala
similarity index 51%
rename from ip-learning/src/main/scala/cn/ac/iie/utils/DateTimeUtils.scala
rename to ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala
index 3b66bb6..ce0f417 100644
--- a/ip-learning/src/main/scala/cn/ac/iie/utils/DateTimeUtils.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala
@@ -1,5 +1,5 @@
package cn.ac.iie.utils
-object DateTimeUtils {
+object SparkSessionUtil {
}
diff --git a/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala b/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala
new file mode 100644
index 0000000..7e73a98
--- /dev/null
+++ b/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.dao
+
+object BaseClickhouseDataTest {
+
+}
diff --git a/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala b/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala
new file mode 100644
index 0000000..28828b9
--- /dev/null
+++ b/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala
@@ -0,0 +1,5 @@
+package cn.ac.iie.service.update
+
+object UpdateDocumentTest {
+
+}
diff --git a/ip-learning/pom.xml b/ip-learning/pom.xml
deleted file mode 100644
index b66a1ab..0000000
--- a/ip-learning/pom.xml
+++ /dev/null
@@ -1,211 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <groupId>cn.ac.iie</groupId>
-    <artifactId>ip-learning</artifactId>
-    <version>1.0-SNAPSHOT</version>
-
-    <dependencies>
-        <dependency>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-            <version>3.0.1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-            <version>4.5.2</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpcore</artifactId>
-            <version>4.4.6</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-            <version>19.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-core_2.11</artifactId>
-            <version>2.2.3</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-sql_2.11</artifactId>
-            <version>2.2.3</version>
-        </dependency>
-        <dependency>
-            <groupId>ru.yandex.clickhouse</groupId>
-            <artifactId>clickhouse-jdbc</artifactId>
-            <version>0.1.54</version>
-        </dependency>
-        <dependency>
-            <groupId>com.alibaba</groupId>
-            <artifactId>druid</artifactId>
-            <version>1.1.10</version>
-        </dependency>
-        <dependency>
-            <groupId>com.typesafe</groupId>
-            <artifactId>config</artifactId>
-            <version>1.2.1</version>
-        </dependency>
-        <dependency>
-            <groupId>net.alchim31.maven</groupId>
-            <artifactId>scala-maven-plugin</artifactId>
-            <version>3.2.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-xml</artifactId>
-            <version>2.11.0-M4</version>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-library</artifactId>
-            <version>2.11.8</version>
-        </dependency>
-        <dependency>
-            <groupId>com.orientechnologies</groupId>
-            <artifactId>orientdb-graphdb</artifactId>
-            <version>3.0.31</version>
-        </dependency>
-        <dependency>
-            <groupId>com.orientechnologies</groupId>
-            <artifactId>orientdb-client</artifactId>
-            <version>3.0.31</version>
-        </dependency>
-        <dependency>
-            <groupId>com.orientechnologies</groupId>
-            <artifactId>orientdb-core</artifactId>
-            <version>3.0.31</version>
-        </dependency>
-        <dependency>
-            <groupId>com.orientechnologies</groupId>
-            <artifactId>orientdb-server</artifactId>
-            <version>3.0.31</version>
-        </dependency>
-        <dependency>
-            <groupId>com.orientechnologies</groupId>
-            <artifactId>orientdb-jdbc</artifactId>
-            <version>3.0.31</version>
-        </dependency>
-        <dependency>
-            <groupId>com.tinkerpop.blueprints</groupId>
-            <artifactId>blueprints-orient-graph</artifactId>
-            <version>2.4.0</version>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.arangodb</groupId>
-            <artifactId>arangodb-java-driver</artifactId>
-            <version>4.2.2</version>
-        </dependency>
-        <dependency>
-            <groupId>com.arangodb</groupId>
-            <artifactId>arangodb-spark-connector</artifactId>
-            <version>1.0.2</version>
-        </dependency>
-        <dependency>
-            <groupId>com.swoop</groupId>
-            <artifactId>spark-alchemy_2.11</artifactId>
-            <version>0.3.28</version>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.scala-tools</groupId>
-                <artifactId>maven-scala-plugin</artifactId>
-                <version>2.15.2</version>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>compile</goal>
-                            <goal>testCompile</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>3.1</version>
-                <configuration>
-                    <source>1.8</source>
-                    <target>1.8</target>
-                </configuration>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-assembly-plugin</artifactId>
-                <version>2.6</version>
-                <configuration>
-                    <archive>
-                        <manifest>
-                            <mainClass>cn.ac.iie.main.IPLearningApplication</mainClass>
-                        </manifest>
-                    </archive>
-                    <descriptorRefs>
-                        <descriptorRef>jar-with-dependencies</descriptorRef>
-                    </descriptorRefs>
-                </configuration>
-                <executions>
-                    <execution>
-                        <id>make-assembly</id>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>single</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
\ No newline at end of file
diff --git a/ip-learning/src/main/resources/application.conf b/ip-learning/src/main/resources/application.conf
deleted file mode 100644
index e6ff7dd..0000000
--- a/ip-learning/src/main/resources/application.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-#Spark job configuration
-spark.sql.shuffle.partitions=144
-spark.sql.read.fetchsize="10000"
-spark.executor.memory="120g"
-spark.app.name="test"
-spark.network.timeout="300s"
-repartitionNumber=36
-spark.serializer="org.apache.spark.serializer.KryoSerializer"
-master="local[*]"
-#Spark-to-ClickHouse read configuration
-numPartitions="144"
-maxPoolSize=40
-minTime="1571245199"
-maxTime="1571284799"
-clickhouse.socket.timeout=300000
-#arangoDB configuration
-arangoDB.host="192.168.40.127"
-arangoDB.port=8529
-arangoDB.user="root"
-arangoDB.password="111111"
-arangoDB.DB.name="insert_iplearn_index"
-arangoDB.batch=100000
-arangoDB.ttl=3600
-
-thread.pool.number=10
-
diff --git a/ip-learning/src/main/resources/log4j.properties b/ip-learning/src/main/resources/log4j.properties
deleted file mode 100644
index 2039ec3..0000000
--- a/ip-learning/src/main/resources/log4j.properties
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set everything to be logged to the console
-log4j.rootCategory=WARN, console
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
-
-# Set the default spark-shell log level to WARN. When running the spark-shell, the
-# log level for this class is used to overwrite the root logger's log level, so that
-# the user can have different defaults for the shell and regular Spark apps.
-log4j.logger.org.apache.spark.repl.Main=WARN
-
-# Settings to quiet third party logs that are too verbose
-log4j.logger.org.spark_project.jetty=WARN
-log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
-log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN
-log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN
-log4j.logger.org.apache.parquet=ERROR
-log4j.logger.parquet=ERROR
-
-# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
-log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
-log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
diff --git a/ip-learning/src/main/resources/spark-env.sh b/ip-learning/src/main/resources/spark-env.sh
deleted file mode 100644
index 80dd7d4..0000000
--- a/ip-learning/src/main/resources/spark-env.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This file is sourced when running various Spark programs.
-# Copy it as spark-env.sh and edit that to configure Spark for your site.
-
-# Options read when launching programs locally with
-# ./bin/run-example or ./bin/spark-submit
-# - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
-# - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
-# - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
-
-# Options read by executors and drivers running inside the cluster
-# - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
-# - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
-# - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
-# - MESOS_NATIVE_JAVA_LIBRARY, to point to your libmesos.so if you use Mesos
-
-# Options read in YARN client mode
-# - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
-# - SPARK_EXECUTOR_CORES, Number of cores for the executors (Default: 1).
-# - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
-# - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
-
-# Options for the daemons used in the standalone deploy mode
-# - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
-# - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
-# - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y")
-# - SPARK_WORKER_CORES, to set the number of cores to use on this machine
-# - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
-# - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker
-# - SPARK_WORKER_DIR, to set the working directory of worker processes
-# - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. "-Dx=y")
-# - SPARK_DAEMON_MEMORY, to allocate to the master, worker and history server themselves (default: 1g).
-# - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y")
-# - SPARK_SHUFFLE_OPTS, to set config properties only for the external shuffle service (e.g. "-Dx=y")
-# - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y")
-# - SPARK_DAEMON_CLASSPATH, to set the classpath for all daemons
-# - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers
-
-# Generic options for the daemons used in the standalone deploy mode
-# - SPARK_CONF_DIR Alternate conf dir. (Default: ${SPARK_HOME}/conf)
-# - SPARK_LOG_DIR Where log files are stored. (Default: ${SPARK_HOME}/logs)
-# - SPARK_PID_DIR Where the pid file is stored. (Default: /tmp)
-# - SPARK_IDENT_STRING A string representing this instance of spark. (Default: $USER)
-# - SPARK_NICENESS The scheduling priority for daemons. (Default: 0)
-# - SPARK_NO_DAEMONIZE Run the proposed command in the foreground. It will not output a PID file.
-
-export SPARK_MASTER_IP=bigdata-119
-export SPARK_MASTER_PORT=7077
-export SPARK_WORKER_CORES=4
-export SPARK_WORKER_INSTANCES=1
-export SPARK_WORKER_MEMORY=3g
-export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
-export SCALA_HOME=/home/ceiec/scala-2.11.7
diff --git a/ip-learning/src/main/scala/cn/ac/iie/dao/BaseMediaDataLoad.scala b/ip-learning/src/main/scala/cn/ac/iie/dao/BaseMediaDataLoad.scala
deleted file mode 100644
index ad531bb..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/dao/BaseMediaDataLoad.scala
+++ /dev/null
@@ -1,125 +0,0 @@
-package cn.ac.iie.dao
-
-import cn.ac.iie.test.Config
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-object BaseMediaDataLoad {
-
- def loadMediaDate(spark: SparkSession): Unit = {
- val mediaDataFrame: DataFrame = spark.read.format("jdbc")
- .option("url", "jdbc:clickhouse://192.168.40.193:8123")
- .option("dbtable", s"(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where recv_time>=${Config.MINTIME} and recv_time<=${Config.MAXTIME})")
- .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
- .option("user", "default")
- .option("password", "111111")
- .option("numPartitions", Config.NUMPARTITIONS)
- .option("partitionColumn", "recv_time")
- .option("lowerBound", Config.MINTIME)
- .option("upperBound", Config.MAXTIME)
- .option("fetchsize", Config.SPARK_SQL_READ_FETCHSIZE)
- .load()
- mediaDataFrame.printSchema()
- mediaDataFrame.createOrReplaceGlobalTempView("media_expire_patch")
- }
-
- def getFQDNVertexFromMedia(spark: SparkSession): DataFrame = {
- val v_FQDN_DF = spark.sql(
- """
- |SELECT
- | media_domain AS new_fqdn_name,
- | MIN( recv_time ) AS new_fqdn_first_found_time,
- | MAX( recv_time ) AS new_fqdn_last_found_time,
- | COUNT( * ) AS new_fqdn_count_total
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | media_domain != ''
- |GROUP BY
- | media_domain
- """.stripMargin
- )
- v_FQDN_DF
- }
-
- def getIPVertexFromMedia(spark: SparkSession): DataFrame = {
- val s_IP_DF = spark.sql(
- """
- select
- s1_s_ip as new_ip,
- s1_s_location_region as new_location,
- MIN( recv_time ) AS new_ip_first_found_time,
- MAX( recv_time ) AS new_ip_last_found_time,
- COUNT( * ) AS new_ip_count_total
- from global_temp.media_expire_patch
- GROUP BY
- s1_s_ip,
- s1_s_location_region
- """.stripMargin)
- val d_IP_DF = spark.sql(
- """
- select
- s1_d_ip as new_ip,
- s1_d_location_region as new_location,
- MIN( recv_time ) AS new_ip_first_found_time,
- MAX( recv_time ) AS new_ip_last_found_time,
- COUNT( * ) AS new_ip_count_total
- from global_temp.media_expire_patch
- GROUP BY
- s1_d_ip,
- s1_d_location_region
- """.stripMargin)
- import org.apache.spark.sql.functions._
- val v_IP_DF = s_IP_DF.union(d_IP_DF).groupBy("new_ip", "new_location").agg(
- min("new_ip_first_found_time").as("new_ip_first_found_time"),
- max("new_ip_last_found_time").as("new_ip_last_found_time"),
- count("new_ip").as("new_ip_count_total")
- )
- v_IP_DF
- }
-
- def getFQDNAddressIPEdgeFromMedia(spark: SparkSession): DataFrame = {
- val e_Address_v_FQDN_to_v_IP_DF = spark.sql(
- """
- |SELECT
- | media_domain AS new_fqdn,
- | s1_d_ip AS new_ip,
- | MIN( recv_time ) AS new_first_found_time,
- | MAX( recv_time ) AS new_last_found_time,
- | COUNT( * ) AS new_count_total,
- | CONCAT_WS('-',media_domain,s1_d_ip) AS new_key
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( media_domain != '' )
- | AND ( s1_d_ip != '' )
- |GROUP BY
- | s1_d_ip,
- | media_domain
- """.stripMargin)
- e_Address_v_FQDN_to_v_IP_DF
- }
-
- def getIPVisitFQDNEdgeFromMedia(spark: SparkSession): DataFrame = {
- val e_Visit_v_IP_to_v_FQDN_DF = spark.sql(
- """
- |SELECT
- | s1_s_ip AS new_ip,
- | media_domain AS new_fqdn,
- | MIN( recv_time ) AS new_first_found_time,
- | MAX( recv_time ) AS new_last_found_time,
- | COUNT( * ) AS new_count_total,
- | CONCAT_WS('-',s1_s_ip,media_domain) as new_key
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( s1_s_ip != '' )
- | AND ( media_domain != '' )
- |GROUP BY
- | s1_s_ip,
- | media_domain
- """.stripMargin)
- e_Visit_v_IP_to_v_FQDN_DF
- }
-
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraph.scala b/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraph.scala
deleted file mode 100644
index b6e7c3f..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraph.scala
+++ /dev/null
@@ -1,177 +0,0 @@
-package cn.ac.iie.dao
-
-import cn.ac.iie.test.ArangoDbTest.arangoDB
-import cn.ac.iie.test.Config
-import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
-import org.apache.spark.sql.DataFrame
-
-import scala.util.Try
-
-object UpdateArangoGraph {
-
- /**
- * Update FQDN vertices
- */
- def updateFQDNVertex(v_FQDN_DF:DataFrame): Unit ={
-
- v_FQDN_DF.printSchema()
- v_FQDN_DF.foreachPartition(iter => {
- val v_FQDN_Coll = arangoDB.db("insert_iplearn_index").collection("V_FQDN")
- val docs_Insert = new java.util.ArrayList[BaseDocument]()
- val docs_Update = new java.util.ArrayList[BaseDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("FQDN_NAME")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Last = row.getAs[Long]("FQDN_LAST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-
- if (v_FQDN_Coll.documentExists(fqdn)) {
- val document: BaseDocument = v_FQDN_Coll.getDocument(fqdn, classOf[BaseDocument])
- val fqdn_Cnt = Try(document.getAttribute("FQDN_COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("FQDN_COUNT_TOTAL", fqdn_Cnt)
- document.addAttribute("FQDN_LAST_FOUND_TIME", v_Fqdn_Last)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(fqdn)
- baseDocument.addAttribute("FQDN_NAME", fqdn)
- baseDocument.addAttribute("FQDN_FIRST_FOUND_TIME", v_Fqdn_First)
- baseDocument.addAttribute("FQDN_LAST_FOUND_TIME", v_Fqdn_Last)
- baseDocument.addAttribute("FQDN_COUNT_TOTAL", v_Fqdn_Cnt)
- docs_Insert.add(baseDocument)
- }
- i+=1
- })
- Try(v_FQDN_Coll.importDocuments(docs_Insert))
- Try(v_FQDN_Coll.updateDocuments(docs_Update))
- })
- }
-
- /**
- * Update IP vertices
- */
- def updateIPVertex(v_IP_DF:DataFrame): Unit ={
- v_IP_DF.printSchema()
- v_IP_DF.foreachPartition(iter => {
- val v_IP_Coll = arangoDB.db("insert_iplearn_index").collection("V_IP")
- val docs_Insert: java.util.ArrayList[BaseDocument] = new java.util.ArrayList[BaseDocument]()
- val docs_Update: java.util.ArrayList[BaseDocument] = new java.util.ArrayList[BaseDocument]()
- var i = 0
-
- iter.foreach(row => {
- val ip = row.getAs[String]("IP")
- val location = row.getAs[String]("location")
- val v_IP_First = row.getAs[Long]("IP_FIRST_FOUND_TIME")
- val v_IP_Last = row.getAs[Long]("IP_LAST_FOUND_TIME")
- val v_IP_Cnt = row.getAs[Long]("IP_COUNT_TOTAL")
-
- if (v_IP_Coll.documentExists(ip)) {
- val document: BaseDocument = v_IP_Coll.getDocument(ip, classOf[BaseDocument])
- val ip_Cnt = Try(document.getAttribute("IP_APPEAR_COUNT")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", v_IP_Last)
- document.addAttribute("IP_APPEAR_COUNT", v_IP_Cnt+ip_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(ip)
- baseDocument.addAttribute("IP", ip)
- baseDocument.addAttribute("IP_LOCATION", location)
- baseDocument.addAttribute("FIRST_FOUND_TIME", v_IP_First)
- baseDocument.addAttribute("LAST_FOUND_TIME", v_IP_Last)
- baseDocument.addAttribute("IP_APPEAR_COUNT", v_IP_Cnt)
- docs_Insert.add(baseDocument)
- }
- i+=1
- })
- Try(v_IP_Coll.importDocuments(docs_Insert))
- Try(v_IP_Coll.updateDocuments(docs_Update))
- })
-
- }
-
- /**
- * Aggregate e_Address_Fqdn_to_IP
- */
- def updateFQDNAddressIPEdge(e_Address_v_FQDN_to_v_IP_DF:DataFrame): Unit ={
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Address_v_FQDN_to_v_IP_DF.foreachPartition(iter => {
- val e_Add_Fqdn_to_IP_Coll = arangoDB.db("insert_iplearn_index").collection("E_ADDRESS_V_FQDN_TO_V_IP")
- val docs_Insert: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- val docs_Update: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
-
- if (e_Add_Fqdn_to_IP_Coll.documentExists(fqdn+"-"+ip)) {
- val document: BaseEdgeDocument = e_Add_Fqdn_to_IP_Coll.getDocument(fqdn+"-"+ip, classOf[BaseEdgeDocument])
- val e_new_Cnt = Try(document.getAttribute("COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", e_Last)
- document.addAttribute("COUNT_TOTAL", e_new_Cnt+e_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseEdgeDocument = new BaseEdgeDocument()
- baseDocument.setKey(fqdn+"-"+ip)
- baseDocument.setFrom(s"V_FQDN/$fqdn")
- baseDocument.setTo(s"V_IP/$ip")
- baseDocument.addAttribute("COUNT_TOTAL",e_Cnt)
- baseDocument.addAttribute("FIRST_FOUND_TIME",e_First)
- baseDocument.addAttribute("LAST_FOUND_TIME",e_Last)
- docs_Insert.add(baseDocument)
- }
- // println(fqdn+"-"+ip)
- i+=1
- })
- Try(e_Add_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Add_Fqdn_to_IP_Coll.updateDocuments(docs_Update))
- })
- }
-
- /**
- * Aggregate e_Visit_v_IP_to_v_FQDN
- */
- def updateIPVisitFQDNEdge(e_Visit_v_IP_to_v_FQDN_DF:DataFrame): Unit ={
- e_Visit_v_IP_to_v_FQDN_DF.printSchema()
- e_Visit_v_IP_to_v_FQDN_DF.foreachPartition(iter => {
- val e_Visit_Fqdn_to_IP_Coll = arangoDB.db("insert_iplearn_index").collection("E_VISIT_V_IP_TO_V_FQDN")
- val docs_Insert: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- val docs_Update: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
-
- if (e_Visit_Fqdn_to_IP_Coll.documentExists(ip+"-"+fqdn)) {
- val document: BaseEdgeDocument = e_Visit_Fqdn_to_IP_Coll.getDocument(ip+"-"+fqdn, classOf[BaseEdgeDocument])
- val e_new_Cnt = Try(document.getAttribute("COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", e_Last)
- document.addAttribute("COUNT_TOTAL", e_new_Cnt+e_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseEdgeDocument = new BaseEdgeDocument()
- baseDocument.setKey(ip+"-"+fqdn)
- baseDocument.setFrom(s"V_IP/$ip")
- baseDocument.setTo(s"V_FQDN/$fqdn")
- baseDocument.addAttribute("COUNT_TOTAL",e_Cnt)
- baseDocument.addAttribute("FIRST_FOUND_TIME",e_First)
- baseDocument.addAttribute("LAST_FOUND_TIME",e_Last)
- docs_Insert.add(baseDocument)
- }
- i+=1
- })
- Try(e_Visit_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Visit_Fqdn_to_IP_Coll.updateDocuments(docs_Update))
- })
-
-
- }
-
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByArangoSpark.scala b/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByArangoSpark.scala
deleted file mode 100644
index 721d07f..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByArangoSpark.scala
+++ /dev/null
@@ -1,237 +0,0 @@
-package cn.ac.iie.dao
-
-import cn.ac.iie.etl.CursorTransform
-import cn.ac.iie.pojo.{BaseEdgeIPVisitFqdn, BaseEgdeFqdnAddressIP, BaseVertexFqdn, BaseVertexIP}
-import cn.ac.iie.test.Config
-import cn.ac.iie.utils.{ConfigUtils, InitArangoDBPool}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-import scala.util.Try
-
-object UpdateArangoGraphByArangoSpark {
- /**
- * Update FQDN vertices
- */
- def updateFQDNVertex(v_FQDN_DF:DataFrame,v_FQDN_Cursor_DF: DataFrame): Unit ={
- v_FQDN_DF.printSchema()
- v_FQDN_Cursor_DF.printSchema()
-
- val v_Fqdn_Join_Df = v_FQDN_DF
- .join(v_FQDN_Cursor_DF,v_FQDN_DF("new_fqdn_name")===v_FQDN_Cursor_DF("key"),"fullouter")
- v_Fqdn_Join_Df.printSchema()
-
- v_Fqdn_Join_Df.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val v_FQDN_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("V_FQDN")
- val docs_Replace = new java.util.ArrayList[BaseVertexFqdn]()
- val docs_Insert = new java.util.ArrayList[BaseVertexFqdn]()
-
- iter.foreach(row => {
- val new_fqdn_name = row.getAs[String]("new_fqdn_name")
- val new_fqdn_first_found_time = row.getAs[Long]("new_fqdn_first_found_time")
- val new_fqdn_last_found_time = row.getAs[Long]("new_fqdn_last_found_time")
- val new_fqdn_count_total = row.getAs[Long]("new_fqdn_count_total")
-
- val fqdn = row.getAs[String]("key")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-
- if (fqdn != null) {
- val document: BaseVertexFqdn = new BaseVertexFqdn()
- document.setKey(new_fqdn_name)
- document.setFQDN_NAME(new_fqdn_name)
- document.setFQDN_FIRST_FOUND_TIME(v_Fqdn_First)
- document.setFQDN_LAST_FOUND_TIME(new_fqdn_last_found_time)
- document.setFQDN_COUNT_TOTAL(v_Fqdn_Cnt+new_fqdn_count_total)
- docs_Replace.add(document)
- } else {
- val baseDocument: BaseVertexFqdn = new BaseVertexFqdn()
- baseDocument.setKey(new_fqdn_name)
- baseDocument.setFQDN_NAME(new_fqdn_name)
- baseDocument.setFQDN_FIRST_FOUND_TIME(new_fqdn_first_found_time)
- baseDocument.setFQDN_LAST_FOUND_TIME(new_fqdn_last_found_time)
- baseDocument.setFQDN_COUNT_TOTAL(new_fqdn_count_total)
- docs_Insert.add(baseDocument)
- }
- })
- Try(v_FQDN_Coll.replaceDocuments(docs_Replace))
- Try(v_FQDN_Coll.importDocuments(docs_Insert))
- })
-
- }
-
- /**
- * Update IP vertices
- */
- def updateIPVertex(v_IP_DF:DataFrame,v_IP_Cursor_DF: DataFrame): Unit ={
- v_IP_DF.printSchema()
-
- v_IP_Cursor_DF.printSchema()
-
- val v_IP_Join_DF = v_IP_DF.join(v_IP_Cursor_DF,v_IP_DF("new_ip")===v_IP_Cursor_DF("key"),"fullouter")
- v_IP_Join_DF.printSchema()
-
- v_IP_Join_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val v_IP_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("V_IP")
- val docs_Insert: java.util.ArrayList[BaseVertexIP] = new java.util.ArrayList[BaseVertexIP]()
- val docs_Replace: java.util.ArrayList[BaseVertexIP] = new java.util.ArrayList[BaseVertexIP]()
-
- iter.foreach(row => {
- val new_Ip = row.getAs[String]("new_ip")
- val new_Location = row.getAs[String]("new_location")
- val new_Ip_First_Found_Time = row.getAs[Long]("new_ip_first_found_time")
- val new_Ip_Last_Found_Time = row.getAs[Long]("new_ip_last_found_time")
- val new_Ip_Count_Total = row.getAs[Long]("new_ip_count_total")
-
- val key = row.getAs[String]("key")
- val location = row.getAs[String]("IP_LOCATION")
- val v_IP_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val v_IP_Cnt = row.getAs[Long]("IP_APPEAR_COUNT")
-
-
- if (key != null) {
- val document = new BaseVertexIP()
- document.setKey(key)
- document.setIP(key)
- document.setLAST_FOUND_TIME(new_Ip_Last_Found_Time)
- document.setIP_APPEAR_COUNT(v_IP_Cnt+new_Ip_Count_Total)
- document.setFIRST_FOUND_TIME(v_IP_First)
- document.setIP_LOCATION(location)
- docs_Replace.add(document)
- } else {
- val baseDocument = new BaseVertexIP()
- baseDocument.setKey(new_Ip)
- baseDocument.setIP(new_Ip)
- baseDocument.setLAST_FOUND_TIME(new_Ip_Last_Found_Time)
- baseDocument.setIP_APPEAR_COUNT(new_Ip_Count_Total)
- baseDocument.setFIRST_FOUND_TIME(new_Ip_First_Found_Time)
- baseDocument.setIP_LOCATION(new_Location)
- docs_Insert.add(baseDocument)
- }
- })
- Try(v_IP_Coll.importDocuments(docs_Insert))
- Try(v_IP_Coll.updateDocuments(docs_Replace))
- })
-
- }
-
- /**
- * Aggregate e_Address_Fqdn_to_IP
- */
- def updateFQDNAddressIPEdge(e_Address_v_FQDN_to_v_IP_DF:DataFrame,e_Fqdn_Address_IP_Cursor_DF: DataFrame): Unit ={
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Fqdn_Address_IP_Cursor_DF.printSchema()
-
- e_Fqdn_Address_IP_Cursor_DF.printSchema()
-
- val e_Address_v_FQDN_to_v_IP_Join_DF = e_Address_v_FQDN_to_v_IP_DF
- .join(e_Fqdn_Address_IP_Cursor_DF,
- e_Address_v_FQDN_to_v_IP_DF("new_key")===e_Fqdn_Address_IP_Cursor_DF("key"),
- "fullouter")
-
- e_Address_v_FQDN_to_v_IP_Join_DF.printSchema()
-
- e_Address_v_FQDN_to_v_IP_Join_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val e_Add_Fqdn_to_IP_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("E_ADDRESS_V_FQDN_TO_V_IP")
- val docs_Insert: java.util.ArrayList[BaseEgdeFqdnAddressIP] = new java.util.ArrayList[BaseEgdeFqdnAddressIP]()
- val docs_Replace: java.util.ArrayList[BaseEgdeFqdnAddressIP] = new java.util.ArrayList[BaseEgdeFqdnAddressIP]()
- iter.foreach(row => {
- val new_Fqdn = row.getAs[String]("new_fqdn")
- val new_IP = row.getAs[String]("new_ip")
- val new_Key = row.getAs[String]("new_key")
- val new_First_Found_Time = row.getAs[Long]("new_first_found_time")
- val new_Last_Found_Time = row.getAs[Long]("new_last_found_time")
- val new_Count_Total = row.getAs[Long]("new_count_total")
-
- val from = row.getAs[String]("from")
- val to = row.getAs[String]("to")
- val key = row.getAs[String]("key")
- val e_First_Time = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Count_Total = row.getAs[Long]("COUNT_TOTAL")
-
- if (key != null) {
- val document = new BaseEgdeFqdnAddressIP()
- document.setKey(key)
- document.setFrom(from)
- document.setTo(to)
- document.setLAST_FOUND_TIME(new_Last_Found_Time)
- document.setFIRST_FOUND_TIME(e_First_Time)
- document.setCOUNT_TOTAL(new_Count_Total+e_Count_Total)
- docs_Replace.add(document)
- } else {
- val baseDocument: BaseEgdeFqdnAddressIP = new BaseEgdeFqdnAddressIP()
- baseDocument.setKey(new_Key)
- baseDocument.setFrom(s"V_FQDN/$new_Fqdn")
- baseDocument.setTo(s"V_IP/$new_IP")
- baseDocument.setLAST_FOUND_TIME(new_Last_Found_Time)
- baseDocument.setFIRST_FOUND_TIME(new_First_Found_Time)
- baseDocument.setCOUNT_TOTAL(new_Count_Total)
- docs_Insert.add(baseDocument)
- }
- })
- Try(e_Add_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Add_Fqdn_to_IP_Coll.replaceDocuments(docs_Replace))
- })
-
- }
-
-
- /**
-   * Aggregate and upsert the e_Visit_v_IP_to_v_FQDN edges
- */
- def updateIPVisitFQDNEdge(e_Visit_v_IP_to_v_FQDN_DF:DataFrame,e_IP_Visit_FQDN_Cursor_DF: DataFrame): Unit = {
- e_Visit_v_IP_to_v_FQDN_DF.printSchema()
- e_IP_Visit_FQDN_Cursor_DF.printSchema()
-
- val e_Visit_v_IP_to_v_FQDN_Join_DF = e_Visit_v_IP_to_v_FQDN_DF
- .join(e_IP_Visit_FQDN_Cursor_DF, e_Visit_v_IP_to_v_FQDN_DF("new_key") === e_IP_Visit_FQDN_Cursor_DF("key"), "fullouter")
-
- e_Visit_v_IP_to_v_FQDN_Join_DF.printSchema()
-
- e_Visit_v_IP_to_v_FQDN_Join_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val e_Visit_Fqdn_to_IP_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("E_VISIT_V_IP_TO_V_FQDN")
- val docs_Insert: java.util.ArrayList[BaseEdgeIPVisitFqdn] = new java.util.ArrayList[BaseEdgeIPVisitFqdn]()
- val docs_Replace: java.util.ArrayList[BaseEdgeIPVisitFqdn] = new java.util.ArrayList[BaseEdgeIPVisitFqdn]()
- iter.foreach(row => {
- val new_Fqdn = row.getAs[String]("new_fqdn")
- val new_IP = row.getAs[String]("new_ip")
- val new_Key = row.getAs[String]("new_key")
- val new_First_Found_Time = row.getAs[Long]("new_first_found_time")
- val new_Last_Found_Time = row.getAs[Long]("new_last_found_time")
- val new_Count_Total = row.getAs[Long]("new_count_total")
-
- val to = row.getAs[String]("to")
- val from = row.getAs[String]("from")
- val key = row.getAs[String]("key")
- val e_First_Time = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Count_Total = row.getAs[Long]("COUNT_TOTAL")
-
- if (key != null) {
- val document = new BaseEdgeIPVisitFqdn()
- document.setKey(key)
- document.setFrom(from)
- document.setTo(to)
- document.setLAST_FOUND_TIME(new_Last_Found_Time)
- document.setFIRST_FOUND_TIME(e_First_Time)
- document.setCOUNT_TOTAL(new_Count_Total+e_Count_Total)
- docs_Replace.add(document)
- } else {
- val baseDocument: BaseEdgeIPVisitFqdn = new BaseEdgeIPVisitFqdn()
- baseDocument.setKey(new_Key)
- baseDocument.setFrom(s"V_FQDN/$new_Fqdn")
- baseDocument.setTo(s"V_IP/$new_IP")
- baseDocument.setLAST_FOUND_TIME(new_Last_Found_Time)
- baseDocument.setFIRST_FOUND_TIME(new_First_Found_Time)
- baseDocument.setCOUNT_TOTAL(new_Count_Total)
- docs_Insert.add(baseDocument)
- }
- })
- Try(e_Visit_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Visit_Fqdn_to_IP_Coll.replaceDocuments(docs_Replace))
- })
-
- }
-
-
-}
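
All four update methods in this class share one upsert pattern: full-outer-join the freshly aggregated ClickHouse batch against the documents already in ArangoDB, then route each joined row either into an insert batch (no existing key) or into a replace batch (existing key, counters accumulated). A minimal sketch of that routing step, with illustrative field names standing in for the project's real columns:

    import scala.collection.mutable.ArrayBuffer

    // Illustrative stand-ins for the pojo classes and join columns used above.
    case class Doc(key: String, count: Long)
    case class Joined(newKey: Option[String], newCount: Long,
                      oldKey: Option[String], oldCount: Long)

    def route(rows: Iterator[Joined]): (Seq[Doc], Seq[Doc]) = {
      val inserts  = ArrayBuffer.empty[Doc]
      val replaces = ArrayBuffer.empty[Doc]
      rows.foreach {
        // matched an existing document: accumulate onto the stored counter
        case Joined(_, n, Some(k), old)   => replaces += Doc(k, old + n)
        // key unseen so far: stage a fresh document for importDocuments
        case Joined(Some(nk), n, None, _) => inserts += Doc(nk, n)
        case _                            => // neither side present: nothing to do
      }
      (inserts.toSeq, replaces.toSeq)
    }

The methods above perform the same split per partition and then write each batch in one round trip (importDocuments for inserts, replaceDocuments or updateDocuments for the rest), wrapping the calls in Try so a failed batch does not abort the partition.
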
diff --git a/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByDF.scala b/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByDF.scala
deleted file mode 100644
index 20f5884..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/dao/UpdateArangoGraphByDF.scala
+++ /dev/null
@@ -1,250 +0,0 @@
-package cn.ac.iie.dao
-
-import cn.ac.iie.etl.CursorTransform
-import cn.ac.iie.pojo.{BaseEdgeIPVisitFqdn, BaseEgdeFqdnAddressIP, BaseVertexFqdn, BaseVertexIP}
-import cn.ac.iie.test.Config
-import cn.ac.iie.utils.{ConfigUtils, InitArangoDBPool}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-import scala.util.Try
-
-object UpdateArangoGraphByDF {
-
-
- /**
-   * Update the FQDN vertices
-   * @param v_FQDN_DF aggregate result set read from ClickHouse
-   * @param spark the SparkSession instance
- */
- def updateFQDNVertex(v_FQDN_DF:DataFrame,spark:SparkSession): Unit ={
- v_FQDN_DF.printSchema()
-
- val v_FQDN_Cursor_DF = CursorTransform.cursorToDataFrame("V_FQDN",classOf[BaseVertexFqdn],spark)
-
- val v_Fqdn_Join_Df = v_FQDN_DF
- .join(v_FQDN_Cursor_DF,v_FQDN_DF("new_fqdn_name")===v_FQDN_Cursor_DF("key"),"fullouter")
- v_Fqdn_Join_Df.printSchema()
-
- v_Fqdn_Join_Df.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val v_FQDN_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("V_FQDN")
- val docs_Replace = new java.util.ArrayList[BaseVertexFqdn]()
- val docs_Insert = new java.util.ArrayList[BaseVertexFqdn]()
-
- iter.foreach(row => {
- val new_fqdn_name = row.getAs[String]("new_fqdn_name")
- val new_fqdn_first_found_time = row.getAs[Long]("new_fqdn_first_found_time")
- val new_fqdn_last_found_time = row.getAs[Long]("new_fqdn_last_found_time")
- val new_fqdn_count_total = row.getAs[Long]("new_fqdn_count_total")
-
- val fqdn = row.getAs[String]("key")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-
- if (fqdn != null) {
- val document: BaseVertexFqdn = new BaseVertexFqdn()
-        // reuse the key from the cursor side so a row that matched an existing
-        // vertex keeps its original key
-        document.setKey(fqdn)
-        document.setFQDN_NAME(fqdn)
- document.setFQDN_FIRST_FOUND_TIME(v_Fqdn_First)
- document.setFQDN_LAST_FOUND_TIME(new_fqdn_last_found_time)
- document.setFQDN_COUNT_TOTAL(v_Fqdn_Cnt+new_fqdn_count_total)
- docs_Replace.add(document)
- } else {
- val baseDocument: BaseVertexFqdn = new BaseVertexFqdn()
- baseDocument.setKey(new_fqdn_name)
- baseDocument.setFQDN_NAME(new_fqdn_name)
- baseDocument.setFQDN_FIRST_FOUND_TIME(new_fqdn_first_found_time)
- baseDocument.setFQDN_LAST_FOUND_TIME(new_fqdn_last_found_time)
- baseDocument.setFQDN_COUNT_TOTAL(new_fqdn_count_total)
- docs_Insert.add(baseDocument)
- }
- })
- Try(v_FQDN_Coll.replaceDocuments(docs_Replace))
- Try(v_FQDN_Coll.importDocuments(docs_Insert))
- })
-
- }
-
- /**
-   * Update the IP vertices
-   * @param v_IP_DF aggregate result set read from ClickHouse
-   * @param spark the SparkSession instance
- */
- def updateIPVertex(v_IP_DF:DataFrame,spark:SparkSession): Unit ={
- v_IP_DF.printSchema()
-
- val v_IP_Cursor_DF = CursorTransform.cursorToDataFrame("V_IP",classOf[BaseVertexIP],spark)
-
- val v_IP_Join_DF = v_IP_DF.join(v_IP_Cursor_DF,v_IP_DF("new_ip")===v_IP_Cursor_DF("key"),"fullouter")
- v_IP_Join_DF.printSchema()
-
- v_IP_Join_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val v_IP_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("V_IP")
- val docs_Insert: java.util.ArrayList[BaseVertexIP] = new java.util.ArrayList[BaseVertexIP]()
- val docs_Replace: java.util.ArrayList[BaseVertexIP] = new java.util.ArrayList[BaseVertexIP]()
-
- iter.foreach(row => {
- val new_Ip = row.getAs[String]("new_ip")
- val new_Location = row.getAs[String]("new_location")
- val new_Ip_First_Found_Time = row.getAs[Long]("new_ip_first_found_time")
- val new_Ip_Last_Found_Time = row.getAs[Long]("new_ip_last_found_time")
- val new_Ip_Count_Total = row.getAs[Long]("new_ip_count_total")
-
- val key = row.getAs[String]("key")
- val location = row.getAs[String]("IP_LOCATION")
- val v_IP_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val v_IP_Cnt = row.getAs[Long]("IP_APPEAR_COUNT")
-
-
- if (key != null) {
- val document = new BaseVertexIP()
- document.setKey(key)
- document.setIP(key)
- document.setLAST_FOUND_TIME(new_Ip_Last_Found_Time)
- document.setIP_APPEAR_COUNT(v_IP_Cnt+new_Ip_Count_Total)
- document.setFIRST_FOUND_TIME(v_IP_First)
- document.setIP_LOCATION(location)
- docs_Replace.add(document)
- } else {
- val baseDocument = new BaseVertexIP()
- baseDocument.setKey(new_Ip)
- baseDocument.setIP(new_Ip)
- baseDocument.setLAST_FOUND_TIME(new_Ip_Last_Found_Time)
- baseDocument.setIP_APPEAR_COUNT(new_Ip_Count_Total)
- baseDocument.setFIRST_FOUND_TIME(new_Ip_First_Found_Time)
- baseDocument.setIP_LOCATION(new_Location)
- docs_Insert.add(baseDocument)
- }
- })
- Try(v_IP_Coll.importDocuments(docs_Insert))
- Try(v_IP_Coll.updateDocuments(docs_Replace))
- })
-
- }
-
- /**
-   * Aggregate and upsert the e_Address_Fqdn_to_IP edges
-   * @param e_Address_v_FQDN_to_v_IP_DF aggregate result set read from ClickHouse
-   * @param spark the SparkSession instance
- */
- def updateFQDNAddressIPEdge(e_Address_v_FQDN_to_v_IP_DF:DataFrame,spark:SparkSession): Unit ={
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- val e_Fqdn_Address_IP_Cursor_DF = CursorTransform
- .cursorToDataFrame("E_ADDRESS_V_FQDN_TO_V_IP",classOf[BaseEgdeFqdnAddressIP],spark)
-
- e_Fqdn_Address_IP_Cursor_DF.printSchema()
-
- val e_Address_v_FQDN_to_v_IP_Join_DF = e_Address_v_FQDN_to_v_IP_DF
- .join(e_Fqdn_Address_IP_Cursor_DF,
- e_Address_v_FQDN_to_v_IP_DF("new_key")===e_Fqdn_Address_IP_Cursor_DF("key"),
- "fullouter")
-
- e_Address_v_FQDN_to_v_IP_Join_DF.printSchema()
-
- e_Address_v_FQDN_to_v_IP_Join_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val e_Add_Fqdn_to_IP_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("E_ADDRESS_V_FQDN_TO_V_IP")
- val docs_Insert: java.util.ArrayList[BaseEgdeFqdnAddressIP] = new java.util.ArrayList[BaseEgdeFqdnAddressIP]()
- val docs_Replace: java.util.ArrayList[BaseEgdeFqdnAddressIP] = new java.util.ArrayList[BaseEgdeFqdnAddressIP]()
- iter.foreach(row => {
- val new_Fqdn = row.getAs[String]("new_fqdn")
- val new_IP = row.getAs[String]("new_ip")
- val new_Key = row.getAs[String]("new_key")
- val new_First_Found_Time = row.getAs[Long]("new_first_found_time")
- val new_Last_Found_Time = row.getAs[Long]("new_last_found_time")
- val new_Count_Total = row.getAs[Long]("new_count_total")
-
- val from = row.getAs[String]("from")
- val to = row.getAs[String]("to")
- val key = row.getAs[String]("key")
- val e_First_Time = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Count_Total = row.getAs[Long]("COUNT_TOTAL")
-
- if (key != null) {
- val document = new BaseEgdeFqdnAddressIP()
- document.setKey(key)
- document.setFrom(from)
- document.setTo(to)
- document.setLAST_FOUND_TIME(new_Last_Found_Time)
- document.setFIRST_FOUND_TIME(e_First_Time)
- document.setCOUNT_TOTAL(new_Count_Total+e_Count_Total)
- docs_Replace.add(document)
- } else {
- val baseDocument: BaseEgdeFqdnAddressIP = new BaseEgdeFqdnAddressIP()
- baseDocument.setKey(new_Key)
- baseDocument.setFrom(s"V_FQDN/$new_Fqdn")
- baseDocument.setTo(s"V_IP/$new_IP")
- baseDocument.setLAST_FOUND_TIME(new_Last_Found_Time)
- baseDocument.setFIRST_FOUND_TIME(new_First_Found_Time)
- baseDocument.setCOUNT_TOTAL(new_Count_Total)
- docs_Insert.add(baseDocument)
- }
- })
- Try(e_Add_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Add_Fqdn_to_IP_Coll.replaceDocuments(docs_Replace))
- })
-
- }
-
-
- /**
-   * Aggregate and upsert the e_Visit_v_IP_to_v_FQDN edges
-   * @param e_Visit_v_IP_to_v_FQDN_DF aggregate result set read from ClickHouse
-   * @param spark the SparkSession instance
- */
- def updateIPVisitFQDNEdge(e_Visit_v_IP_to_v_FQDN_DF:DataFrame,spark:SparkSession): Unit = {
- e_Visit_v_IP_to_v_FQDN_DF.printSchema()
- val e_IP_Visit_FQDN_Cursor_DF = CursorTransform
- .cursorToDataFrame("E_VISIT_V_IP_TO_V_FQDN",classOf[BaseEdgeIPVisitFqdn],spark)
-
- e_IP_Visit_FQDN_Cursor_DF.printSchema()
-
- val e_Visit_v_IP_to_v_FQDN_Join_DF = e_Visit_v_IP_to_v_FQDN_DF
- .join(e_IP_Visit_FQDN_Cursor_DF, e_Visit_v_IP_to_v_FQDN_DF("new_key") === e_IP_Visit_FQDN_Cursor_DF("key"), "fullouter")
-
- e_Visit_v_IP_to_v_FQDN_Join_DF.printSchema()
-
- e_Visit_v_IP_to_v_FQDN_Join_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val e_Visit_Fqdn_to_IP_Coll = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("E_VISIT_V_IP_TO_V_FQDN")
- val docs_Insert: java.util.ArrayList[BaseEdgeIPVisitFqdn] = new java.util.ArrayList[BaseEdgeIPVisitFqdn]()
- val docs_Replace: java.util.ArrayList[BaseEdgeIPVisitFqdn] = new java.util.ArrayList[BaseEdgeIPVisitFqdn]()
- iter.foreach(row => {
- val new_Fqdn = row.getAs[String]("new_fqdn")
- val new_IP = row.getAs[String]("new_ip")
- val new_Key = row.getAs[String]("new_key")
- val new_First_Found_Time = row.getAs[Long]("new_first_found_time")
- val new_Last_Found_Time = row.getAs[Long]("new_last_found_time")
- val new_Count_Total = row.getAs[Long]("new_count_total")
-
- val to = row.getAs[String]("to")
- val from = row.getAs[String]("from")
- val key = row.getAs[String]("key")
- val e_First_Time = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Count_Total = row.getAs[Long]("COUNT_TOTAL")
-
- if (key != null) {
- val document = new BaseEdgeIPVisitFqdn()
- document.setKey(key)
- document.setFrom(from)
- document.setTo(to)
- document.setLAST_FOUND_TIME(new_Last_Found_Time)
- document.setFIRST_FOUND_TIME(e_First_Time)
- document.setCOUNT_TOTAL(new_Count_Total+e_Count_Total)
- docs_Replace.add(document)
- } else {
- val baseDocument: BaseEdgeIPVisitFqdn = new BaseEdgeIPVisitFqdn()
- baseDocument.setKey(new_Key)
- baseDocument.setFrom(s"V_FQDN/$new_Fqdn")
- baseDocument.setTo(s"V_IP/$new_IP")
- baseDocument.setLAST_FOUND_TIME(new_Last_Found_Time)
- baseDocument.setFIRST_FOUND_TIME(new_First_Found_Time)
- baseDocument.setCOUNT_TOTAL(new_Count_Total)
- docs_Insert.add(baseDocument)
- }
- })
- Try(e_Visit_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Visit_Fqdn_to_IP_Coll.replaceDocuments(docs_Replace))
- })
-
- }
-
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/etl/CursorTransform.scala b/ip-learning/src/main/scala/cn/ac/iie/etl/CursorTransform.scala
deleted file mode 100644
index 9d309f2..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/etl/CursorTransform.scala
+++ /dev/null
@@ -1,33 +0,0 @@
-package cn.ac.iie.etl
-
-import cn.ac.iie.utils.{ConfigUtils, InitArangoDBPool}
-import com.arangodb.ArangoCursor
-import com.arangodb.entity.BaseDocument
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-import scala.reflect.ClassTag
-
-object CursorTransform {
-
- /**
-   * Convert the result set of an ArangoDB query into a DataFrame
-   * @param collection_Name the collection to query
-   * @param class_Type the pojo class each document is mapped to
-   * @param spark the SparkSession instance
-   * @tparam T the pojo type
-   * @return the query results as a DataFrame
- */
- def cursorToDataFrame[T:ClassTag](collection_Name:String,class_Type: Class[T],spark:SparkSession): DataFrame ={
- val query = s"FOR doc IN $collection_Name RETURN doc"
- println(query)
- val cursor: ArangoCursor[T] = InitArangoDBPool.arangoDB.db(ConfigUtils.ARANGODB_DB_NAME)
- .query(query, InitArangoDBPool.bindVars, InitArangoDBPool.options, class_Type)
-
- val cursor_DF = spark.createDataFrame(cursor.asListRemaining(),class_Type)
- cursor_DF.printSchema()
-
- cursor_DF
-
- }
-
-}
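
A usage sketch for cursorToDataFrame, assuming a reachable ArangoDB behind InitArangoDBPool; MyVertex is a hypothetical bean, not one of the project's pojo classes:

    import org.apache.spark.sql.{DataFrame, SparkSession}
    import scala.beans.BeanProperty

    class MyVertex {
      @BeanProperty var key: String = ""
      @BeanProperty var COUNT_TOTAL: Long = 0
    }

    val spark = SparkSession.builder().master("local[*]").appName("cursor-demo").getOrCreate()
    val df: DataFrame = CursorTransform.cursorToDataFrame("V_FQDN", classOf[MyVertex], spark)
    df.filter("COUNT_TOTAL > 100").show()

Worth noting: asListRemaining() drains the entire cursor into driver memory before the DataFrame is built, so the helper is only suitable for collections that fit on the driver.
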
diff --git a/ip-learning/src/main/scala/cn/ac/iie/main/IPLearningApplication.scala b/ip-learning/src/main/scala/cn/ac/iie/main/IPLearningApplication.scala
deleted file mode 100644
index 39c5182..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/main/IPLearningApplication.scala
+++ /dev/null
@@ -1,29 +0,0 @@
-package cn.ac.iie.main
-
-import cn.ac.iie.test.Config
-import cn.ac.iie.dao.BaseMediaDataLoad
-import org.apache.spark.sql.SparkSession
-import org.slf4j.{Logger, LoggerFactory}
-
-object IPLearningApplication {
- private val logger: Logger = LoggerFactory.getLogger(IPLearningApplication.getClass)
-
- def main(args: Array[String]): Unit = {
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
- // .config("spark.kryo.classesToRegister","com.tinkerpop.blueprints.impls.orient.OrientGraphFactory")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
- .getOrCreate()
-
- logger.warn("sparkession获取成功!!!")
- BaseMediaDataLoad.loadMediaDate(spark)
-
- }
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEdgeIPVisitFqdn.scala b/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEdgeIPVisitFqdn.scala
deleted file mode 100644
index 6bb4298..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEdgeIPVisitFqdn.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-package cn.ac.iie.pojo
-
-import com.arangodb.entity.DocumentField
-import com.arangodb.entity.DocumentField.Type
-
-import scala.beans.BeanProperty
-
-class BaseEdgeIPVisitFqdn {
- @BeanProperty
- @DocumentField(Type.FROM)
- var from: String=""
-
- @BeanProperty
- @DocumentField(Type.TO)
- var to: String=""
-
- @BeanProperty
- @DocumentField(Type.KEY)
- var key: String=""
-
- @BeanProperty
- @DocumentField(Type.ID)
- var id: String=""
-
- @BeanProperty
- var FIRST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var LAST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var COUNT_TOTAL:Long = 0
-
-}
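
The @DocumentField annotations are what bind this bean to ArangoDB's edge semantics: Type.KEY, Type.FROM and Type.TO map the key, from and to fields onto the _key, _from and _to system attributes when the Java driver serializes a document, while @BeanProperty generates the getters and setters the update code calls. A round trip with made-up values:

    // illustrative values only
    val edge = new BaseEdgeIPVisitFqdn()
    edge.setKey("1.2.3.4-example.com") // persisted as _key
    edge.setFrom("V_IP/1.2.3.4")       // persisted as _from
    edge.setTo("V_FQDN/example.com")   // persisted as _to
    edge.setCOUNT_TOTAL(1L)
    // collection.insertDocument(edge) would store it with those system attributes
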
diff --git a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEgdeFqdnAddressIP.scala b/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEgdeFqdnAddressIP.scala
deleted file mode 100644
index 89424b1..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseEgdeFqdnAddressIP.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-package cn.ac.iie.pojo
-
-import com.arangodb.entity.DocumentField
-import com.arangodb.entity.DocumentField.Type
-
-import scala.beans.BeanProperty
-
-class BaseEgdeFqdnAddressIP {
- @BeanProperty
- @DocumentField(Type.FROM)
- var from: String=""
-
- @BeanProperty
- @DocumentField(Type.TO)
- var to: String=""
-
- @BeanProperty
- @DocumentField(Type.KEY)
- var key: String=""
-
- @BeanProperty
- @DocumentField(Type.ID)
- var id: String=""
-
- @BeanProperty
- var FIRST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var LAST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var COUNT_TOTAL:Long = 0
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexFqdn.scala b/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexFqdn.scala
deleted file mode 100644
index eb7983c..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexFqdn.scala
+++ /dev/null
@@ -1,30 +0,0 @@
-package cn.ac.iie.pojo
-
-import com.arangodb.entity.DocumentField
-import com.arangodb.entity.DocumentField.Type
-
-import scala.beans.BeanProperty
-
-class BaseVertexFqdn {
-
- @BeanProperty
- @DocumentField(Type.KEY)
- var key: String=""
-
- @BeanProperty
- @DocumentField(Type.ID)
- var id: String=""
-
- @BeanProperty
- var FQDN_FIRST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var FQDN_LAST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var FQDN_COUNT_TOTAL:Long = 0
-
- @BeanProperty
- var FQDN_NAME:String = ""
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexIP.scala b/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexIP.scala
deleted file mode 100644
index 037b632..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/pojo/BaseVertexIP.scala
+++ /dev/null
@@ -1,32 +0,0 @@
-package cn.ac.iie.pojo
-
-import com.arangodb.entity.DocumentField
-import com.arangodb.entity.DocumentField.Type
-
-import scala.beans.BeanProperty
-
-class BaseVertexIP {
- @BeanProperty
- @DocumentField(Type.KEY)
- var key: String=""
-
- @BeanProperty
- @DocumentField(Type.ID)
- var id: String=""
-
- @BeanProperty
- var FIRST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var LAST_FOUND_TIME:Long = 0
-
- @BeanProperty
- var IP_APPEAR_COUNT:Long = 0
-
- @BeanProperty
- var IP:String = ""
-
- @BeanProperty
- var IP_LOCATION:String = ""
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDBSparkTest.scala b/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDBSparkTest.scala
deleted file mode 100644
index f520f3c..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDBSparkTest.scala
+++ /dev/null
@@ -1,52 +0,0 @@
-package cn.ac.iie.test
-
-import cn.ac.iie.dao.{BaseMediaDataLoad, UpdateArangoGraphByArangoSpark}
-import cn.ac.iie.pojo.{BaseEdgeIPVisitFqdn, BaseEgdeFqdnAddressIP, BaseVertexFqdn, BaseVertexIP}
-import cn.ac.iie.utils.ConfigUtils
-import com.arangodb.spark.rdd.ArangoRDD
-import com.arangodb.spark.{ArangoSpark, ReadOptions}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-object ArangoDBSparkTest {
- def main(args: Array[String]): Unit = {
- val spark: SparkSession = SparkSession
- .builder()
- .appName(ConfigUtils.SPARK_APP_NAME)
- .config("spark.serializer", ConfigUtils.SPARK_SERIALIZER)
- .config("spark.network.timeout", ConfigUtils.SPARK_NETWORK_TIMEOUT)
- .config("spark.sql.shuffle.partitions", ConfigUtils.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", ConfigUtils.SPARK_EXECUTOR_MEMORY)
- .config("arangodb.hosts", "192.168.40.127:8529")
- .config("arangodb.user", ConfigUtils.ARANGODB_USER)
- .config("arangodb.password", ConfigUtils.ARANGODB_PASSWORD)
- .config("arangodb.maxConnections",ConfigUtils.MAXPOOLSIZE)
- .master(ConfigUtils.MASTER)
- .getOrCreate()
-
- BaseMediaDataLoad.loadMediaDate(spark)
- val v_FQDN_DF = BaseMediaDataLoad.getFQDNVertexFromMedia(spark)
- val v_IP_DF = BaseMediaDataLoad.getIPVertexFromMedia(spark)
- val e_Address_v_FQDN_to_v_IP_DF = BaseMediaDataLoad.getFQDNAddressIPEdgeFromMedia(spark)
- val e_Visit_v_IP_to_v_FQDN_DF= BaseMediaDataLoad.getIPVisitFQDNEdgeFromMedia(spark)
-
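-    // the arangodb.* settings passed to the builder above are picked up by the
-    // ArangoSpark connector, so the load calls below only name collection and DB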
- val v_FQDN_Cursor_Rdd: ArangoRDD[BaseVertexFqdn] = ArangoSpark.load[BaseVertexFqdn](spark.sparkContext, "V_FQDN", ReadOptions(ConfigUtils.ARANGODB_DB_NAME))
- val v_FQDN_Cursor_DF: DataFrame = spark.createDataFrame(v_FQDN_Cursor_Rdd,classOf[BaseVertexFqdn])
-
- val v_IP_Cursor_Rdd: ArangoRDD[BaseVertexIP] = ArangoSpark.load[BaseVertexIP](spark.sparkContext, "V_IP", ReadOptions(ConfigUtils.ARANGODB_DB_NAME))
- val v_IP_Cursor_DF: DataFrame = spark.createDataFrame(v_IP_Cursor_Rdd,classOf[BaseVertexIP])
-
- val e_Fqdn_Address_IP_Cursor_Rdd: ArangoRDD[BaseEgdeFqdnAddressIP] = ArangoSpark.load[BaseEgdeFqdnAddressIP](spark.sparkContext, "E_ADDRESS_V_FQDN_TO_V_IP", ReadOptions(ConfigUtils.ARANGODB_DB_NAME))
- val e_Fqdn_Address_IP_Cursor_DF: DataFrame = spark.createDataFrame(e_Fqdn_Address_IP_Cursor_Rdd,classOf[BaseEgdeFqdnAddressIP])
-
- val e_IP_Visit_FQDN_Cursor_Rdd: ArangoRDD[BaseEdgeIPVisitFqdn] = ArangoSpark.load[BaseEdgeIPVisitFqdn](spark.sparkContext, "E_VISIT_V_IP_TO_V_FQDN", ReadOptions(ConfigUtils.ARANGODB_DB_NAME))
- val e_IP_Visit_FQDN_Cursor_DF: DataFrame = spark.createDataFrame(e_IP_Visit_FQDN_Cursor_Rdd,classOf[BaseEdgeIPVisitFqdn])
-
- UpdateArangoGraphByArangoSpark.updateFQDNVertex(v_FQDN_DF,v_FQDN_Cursor_DF)
- UpdateArangoGraphByArangoSpark.updateIPVertex(v_IP_DF,v_IP_Cursor_DF)
- UpdateArangoGraphByArangoSpark.updateFQDNAddressIPEdge(e_Address_v_FQDN_to_v_IP_DF,e_Fqdn_Address_IP_Cursor_DF)
- UpdateArangoGraphByArangoSpark.updateIPVisitFQDNEdge(e_Visit_v_IP_to_v_FQDN_DF,e_IP_Visit_FQDN_Cursor_DF)
- spark.close()
-    // exit status 0 marks the run as successful to the caller
-    System.exit(0)
- }
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbReadV_IPTest.scala b/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbReadV_IPTest.scala
deleted file mode 100644
index e570e58..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbReadV_IPTest.scala
+++ /dev/null
@@ -1,37 +0,0 @@
-package cn.ac.iie.test
-
-import com.arangodb.entity.BaseDocument
-import com.arangodb.model.AqlQueryOptions
-import com.arangodb.util.MapBuilder
-import com.arangodb.{ArangoCursor, ArangoDB}
-
-object ArangoDbReadV_IPTest {
- @transient
- var arangoDB: ArangoDB = _
-
- def main(args: Array[String]): Unit = {
- arangoDB = new ArangoDB.Builder()
- .maxConnections(Config.MAXPOOLSIZE)
- .host("192.168.40.127", 8529)
- .user("root")
- .password("111111")
- .build
- val bindVars = new MapBuilder().get
- val options = new AqlQueryOptions()
- .ttl(Config.ARANGODB_TTL)
- val v_IP_Mutabal_Map = new java.util.HashMap[String,BaseDocument](16048576,0.9f)
- val v_IP_Query = "FOR doc IN V_IP RETURN doc"
- val v_IP_Cursor: ArangoCursor[BaseDocument] = arangoDB.db("insert_iplearn_index")
- .query(v_IP_Query, bindVars, options, classOf[BaseDocument])
-
- while (v_IP_Cursor.hasNext){
- val document = v_IP_Cursor.next()
- v_IP_Mutabal_Map.put(document.getKey ,document)
- }
-
- println(v_IP_Mutabal_Map.size())
- arangoDB.shutdown()
-
- }
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTest.scala b/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTest.scala
deleted file mode 100644
index e80b02b..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTest.scala
+++ /dev/null
@@ -1,314 +0,0 @@
-package cn.ac.iie.test
-
-import com.arangodb.ArangoDB
-import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.slf4j.{Logger, LoggerFactory}
-
-import scala.util.Try
-
-object ArangoDbTest {
-  private val logger: Logger = LoggerFactory.getLogger(ArangoDbTest.getClass)
-
- @transient
- var arangoDB: ArangoDB = _
-
- def main(args: Array[String]): Unit = {
- // val warehouseLocation = new File("spark-warehouse").getAbsolutePath
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", Config.SPARK_SERIALIZER)
- // .config("spark.kryo.classesToRegister","com.tinkerpop.blueprints.impls.orient.OrientGraphFactory")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
-
- /*
- .config("spark.driver.host", "192.168.41.79")
- .config("spark.jars", "D:\\GITREPO\\ip-learning\\target\\ip-learning-1.0-SNAPSHOT-jar-with-dependencies.jar")
- .master("spark://192.168.40.119:7077")
- */
- .getOrCreate()
- logger.warn("sparkession获取成功!!!")
-
- // val sql = "(select * from av_miner.media_expire_patch_local limit 1000)"
-
- val mediaDataFrame: DataFrame = spark.read.format("jdbc")
- .option("url", "jdbc:clickhouse://192.168.40.193:8123")
- // .option("dbtable", "av_miner.media_expire_patch")
- // .option("dbtable", "(select * from av_miner.media_expire_patch limit 10)")
- // .option("dbtable","(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where media_domain not LIKE '%\\n%')")
- .option("dbtable", s"(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where recv_time>=${Config.MINTIME} and recv_time<=${Config.MAXTIME})")
- // .option("dbtable","(select media_domain,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region,min(recv_time) as min_recv_time,max(recv_time) as max_recv_time from av_miner.media_expire_patch group by media_domain,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region limit 10)")
- .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
- .option("user", "default")
- .option("password", "111111")
- .option("numPartitions", Config.NUMPARTITIONS)
- .option("partitionColumn", "recv_time")
- .option("lowerBound", Config.MINTIME)
- .option("upperBound", Config.MAXTIME)
- .option("fetchsize", Config.SPARK_SQL_READ_FETCHSIZE)
- .load()
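-    // numPartitions plus partitionColumn/lowerBound/upperBound make Spark issue one
-    // range-bounded ClickHouse query per partition over recv_time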
- // mediaDataFrame.printSchema()
- mediaDataFrame.createOrReplaceGlobalTempView("media_expire_patch")
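-    // registered in the global_temp database, hence the global_temp.media_expire_patch
-    // qualifier in the queries below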
- // val mediaDataGlobalView = spark.sql("select * from global_temp.media_expire_patch limit 10")
- // mediaDataGlobalView.show()
-
- val v_FQDN_DF = spark.sql(
- """
- |SELECT
- | media_domain AS FQDN_NAME,
- | MIN( recv_time ) AS FQDN_FIRST_FOUND_TIME,
- | MAX( recv_time ) AS FQDN_LAST_FOUND_TIME,
- | COUNT( * ) AS FQDN_COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | media_domain != ''
- |GROUP BY
- | media_domain
- """.stripMargin
- )
- val s_IP_DF = spark.sql(
- """
- select
- s1_s_ip as IP,
- s1_s_location_region as location,
- MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- COUNT( * ) AS IP_COUNT_TOTAL
- from global_temp.media_expire_patch
- GROUP BY
- IP,
- location
- """.stripMargin)
-
- val d_IP_DF = spark.sql(
- """
- select
- s1_d_ip as IP,
- s1_d_location_region as location,
- MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- COUNT( * ) AS IP_COUNT_TOTAL
- from global_temp.media_expire_patch
- GROUP BY
- IP,
- location
- """.stripMargin)
- import org.apache.spark.sql.functions._
-    // src-IP and dst-IP aggregates are unioned and re-grouped so one vertex covers
-    // both sides of a flow; the per-side subtotals are summed into IP_COUNT_TOTAL
-    val v_IP_DF = s_IP_DF.union(d_IP_DF).groupBy("IP", "location").agg(
-      min("IP_FIRST_FOUND_TIME").as("IP_FIRST_FOUND_TIME"),
-      max("IP_LAST_FOUND_TIME").as("IP_LAST_FOUND_TIME"),
-      sum("IP_COUNT_TOTAL").as("IP_COUNT_TOTAL")
-    )
-
- val e_Address_v_FQDN_to_v_IP_DF = spark.sql(
- """
- |SELECT
- | media_domain AS V_FQDN,
- | s1_d_ip AS V_IP,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( media_domain != '' )
- | AND ( s1_d_ip != '' )
- |GROUP BY
- | s1_d_ip,
- | media_domain
- """.stripMargin)
-
- val e_Visit_v_IP_to_v_FQDN_DF = spark.sql(
- """
- |SELECT
- | s1_s_ip AS V_IP,
- | media_domain AS V_FQDN,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( s1_s_ip != '' )
- | AND ( media_domain != '' )
- |GROUP BY
- | s1_s_ip,
- | media_domain
- """.stripMargin)
-
-
- /**
-     * Obtain the database connection
- */
- arangoDB = new ArangoDB.Builder()
- .maxConnections(Config.MAXPOOLSIZE)
- .host("192.168.40.127", 8529)
- .user("root")
- .password("111111")
- .build
-
-
- /**
-     * Update the FQDN vertices
- */
- v_FQDN_DF.printSchema()
- v_FQDN_DF.foreachPartition(iter => {
- val v_FQDN_Coll = arangoDB.db("insert_iplearn_index").collection("V_FQDN")
- val docs_Insert = new java.util.ArrayList[BaseDocument]()
- val docs_Update = new java.util.ArrayList[BaseDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("FQDN_NAME")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Last = row.getAs[Long]("FQDN_LAST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-
- if (v_FQDN_Coll.documentExists(fqdn)) {
- val document: BaseDocument = v_FQDN_Coll.getDocument(fqdn, classOf[BaseDocument])
- val fqdn_Cnt = Try(document.getAttribute("FQDN_COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("FQDN_COUNT_TOTAL", fqdn_Cnt)
- document.addAttribute("FQDN_LAST_FOUND_TIME", v_Fqdn_Last)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(fqdn)
- baseDocument.addAttribute("FQDN_NAME", fqdn)
- baseDocument.addAttribute("FQDN_FIRST_FOUND_TIME", v_Fqdn_First)
- baseDocument.addAttribute("FQDN_LAST_FOUND_TIME", v_Fqdn_Last)
- baseDocument.addAttribute("FQDN_COUNT_TOTAL", v_Fqdn_Cnt)
- docs_Insert.add(baseDocument)
- }
- i+=1
- })
- Try(v_FQDN_Coll.importDocuments(docs_Insert))
- Try(v_FQDN_Coll.updateDocuments(docs_Update))
- })
-
-
- /**
-     * Update the IP vertices
- */
- v_IP_DF.printSchema()
- v_IP_DF.foreachPartition(iter => {
- val v_IP_Coll = arangoDB.db("insert_iplearn_index").collection("V_IP")
- val docs_Insert: java.util.ArrayList[BaseDocument] = new java.util.ArrayList[BaseDocument]()
- val docs_Update: java.util.ArrayList[BaseDocument] = new java.util.ArrayList[BaseDocument]()
- var i = 0
-
- iter.foreach(row => {
- val ip = row.getAs[String]("IP")
- val location = row.getAs[String]("location")
- val v_IP_First = row.getAs[Long]("IP_FIRST_FOUND_TIME")
- val v_IP_Last = row.getAs[Long]("IP_LAST_FOUND_TIME")
- val v_IP_Cnt = row.getAs[Long]("IP_COUNT_TOTAL")
-
- if (v_IP_Coll.documentExists(ip)) {
- val document: BaseDocument = v_IP_Coll.getDocument(ip, classOf[BaseDocument])
- val ip_Cnt = Try(document.getAttribute("IP_APPEAR_COUNT")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", v_IP_Last)
- document.addAttribute("IP_APPEAR_COUNT", v_IP_Cnt+ip_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(ip)
- baseDocument.addAttribute("IP", ip)
- baseDocument.addAttribute("IP_LOCATION", location)
- baseDocument.addAttribute("FIRST_FOUND_TIME", v_IP_First)
- baseDocument.addAttribute("LAST_FOUND_TIME", v_IP_Last)
- baseDocument.addAttribute("IP_APPEAR_COUNT", v_IP_Cnt)
- docs_Insert.add(baseDocument)
- }
- i+=1
- })
- Try(v_IP_Coll.importDocuments(docs_Insert))
- Try(v_IP_Coll.updateDocuments(docs_Update))
- })
-
- /**
-     * Aggregate and upsert the e_Address_Fqdn_to_IP edges
- */
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Address_v_FQDN_to_v_IP_DF.foreachPartition(iter => {
- val e_Add_Fqdn_to_IP_Coll = arangoDB.db("insert_iplearn_index").collection("E_ADDRESS_V_FQDN_TO_V_IP")
- val docs_Insert: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- val docs_Update: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
-
- if (e_Add_Fqdn_to_IP_Coll.documentExists(fqdn+"-"+ip)) {
- val document: BaseEdgeDocument = e_Add_Fqdn_to_IP_Coll.getDocument(fqdn+"-"+ip, classOf[BaseEdgeDocument])
- val e_new_Cnt = Try(document.getAttribute("COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", e_Last)
- document.addAttribute("COUNT_TOTAL", e_new_Cnt+e_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseEdgeDocument = new BaseEdgeDocument()
- baseDocument.setKey(fqdn+"-"+ip)
- baseDocument.setFrom(s"V_FQDN/$fqdn")
- baseDocument.setTo(s"V_IP/$ip")
- baseDocument.addAttribute("COUNT_TOTAL",e_Cnt)
- baseDocument.addAttribute("FIRST_FOUND_TIME",e_First)
- baseDocument.addAttribute("LAST_FOUND_TIME",e_Last)
- docs_Insert.add(baseDocument)
- }
-// println(fqdn+"-"+ip)
-
- })
- Try(e_Add_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Add_Fqdn_to_IP_Coll.updateDocuments(docs_Update))
- })
-
- /**
-     * Aggregate and upsert the e_Visit_v_IP_to_v_FQDN edges
- */
- e_Visit_v_IP_to_v_FQDN_DF.printSchema()
- e_Visit_v_IP_to_v_FQDN_DF.foreachPartition(iter => {
- val e_Visit_Fqdn_to_IP_Coll = arangoDB.db("insert_iplearn_index").collection("E_VISIT_V_IP_TO_V_FQDN")
- val docs_Insert: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- val docs_Update: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
-
- if (e_Visit_Fqdn_to_IP_Coll.documentExists(ip+"-"+fqdn)) {
- val document: BaseEdgeDocument = e_Visit_Fqdn_to_IP_Coll.getDocument(ip+"-"+fqdn, classOf[BaseEdgeDocument])
- val e_new_Cnt = Try(document.getAttribute("COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", e_Last)
- document.addAttribute("COUNT_TOTAL", e_new_Cnt+e_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseEdgeDocument = new BaseEdgeDocument()
- baseDocument.setKey(ip+"-"+fqdn)
- baseDocument.setFrom(s"V_IP/$ip")
- baseDocument.setTo(s"V_FQDN/$fqdn")
- baseDocument.addAttribute("COUNT_TOTAL",e_Cnt)
- baseDocument.addAttribute("FIRST_FOUND_TIME",e_First)
- baseDocument.addAttribute("LAST_FOUND_TIME",e_Last)
- docs_Insert.add(baseDocument)
- }
- i+=1
- })
- Try(e_Visit_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Visit_Fqdn_to_IP_Coll.updateDocuments(docs_Update))
-
- })
-
- arangoDB.shutdown()
- }
-
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemory.scala b/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemory.scala
deleted file mode 100644
index 7267619..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemory.scala
+++ /dev/null
@@ -1,355 +0,0 @@
-package cn.ac.iie.test
-
-import cn.ac.iie.utils.ConfigUtils
-import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
-import com.arangodb.model.AqlQueryOptions
-import com.arangodb.util.MapBuilder
-import com.arangodb.{ArangoCursor, ArangoDB}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.slf4j.{Logger, LoggerFactory}
-
-import scala.util.Try
-
-object ArangoDbTestMemory {
- private val logger: Logger = LoggerFactory.getLogger(ArangoDbTestMemory.getClass)
- @transient
- var arangoDB: ArangoDB = _
-
-
- def main(args: Array[String]): Unit = {
-
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", Config.SPARK_SERIALIZER)
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
- .getOrCreate()
- logger.warn("sparkession获取成功!!!")
-
- val mediaDataFrame: DataFrame = spark.read.format("jdbc")
- .option("url", "jdbc:clickhouse://192.168.40.193:8123")
- .option("dbtable", s"(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where recv_time>=${Config.MINTIME} and recv_time<=${Config.MAXTIME})")
- .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
- .option("user", "default")
- .option("password", "111111")
- .option("numPartitions", Config.NUMPARTITIONS)
- .option("partitionColumn", "recv_time")
- .option("lowerBound", Config.MINTIME)
- .option("upperBound", Config.MAXTIME)
- .option("fetchsize", Config.SPARK_SQL_READ_FETCHSIZE)
- .option("socket_timeout",Config.CLICKHOUSE_SOCKET_TIMEOUT)
- .load()
- mediaDataFrame.printSchema()
- mediaDataFrame.createOrReplaceGlobalTempView("media_expire_patch")
-
- val v_FQDN_DF = spark.sql(
- """
- |SELECT
- | media_domain AS FQDN_NAME,
- | MIN( recv_time ) AS FQDN_FIRST_FOUND_TIME,
- | MAX( recv_time ) AS FQDN_LAST_FOUND_TIME,
- | COUNT( * ) AS FQDN_COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | media_domain != ''
- |GROUP BY
- | media_domain
- """.stripMargin
- )
- val s_IP_DF = spark.sql(
- """
- select
- s1_s_ip as IP,
- s1_s_location_region as location,
- MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- COUNT( * ) AS IP_COUNT_TOTAL
- from global_temp.media_expire_patch
- GROUP BY
- IP,
- location
- """.stripMargin)
-
- val d_IP_DF = spark.sql(
- """
- select
- s1_d_ip as IP,
- s1_d_location_region as location,
- MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- COUNT( * ) AS IP_COUNT_TOTAL
- from global_temp.media_expire_patch
- GROUP BY
- IP,
- location
- """.stripMargin)
- import org.apache.spark.sql.functions._
-    val v_IP_DF = s_IP_DF.union(d_IP_DF).groupBy("IP", "location").agg(
-      min("IP_FIRST_FOUND_TIME").as("IP_FIRST_FOUND_TIME"),
-      max("IP_LAST_FOUND_TIME").as("IP_LAST_FOUND_TIME"),
-      sum("IP_COUNT_TOTAL").as("IP_COUNT_TOTAL")
-    )
-
- val e_Address_v_FQDN_to_v_IP_DF: DataFrame = spark.sql(
- """
- |SELECT
- | media_domain AS V_FQDN,
- | s1_d_ip AS V_IP,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( media_domain != '' )
- | AND ( s1_d_ip != '' )
- |GROUP BY
- | s1_d_ip,
- | media_domain
- """.stripMargin)
-
- val e_Visit_v_IP_to_v_FQDN_DF = spark.sql(
- """
- |SELECT
- | s1_s_ip AS V_IP,
- | media_domain AS V_FQDN,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( s1_s_ip != '' )
- | AND ( media_domain != '' )
- |GROUP BY
- | s1_s_ip,
- | media_domain
- """.stripMargin)
-
-
- /**
-     * Obtain the database connection
- */
- arangoDB = new ArangoDB.Builder()
- .maxConnections(Config.MAXPOOLSIZE)
- .host(ConfigUtils.ARANGODB_HOST, ConfigUtils.ARANGODB_PORT)
- .user(ConfigUtils.ARANGODB_USER)
- .password(ConfigUtils.ARANGODB_PASSWORD)
- .build
- val bindVars = new MapBuilder().get
- val options = new AqlQueryOptions()
- .ttl(Config.ARANGODB_TTL)
-
- val v_FQDN_Mutabal_Map = new java.util.HashMap[String,BaseDocument](1048576,0.9f)
- val v_IP_Mutabal_Map = new java.util.HashMap[String,BaseDocument](16048576,0.9f)
- val e_FQDN_Address_IP_Mutabal_Map = new java.util.HashMap[String,BaseEdgeDocument](1048576,0.9f)
- val e_IP_Visit_FQDN_Mutabal_Map = new java.util.HashMap[String,BaseEdgeDocument](30408576,0.9f)
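-    // generously pre-sized with a 0.9 load factor so materializing whole collections
-    // on the driver does not trigger repeated rehashing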
-
- /**
-     * Update the FQDN vertices
- */
- val v_FQDN_Query = "FOR doc IN V_FQDN RETURN doc"
- val v_FQDN_Cursor: ArangoCursor[BaseDocument] = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME)
- .query(v_FQDN_Query, bindVars, options, classOf[BaseDocument])
- while (v_FQDN_Cursor.hasNext){
- val document = v_FQDN_Cursor.next()
- v_FQDN_Mutabal_Map.put(document.getKey,document)
- }
- val v_FQDN_Map= spark.sparkContext.broadcast(v_FQDN_Mutabal_Map)
- v_FQDN_Mutabal_Map.clear()
- v_FQDN_DF.show(20)
- v_FQDN_DF.printSchema()
- v_FQDN_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val v_FQDN_Coll = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("V_FQDN")
- val docs_Insert = new java.util.ArrayList[BaseDocument]()
- val docs_Update = new java.util.ArrayList[BaseDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("FQDN_NAME")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Last = row.getAs[Long]("FQDN_LAST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-
- val doc = v_FQDN_Map.value.getOrDefault(fqdn, null)
- if (doc != null) {
- val document: BaseDocument = doc
- val fqdn_Cnt = Try(document.getAttribute("FQDN_COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("FQDN_COUNT_TOTAL", fqdn_Cnt)
- document.addAttribute("LAST_FOUND_TIME", v_Fqdn_Last)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(fqdn)
- baseDocument.addAttribute("FQDN_NAME", fqdn)
- baseDocument.addAttribute("FIRST_FOUND_TIME", v_Fqdn_First)
- baseDocument.addAttribute("LAST_FOUND_TIME", v_Fqdn_Last)
- baseDocument.addAttribute("FQDN_COUNT_TOTAL", v_Fqdn_Cnt)
- docs_Insert.add(baseDocument)
- }
- })
- v_FQDN_Coll.importDocuments(docs_Insert)
- Try(v_FQDN_Coll.updateDocuments(docs_Update))
- })
- v_FQDN_Map.destroy()
-
- /**
-     * Update the IP vertices
- */
- val v_IP_Query = "FOR doc IN V_IP RETURN doc"
- val v_IP_Cursor: ArangoCursor[BaseDocument] = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME)
- .query(v_IP_Query, bindVars, options, classOf[BaseDocument])
- while (v_IP_Cursor.hasNext){
- val document = v_IP_Cursor.next()
- v_IP_Mutabal_Map.put(document.getKey ,document)
- }
- val v_IP_Map = spark.sparkContext.broadcast(v_IP_Mutabal_Map)
-// val v_IP_Map = v_IP_Mutabal_Map.toMap
- v_IP_Mutabal_Map.clear()
- v_IP_DF.printSchema()
- v_IP_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val v_IP_Coll = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("V_IP")
- val docs_Insert: java.util.ArrayList[BaseDocument] = new java.util.ArrayList[BaseDocument]()
- val docs_Update: java.util.ArrayList[BaseDocument] = new java.util.ArrayList[BaseDocument]()
- var i = 0
-
- iter.foreach(row => {
- val ip = row.getAs[String]("IP")
- val location = row.getAs[String]("location")
- val v_IP_First = row.getAs[Long]("IP_FIRST_FOUND_TIME")
- val v_IP_Last = row.getAs[Long]("IP_LAST_FOUND_TIME")
- val v_IP_Cnt = row.getAs[Long]("IP_COUNT_TOTAL")
- val doc = v_IP_Map.value.getOrDefault(ip, null)
-
- if (doc != null) {
- val document: BaseDocument = doc
- val ip_Cnt = Try(document.getAttribute("IP_APPEAR_COUNT")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", v_IP_Last)
- document.addAttribute("IP_APPEAR_COUNT", v_IP_Cnt+ip_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(ip)
- baseDocument.addAttribute("IP", ip)
- baseDocument.addAttribute("IP_LOCATION", location)
- baseDocument.addAttribute("FIRST_FOUND_TIME", v_IP_First)
- baseDocument.addAttribute("LAST_FOUND_TIME", v_IP_Last)
- baseDocument.addAttribute("IP_APPEAR_COUNT", v_IP_Cnt)
- docs_Insert.add(baseDocument)
- }
- })
- Try(v_IP_Coll.importDocuments(docs_Insert))
- Try(v_IP_Coll.updateDocuments(docs_Update))
- })
- v_IP_Map.destroy()
-
-
- /**
-     * Aggregate and upsert the e_Address_Fqdn_to_IP edges
- */
- val e_FQDN_Address_IP_Query = "FOR doc IN E_ADDRESS_V_FQDN_TO_V_IP RETURN doc"
- val e_FQDN_Address_IP_Cursor: ArangoCursor[BaseEdgeDocument] = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME)
- .query(e_FQDN_Address_IP_Query, bindVars, options, classOf[BaseEdgeDocument])
- while (e_FQDN_Address_IP_Cursor.hasNext){
- val document = e_FQDN_Address_IP_Cursor.next()
- e_FQDN_Address_IP_Mutabal_Map.put(document.getKey ,document)
- }
- val e_FQDN_Address_IP_Map = spark.sparkContext.broadcast(e_FQDN_Address_IP_Mutabal_Map)
- e_FQDN_Address_IP_Mutabal_Map.clear()
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Address_v_FQDN_to_v_IP_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val e_Add_Fqdn_to_IP_Coll = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("E_ADDRESS_V_FQDN_TO_V_IP")
- val docs_Insert: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- val docs_Update: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
- val doc = e_FQDN_Address_IP_Map.value.getOrDefault(fqdn+"-"+ip, null)
- if (doc != null) {
- val document: BaseEdgeDocument = doc
- val e_new_Cnt = Try(document.getAttribute("COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.setFrom(s"V_FQDN/$fqdn")
- document.setTo(s"V_IP/$ip")
- document.addAttribute("LAST_FOUND_TIME", e_Last)
- document.addAttribute("COUNT_TOTAL", e_new_Cnt+e_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseEdgeDocument = new BaseEdgeDocument()
- baseDocument.setKey(fqdn+"-"+ip)
- baseDocument.setFrom(s"V_FQDN/$fqdn")
- baseDocument.setTo(s"V_IP/$ip")
- baseDocument.addAttribute("COUNT_TOTAL",e_Cnt)
- baseDocument.addAttribute("FIRST_FOUND_TIME",e_First)
- baseDocument.addAttribute("LAST_FOUND_TIME",e_Last)
- docs_Insert.add(baseDocument)
- }
- })
- Try(e_Add_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Add_Fqdn_to_IP_Coll.updateDocuments(docs_Update))
- })
- e_FQDN_Address_IP_Map.destroy()
-
-
- /**
-     * Aggregate and upsert the e_Visit_v_IP_to_v_FQDN edges
- */
- val e_IP_Visit_FQDN_Query = "FOR doc IN E_VISIT_V_IP_TO_V_FQDN RETURN doc"
- val e_IP_Visit_FQDN_Cursor: ArangoCursor[BaseEdgeDocument] = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME)
- .query(e_IP_Visit_FQDN_Query, bindVars, options, classOf[BaseEdgeDocument])
- while (e_IP_Visit_FQDN_Cursor.hasNext){
- val document = e_IP_Visit_FQDN_Cursor.next()
- e_IP_Visit_FQDN_Mutabal_Map.put(document.getKey ,document)
- }
- val e_IP_Visit_FQDN_Map = spark.sparkContext.broadcast(e_IP_Visit_FQDN_Mutabal_Map)
-
- e_IP_Visit_FQDN_Mutabal_Map.clear()
- e_Visit_v_IP_to_v_FQDN_DF.printSchema()
- e_Visit_v_IP_to_v_FQDN_DF.coalesce(Config.REPARTITION_NUMBER).foreachPartition(iter => {
- val e_Visit_Fqdn_to_IP_Coll = arangoDB.db(ConfigUtils.ARANGODB_DB_NAME).collection("E_VISIT_V_IP_TO_V_FQDN")
- val docs_Insert: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- val docs_Update: java.util.ArrayList[BaseEdgeDocument] = new java.util.ArrayList[BaseEdgeDocument]()
- var i = 0
- iter.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
- val doc = e_IP_Visit_FQDN_Map.value.getOrDefault(ip+"-"+fqdn, null)
-
- if (doc != null) {
- val document: BaseEdgeDocument = doc
- val e_new_Cnt = Try(document.getAttribute("COUNT_TOTAL")).getOrElse(0).toString.toInt
- document.addAttribute("LAST_FOUND_TIME", e_Last)
- document.addAttribute("COUNT_TOTAL", e_new_Cnt+e_Cnt)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseEdgeDocument = new BaseEdgeDocument()
- baseDocument.setKey(ip+"-"+fqdn)
- baseDocument.setFrom("V_IP/"+ip)
- baseDocument.setTo("V_FQDN/"+fqdn)
- baseDocument.addAttribute("COUNT_TOTAL",e_Cnt)
- baseDocument.addAttribute("FIRST_FOUND_TIME",e_First)
- baseDocument.addAttribute("LAST_FOUND_TIME",e_Last)
- docs_Insert.add(baseDocument)
- }
- })
- Try(e_Visit_Fqdn_to_IP_Coll.importDocuments(docs_Insert))
- Try(e_Visit_Fqdn_to_IP_Coll.updateDocuments(docs_Update))
-
- })
- e_IP_Visit_FQDN_Map.destroy()
- arangoDB.shutdown()
-
- spark.close()
- }
-
-}
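
The pattern this variant demonstrates: instead of one documentExists/getDocument round trip per row (as ArangoDbTest does above), each collection is scanned once into a driver-side HashMap and broadcast, so executors perform pure in-memory lookups. A minimal self-contained sketch of the same idea, with illustrative data:

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().master("local[*]").appName("broadcast-demo").getOrCreate()

    val stored = new java.util.HashMap[String, Long]() // stand-in for the cursor scan
    stored.put("example.com", 40L)

    val bcast = spark.sparkContext.broadcast(stored)
    val merged = spark.sparkContext
      .parallelize(Seq(("example.com", 2L), ("new.example.org", 1L)))
      .map { case (key, fresh) =>
        val old = bcast.value.getOrDefault(key, 0L) // in-memory lookup, no DB call
        (key, old + fresh)                          // accumulate like COUNT_TOTAL above
      }
      .collect()

    merged.foreach(println)
    bcast.destroy()
    spark.stop()

As in the code above, destroy() releases the broadcast blocks once the merged results have been materialized.
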
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemoryGroupBy.scala b/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemoryGroupBy.scala
deleted file mode 100644
index 5e51419..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/ArangoDbTestMemoryGroupBy.scala
+++ /dev/null
@@ -1,40 +0,0 @@
-package cn.ac.iie.test
-
-import cn.ac.iie.dao.{BaseMediaDataLoad, UpdateArangoGraphByDF}
-import cn.ac.iie.utils.{ConfigUtils, InitArangoDBPool}
-import org.apache.spark.sql.SparkSession
-import org.slf4j.{Logger, LoggerFactory}
-
-object ArangoDbTestMemoryGroupBy {
-
- private val logger: Logger = LoggerFactory.getLogger(ArangoDbTestMemoryGroupBy.getClass)
-
- def main(args: Array[String]): Unit = {
- val spark: SparkSession = SparkSession
- .builder()
- .appName(ConfigUtils.SPARK_APP_NAME)
- .config("spark.serializer", ConfigUtils.SPARK_SERIALIZER)
- .config("spark.network.timeout", ConfigUtils.SPARK_NETWORK_TIMEOUT)
- .config("spark.sql.shuffle.partitions", ConfigUtils.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", ConfigUtils.SPARK_EXECUTOR_MEMORY)
- .master(ConfigUtils.MASTER)
- .getOrCreate()
- logger.warn("sparkession获取成功!!!")
-
- BaseMediaDataLoad.loadMediaDate(spark)
- val v_FQDN_DF = BaseMediaDataLoad.getFQDNVertexFromMedia(spark)
- val v_IP_DF = BaseMediaDataLoad.getIPVertexFromMedia(spark)
- val e_Address_v_FQDN_to_v_IP_DF = BaseMediaDataLoad.getFQDNAddressIPEdgeFromMedia(spark)
- val e_Visit_v_IP_to_v_FQDN_DF= BaseMediaDataLoad.getIPVisitFQDNEdgeFromMedia(spark)
-
- UpdateArangoGraphByDF.updateFQDNVertex(v_FQDN_DF,spark)
- UpdateArangoGraphByDF.updateIPVertex(v_IP_DF,spark)
- UpdateArangoGraphByDF.updateFQDNAddressIPEdge(e_Address_v_FQDN_to_v_IP_DF,spark)
- UpdateArangoGraphByDF.updateIPVisitFQDNEdge(e_Visit_v_IP_to_v_FQDN_DF,spark)
-
- InitArangoDBPool.arangoDB.shutdown()
- spark.close()
-
- }
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/Config.scala b/ip-learning/src/main/scala/cn/ac/iie/test/Config.scala
deleted file mode 100644
index 699cc16..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/Config.scala
+++ /dev/null
@@ -1,22 +0,0 @@
-package cn.ac.iie.test
-
-import com.typesafe.config.{Config, ConfigFactory}
-
-object Config {
- private lazy val config: Config = ConfigFactory.load()
- val SPARK_SQL_SHUFFLE_PARTITIONS: String = config.getString("spark.sql.shuffle.partitions")
- val SPARK_SQL_READ_FETCHSIZE: String = config.getString("spark.sql.read.fetchsize")
- val SPARK_EXECUTOR_MEMORY: String = config.getString("spark.executor.memory")
- val NUMPARTITIONS: String = config.getString("numPartitions")
- val MASTER: String = config.getString("master")
- val MAXPOOLSIZE: Int = config.getInt("maxPoolSize")
- val MINTIME: String = config.getString("minTime")
- val MAXTIME: String = config.getString("maxTime")
-
- val REPARTITION_NUMBER: Int = config.getInt("repartitionNumber")
- val ARANGODB_BATCH: Int = config.getInt("arangoDB.batch")
- val ARANGODB_TTL: Int = config.getInt("arangoDB.ttl")
- val CLICKHOUSE_SOCKET_TIMEOUT: Int = config.getInt("clickhouse.socket.timeout")
- val SPARK_SERIALIZER: String = config.getString("spark.serializer")
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/ReadClickhouseTest.scala b/ip-learning/src/main/scala/cn/ac/iie/test/ReadClickhouseTest.scala
deleted file mode 100644
index 3ff1742..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/ReadClickhouseTest.scala
+++ /dev/null
@@ -1,447 +0,0 @@
-package cn.ac.iie.test
-
-import com.orientechnologies.orient.core.db.{ODatabasePool, OPartitionedDatabasePool}
-import com.orientechnologies.orient.core.sql.OCommandSQL
-import com.tinkerpop.blueprints.impls.orient.{OrientGraph, OrientGraphFactory}
-import com.tinkerpop.blueprints.{Direction, Edge, Vertex}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.slf4j.{Logger, LoggerFactory}
-
-import scala.util.Try
-
-object ReadClickhouseTest {
- private val logger: Logger = LoggerFactory.getLogger(ReadClickhouseTest.getClass)
- @transient
- var factory: OrientGraphFactory = _
-
- @transient
- var pool: ODatabasePool = _
-
- def main(args: Array[String]): Unit = {
- // val warehouseLocation = new File("spark-warehouse").getAbsolutePath
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
- // .config("spark.kryo.classesToRegister","com.tinkerpop.blueprints.impls.orient.OrientGraphFactory")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
-
- /*
- .config("spark.driver.host", "192.168.41.79")
- .config("spark.jars", "D:\\GITREPO\\ip-learning\\target\\ip-learning-1.0-SNAPSHOT-jar-with-dependencies.jar")
- .master("spark://192.168.40.119:7077")
- */
- .getOrCreate()
- logger.warn("sparkession获取成功!!!")
-
- // val sql = "(select * from av_miner.media_expire_patch_local limit 1000)"
-
- val mediaDataFrame: DataFrame = spark.read.format("jdbc")
- .option("url", "jdbc:clickhouse://192.168.40.193:8123")
-// .option("dbtable", "av_miner.media_expire_patch")
-// .option("dbtable", "(select * from av_miner.media_expire_patch limit 10)")
-// .option("dbtable","(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where media_domain not LIKE '%\\n%')")
- .option("dbtable",s"(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where recv_time>=${Config.MINTIME} and recv_time<=${Config.MAXTIME})")
-// .option("dbtable","(select media_domain,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region,min(recv_time) as min_recv_time,max(recv_time) as max_recv_time from av_miner.media_expire_patch group by media_domain,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region limit 10)")
- .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
- .option("user", "default")
- .option("password", "111111")
- .option("numPartitions", Config.NUMPARTITIONS)
- .option("partitionColumn", "recv_time")
- .option("lowerBound", Config.MINTIME)
- .option("upperBound", Config.MAXTIME)
- .option("fetchsize", Config.SPARK_SQL_READ_FETCHSIZE)
- .load()
- // mediaDataFrame.printSchema()
- mediaDataFrame.createOrReplaceGlobalTempView("media_expire_patch")
- // val mediaDataGlobalView = spark.sql("select * from global_temp.media_expire_patch limit 10")
- // mediaDataGlobalView.show()
-
- val v_FQDN_DF = spark.sql(
- """
- |SELECT
- | media_domain AS FQDN_NAME,
- | MIN( recv_time ) AS FQDN_FIRST_FOUND_TIME,
- | MAX( recv_time ) AS FQDN_LAST_FOUND_TIME,
- | COUNT( * ) AS FQDN_COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | media_domain != ''
- |GROUP BY
- | media_domain
- """.stripMargin
- )
- val s_IP_DF = spark.sql(
- """
- select
- s1_s_ip as IP,
- s1_s_location_region as location,
- MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- COUNT( * ) AS IP_COUNT_TOTAL
- from global_temp.media_expire_patch
- GROUP BY
- IP,
- location
- """.stripMargin)
-
- val d_IP_DF = spark.sql(
- """
- select
- s1_d_ip as IP,
- s1_d_location_region as location,
- MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- COUNT( * ) AS IP_COUNT_TOTAL
- from global_temp.media_expire_patch
- GROUP BY
- IP,
- location
- """.stripMargin)
- import org.apache.spark.sql.functions._
- val v_IP_DF = s_IP_DF.union(d_IP_DF).groupBy("IP","location").agg(
- min("IP_FIRST_FOUND_TIME") .as("IP_FIRST_FOUND_TIME"),
- max("IP_LAST_FOUND_TIME").as("IP_LAST_FOUND_TIME"),
- count("IP").as("IP_COUNT_TOTAL")
- )
-
- /*
- val v_IP_DF = spark.sql(
- """
- |SELECT
- | IP,
- | location,
- | MIN( recv_time ) AS IP_FIRST_FOUND_TIME,
- | MAX( recv_time ) AS IP_LAST_FOUND_TIME,
- | COUNT( * ) AS IP_COUNT_TOTAL
- |FROM
- | (
- | ( SELECT s1_s_ip AS IP, s1_s_location_region AS location, recv_time FROM global_temp.media_expire_patch ) UNION ALL
- | ( SELECT s1_d_ip AS IP, s1_d_location_region AS location, recv_time FROM global_temp.media_expire_patch )
- | )
- |GROUP BY
- | IP,
- | location
- """.stripMargin)
- */
- val e_Address_v_FQDN_to_v_IP_DF = spark.sql(
- """
- |SELECT
- | media_domain AS V_FQDN,
- | s1_d_ip AS V_IP,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( media_domain != '' )
- | AND ( s1_d_ip != '' )
- |GROUP BY
- | s1_d_ip,
- | media_domain
- """.stripMargin)
-
- val e_Visit_v_IP_to_v_FQDN_DF = spark.sql(
- """
- |SELECT
- | s1_s_ip AS V_IP,
- | media_domain AS V_FQDN,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( s1_s_ip != '' )
- | AND ( media_domain != '' )
- |GROUP BY
- | s1_s_ip,
- | media_domain
- """.stripMargin)
-
-
- /**
-     * Obtain the database connection
- */
- val uri: String = "remote:192.168.40.127/iplearning-insert"
-// val uri: String = "remote:192.168.40.207/iplearing-test"
-// val uri: String = "remote:192.168.40.152:2424;192.168.40.151:2424:192.168.40.153:2424/iplearing-test"
- val pool = new OPartitionedDatabasePool(uri, "root", "111111", Config.MAXPOOLSIZE, Config.MAXPOOLSIZE)
- factory = new OrientGraphFactory(uri, "root", "111111", pool)
- factory.setConnectionStrategy("ROUND_ROBIN_CONNECT")
-
- /**
-     * Update FQDN vertices
- */
- v_FQDN_DF.printSchema()
- v_FQDN_DF.foreach(row => {
- val fqdn = row.getAs[String]("FQDN_NAME")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Last = row.getAs[Long]("FQDN_LAST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-// val graph = factory.getNoTx
- val graph: OrientGraph = factory.getTx
- var v_Fqdn_Obj: Vertex = null
- import scala.collection.JavaConversions._
-
- if (graph.getVertices("v_FQDN.FQDN_NAME", fqdn).nonEmpty) {
- for (v: Vertex <- graph.getVertices("v_FQDN.FQDN_NAME", fqdn)) {
-        val update_Fqdn_Cnt = v.getProperty[Long]("FQDN_APPEAR_COUNT")
-        val sqlComm = new OCommandSQL(
-          s"UPDATE v_FQDN SET LAST_FOUND_TIME = $v_Fqdn_Last,FQDN_APPEAR_COUNT = ${update_Fqdn_Cnt + v_Fqdn_Cnt} WHERE FQDN_NAME = '$fqdn'")
- Try(graph.command(sqlComm).execute())
- println("update fqdn:"+fqdn)
- v_Fqdn_Obj = v
- }
- } else {
- v_Fqdn_Obj = graph.addVertex("class:v_FQDN", Nil: _*)
-
- v_Fqdn_Obj.setProperty("FQDN_NAME", fqdn)
- v_Fqdn_Obj.setProperty("FIRST_FOUND_TIME", v_Fqdn_First)
- v_Fqdn_Obj.setProperty("LAST_FOUND_TIME", v_Fqdn_Last)
- v_Fqdn_Obj.setProperty("FQDN_APPEAR_COUNT", v_Fqdn_Cnt)
- println("insert fqdn:"+fqdn)
- }
-      // A batch counter declared inside this closure resets on every row and
-      // could never reach 10000, so commit this element's transaction directly.
-      graph.commit()
- })
- factory.getTx.commit()
-
- /**
-     * Update IP vertices
- */
- v_IP_DF.printSchema()
- v_IP_DF.foreach(row => {
- val ip = row.getAs[String]("IP")
- val location = row.getAs[String]("location")
- val v_IP_First = row.getAs[Long]("IP_FIRST_FOUND_TIME")
- val v_IP_Last = row.getAs[Long]("IP_LAST_FOUND_TIME")
- val v_IP_Cnt = row.getAs[Long]("IP_COUNT_TOTAL")
-// val graph = factory.getNoTx
-
- val graph = factory.getTx
- var v_IP_Obj: Vertex = null
- import scala.collection.JavaConversions._
- if (graph.getVertices("v_IP.IP", ip).nonEmpty) {
- for (v: Vertex <- graph.getVertices("v_IP.IP", ip)) {
-            val update_IP_Cnt = v.getProperty[Long]("IP_APPEAR_COUNT")
-            val sqlComm = new OCommandSQL(
-              s"UPDATE v_IP SET LAST_FOUND_TIME = $v_IP_Last,IP_APPEAR_COUNT = ${update_IP_Cnt + v_IP_Cnt} "
-                + s"WHERE IP = '$ip'")
- Try(graph.command(sqlComm).execute())
- println("update ip:"+ip)
- v_IP_Obj = v
- }
- } else {
- v_IP_Obj = graph.addVertex("class:v_IP", Nil: _*)
-
- v_IP_Obj.setProperty("IP", ip)
- v_IP_Obj.setProperty("IP_LOCATION", location)
- v_IP_Obj.setProperty("FIRST_FOUND_TIME", v_IP_First)
- v_IP_Obj.setProperty("LAST_FOUND_TIME", v_IP_Last)
- v_IP_Obj.setProperty("IP_APPEAR_COUNT", v_IP_Cnt)
- println("insert ip:"+ip)
- }
-      // Same issue as above: a per-row counter never reaches the batch size,
-      // so commit this element's transaction directly.
-      graph.commit()
- })
- factory.getTx.commit()
-
- /**
-     * Aggregate the e_Address_Fqdn_to_IP edges
- */
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Address_v_FQDN_to_v_IP_DF.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
-
- val graph = factory.getNoTx
- var v_Fqdn_Obj: Vertex = null
- var v_IP_Obj: Vertex = null
- var e_Edge_Obj:Edge = null
-
- import scala.collection.JavaConversions._
-
-      //Fetch the FQDN vertex
- if (graph.getVertices("v_FQDN.FQDN_NAME", fqdn).nonEmpty) {
- for (v: Vertex <- graph.getVertices("v_FQDN.FQDN_NAME", fqdn)) {
- v_Fqdn_Obj = v
- }
- } else {
- v_Fqdn_Obj = graph.addVertex("class:v_FQDN", Nil: _*)
- v_Fqdn_Obj.setProperty("FQDN_NAME", fqdn)
- v_Fqdn_Obj.setProperty("FIRST_FOUND_TIME", 0)
- v_Fqdn_Obj.setProperty("LAST_FOUND_TIME", 0)
- v_Fqdn_Obj.setProperty("FQDN_APPEAR_COUNT", 0)
-
- }
-
-      //Fetch the IP vertex
- if (graph.getVertices("v_IP.IP", ip).nonEmpty) {
- for (v: Vertex <- graph.getVertices("v_IP.IP", ip)) {
- v_IP_Obj = v
- }
- } else {
- v_IP_Obj = graph.addVertex("class:v_IP", Nil: _*)
-
- v_IP_Obj.setProperty("IP", ip)
- v_IP_Obj.setProperty("IP_LOCATION", "")
- v_IP_Obj.setProperty("FIRST_FOUND_TIME", 0)
- v_IP_Obj.setProperty("LAST_FOUND_TIME", 0)
- v_IP_Obj.setProperty("IP_APPEAR_COUNT", 0)
- }
-//      println("e_address_edge:"+v_Fqdn_Obj.getProperty[String]("FQDN_NAME")+"-"+v_IP_Obj.getProperty[String]("IP"))
-      //Add or update the edge
- for (e: Edge <- v_Fqdn_Obj.getEdges(Direction.OUT)) {
- if (e.getVertex(Direction.IN).getProperty[String]("IP") == ip){
- val cnt = e.getProperty[Long]("COUNT_TOTAL")
- e.setProperty("COUNT_TOTAL",e_Cnt+cnt)
- e.setProperty("LAST_FOUND_TIME",e_Last)
-          println("update e_address_edge:"+fqdn+"-"+ip)
- e_Edge_Obj = e
- }
- }
-      if (e_Edge_Obj == null){
-        val newEdge = graph.addEdge(null, v_Fqdn_Obj, v_IP_Obj, "E_ADDRESS_V_FQDN_TO_V_IP")
-        newEdge.setProperty("COUNT_TOTAL",e_Cnt)
-        newEdge.setProperty("FIRST_FOUND_TIME",e_First)
-        newEdge.setProperty("LAST_FOUND_TIME",e_Last)
-        println("insert e_address_edge:"+fqdn+"-"+ip)
-      }
- })
-
- /**
-     * Aggregate the e_Visit_v_IP_to_v_FQDN edges
- */
- e_Visit_v_IP_to_v_FQDN_DF.printSchema()
- e_Visit_v_IP_to_v_FQDN_DF.foreach(row => {
- val fqdn = row.getAs[String]("V_FQDN")
- val ip = row.getAs[String]("V_IP")
- val e_First = row.getAs[Long]("FIRST_FOUND_TIME")
- val e_Last = row.getAs[Long]("LAST_FOUND_TIME")
- val e_Cnt = row.getAs[Long]("COUNT_TOTAL")
-
- val graph = factory.getNoTx
- var v_Fqdn_Obj: Vertex = null
- var v_IP_Obj: Vertex = null
- var e_Edge_Obj:Edge = null
-
- import scala.collection.JavaConversions._
-
-      //Add or update the FQDN vertex
- if (graph.getVertices("v_FQDN.FQDN_NAME", fqdn).nonEmpty) {
- for (v: Vertex <- graph.getVertices("v_FQDN.FQDN_NAME", fqdn)) {
- v_Fqdn_Obj = v
- }
- } else {
- v_Fqdn_Obj = graph.addVertex("class:v_FQDN", Nil: _*)
- v_Fqdn_Obj.setProperty("IP", ip)
- v_Fqdn_Obj.setProperty("FIRST_FOUND_TIME", 0)
- v_Fqdn_Obj.setProperty("LAST_FOUND_TIME", 0)
- v_Fqdn_Obj.setProperty("FQDN_APPEAR_COUNT", 0)
- }
-
-      //Add or update the IP vertex
- if (graph.getVertices("v_IP.IP", ip).nonEmpty) {
- for (v: Vertex <- graph.getVertices("v_IP.IP", ip)) {
- v_IP_Obj = v
- }
- } else {
- v_IP_Obj = graph.addVertex("class:v_IP", Nil: _*)
- v_IP_Obj.setProperty("FQDN_NAME", fqdn)
- v_IP_Obj.setProperty("IP_LOCATION", "")
- v_IP_Obj.setProperty("FIRST_FOUND_TIME", 0)
- v_IP_Obj.setProperty("LAST_FOUND_TIME", 0)
- v_IP_Obj.setProperty("IP_APPEAR_COUNT", 0)
- }
-//      println("e_visit_edge:"+v_Fqdn_Obj.getProperty[String]("FQDN_NAME")+"-"+v_IP_Obj.getProperty[String]("IP"))
-
-      //Add or update the edge
- for (e: Edge <- v_IP_Obj.getEdges(Direction.OUT)) {
- if (e.getVertex(Direction.IN).getProperty[String]("FQDN_NAME") == fqdn){
- val cnt = e.getProperty[Long]("COUNT_TOTAL")
- e.setProperty("COUNT_TOTAL",e_Cnt+cnt)
- e.setProperty("LAST_FOUND_TIME",e_Last)
-        println("update e_visit_edge:"+fqdn+"-"+ip)
- e_Edge_Obj = e
- }
- }
-      if (e_Edge_Obj == null){
-        val newEdge = graph.addEdge(null, v_IP_Obj, v_Fqdn_Obj, "E_VISIT_V_IP_TO_V_FQDN")
-        newEdge.setProperty("COUNT_TOTAL",e_Cnt)
-        newEdge.setProperty("FIRST_FOUND_TIME",e_First)
-        newEdge.setProperty("LAST_FOUND_TIME",e_Last)
-        println("insert e_visit_edge:"+fqdn+"-"+ip)
-      }
- // graph.commit()
- })
-
-
-
-
- /*
- v_FQDN_DF.printSchema()
- v_FQDN_DF.coalesce(20).foreach(row => {
- val fqdn = row.getAs[String](0)
- val first = row.getAs[Long](1)
- val last = row.getAs[Long](2)
- val count = row.getAs[Long](3)
- val session = pool.acquire()
- val vertex = session.newVertex("v_FQDN")
- vertex.setProperty("FQDN_NAME",fqdn)
- vertex.setProperty("FIRST_FOUND_TIME", first)
- vertex.setProperty("LAST_FOUND_TIME", last)
- vertex.setProperty("FQDN_APPEAR_COUNT", count)
- vertex
- })
-
-
-
- v_IP_DF.printSchema()
- v_IP_DF.coalesce(20).foreach(row => {
- val ip = row.getAs[String](0)
- val first = row.getAs[Long](2)
- val last = row.getAs[Long](3)
- val count = row.getAs[Long](4)
- val tx: OrientGraph = factory.getTx
- val vertex = tx.addVertex("class:v_FQDN",Nil: _*)
- vertex.setProperties("FQDN_NAME",ip)
- vertex.setProperty("FIRST_FOUND_TIME", first)
- vertex.setProperty("LAST_FOUND_TIME", last)
- vertex.setProperty("FQDN_APPEAR_COUNT", count)
- tx.commit()
- })
-
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Address_v_FQDN_to_v_IP_DF.foreach(row => {
- val fqdn = row.getAs[String](0)
- val ip = row.getAs[String](2)
- val first = row.getAs[Long](3)
- val last = row.getAs[Long](4)
- val count = row.getAs[Long](5)
- val session = pool.acquire()
- val tx: OrientGraph = factory.getTx
- tx.getFeatures.supportsVertexProperties
- val vertex: OrientVertex = tx.getVertex()
- tx.addEdge(null,vertex,vertex,"")
- })
- */
- }
-
-}
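The loop-and-flag upsert in the file above is easy to invert by accident (the original inserted a new edge only after finding an existing one). A minimal find-or-create sketch against the same Blueprints API, with a hypothetical helper name; property and label names follow the deleted code:

    import com.tinkerpop.blueprints.{Direction, Edge, Vertex}
    import com.tinkerpop.blueprints.impls.orient.OrientGraph

    import scala.collection.JavaConversions._

    def upsertAddressEdge(graph: OrientGraph, fqdnVertex: Vertex, ipVertex: Vertex,
                          ip: String, first: Long, last: Long, cnt: Long): Edge =
      fqdnVertex.getEdges(Direction.OUT)
        .find(e => e.getVertex(Direction.IN).getProperty[String]("IP") == ip) match {
        case Some(e) =>
          // Existing edge: accumulate the count and advance the last-seen time.
          e.setProperty("COUNT_TOTAL", e.getProperty[Long]("COUNT_TOTAL") + cnt)
          e.setProperty("LAST_FOUND_TIME", last)
          e
        case None =>
          // Only this branch should insert a new edge.
          val e = graph.addEdge(null, fqdnVertex, ipVertex, "E_ADDRESS_V_FQDN_TO_V_IP")
          e.setProperty("COUNT_TOTAL", cnt)
          e.setProperty("FIRST_FOUND_TIME", first)
          e.setProperty("LAST_FOUND_TIME", last)
          e
      }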
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocument.scala b/ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocument.scala
deleted file mode 100644
index f653a51..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocument.scala
+++ /dev/null
@@ -1,29 +0,0 @@
-package cn.ac.iie.test
-
-import com.arangodb.entity.DocumentField
-import com.arangodb.entity.DocumentField.Type
-
-import scala.beans.BeanProperty
-
-class TestBaseEdgeDocument {
-
- @BeanProperty
- @DocumentField(Type.FROM)
- var from: String=""
- @BeanProperty
- @DocumentField(Type.TO)
- var to: String=""
- @BeanProperty
- @DocumentField(Type.KEY)
- var key: String=""
- @BeanProperty
- @DocumentField(Type.ID)
- var id: String=""
- @BeanProperty
- var FIRST_FOUND_TIME:Long = 0
- @BeanProperty
- var LAST_FOUND_TIME:Long = 0
- @BeanProperty
- var COUNT_TOTAL:Long = 0
-
-}
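For reference, this bean maps straight onto an AQL cursor; a minimal sketch, assuming a reachable ArangoDB (host, credentials and database name here are placeholders) and the edge collection used elsewhere in this repo:

    import com.arangodb.ArangoDB
    import com.arangodb.util.MapBuilder

    val arango = new ArangoDB.Builder().host("127.0.0.1", 8529).user("root").password("111111").build
    // The @DocumentField annotations let the driver fill _from/_to/_key/_id.
    val cursor = arango.db("ip-learning-test")
      .query("FOR e IN E_ADDRESS_V_FQDN_TO_V_IP LIMIT 10 RETURN e",
        new MapBuilder().get, null, classOf[TestBaseEdgeDocument])
    while (cursor.hasNext) {
      val edge = cursor.next()
      println(s"${edge.getFrom} -> ${edge.getTo} (${edge.getCOUNT_TOTAL})")
    }
    arango.shutdown()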
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocumentDataFrame.scala b/ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocumentDataFrame.scala
deleted file mode 100644
index 65239bd..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/TestBaseEdgeDocumentDataFrame.scala
+++ /dev/null
@@ -1,35 +0,0 @@
-package cn.ac.iie.test
-
-import cn.ac.iie.dao.{BaseMediaDataLoad, UpdateArangoGraphByDF}
-import cn.ac.iie.utils.InitArangoDBPool
-import com.arangodb.entity.BaseEdgeDocument
-import com.arangodb.util.MapBuilder
-import com.arangodb.{ArangoCursor, ArangoDB}
-import org.apache.spark.sql.{DataFrame, Row, SparkSession}
-
-object TestBaseEdgeDocumentDataFrame {
- @transient
- var arangoDB: ArangoDB = _
- def main(args: Array[String]): Unit = {
-
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
- .getOrCreate()
-
- BaseMediaDataLoad.loadMediaDate(spark)
- val e_Address_v_FQDN_to_v_IP_DF = BaseMediaDataLoad.getFQDNAddressIPEdgeFromMedia(spark)
- UpdateArangoGraphByDF.updateFQDNAddressIPEdge(e_Address_v_FQDN_to_v_IP_DF,spark)
-
- InitArangoDBPool.arangoDB.shutdown()
- spark.close()
-
-
- }
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/TestIndices.scala b/ip-learning/src/main/scala/cn/ac/iie/test/TestIndices.scala
deleted file mode 100644
index 80a9916..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/TestIndices.scala
+++ /dev/null
@@ -1,219 +0,0 @@
-package cn.ac.iie.test
-
-import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
-import com.arangodb.util.MapBuilder
-import com.arangodb.{ArangoCursor, ArangoDB}
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-import scala.util.Try
-
-object TestIndices {
- @transient
- var arangoDB: ArangoDB = _
- def main(args: Array[String]): Unit = {
-
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
- .getOrCreate()
-
- val mediaDataFrame: DataFrame = spark.read.format("jdbc")
- .option("url", "jdbc:clickhouse://192.168.40.193:8123")
- .option("dbtable", s"(select media_domain,recv_time,s1_s_ip,s1_d_ip,s1_s_location_region,s1_d_location_region from av_miner.media_expire_patch where recv_time>=${Config.MINTIME} and recv_time<=${Config.MAXTIME})")
- .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
- .option("user", "default")
- .option("password", "111111")
- .option("numPartitions", Config.NUMPARTITIONS)
- .option("partitionColumn", "recv_time")
- .option("lowerBound", Config.MINTIME)
- .option("upperBound", Config.MAXTIME)
- .option("fetchsize", Config.SPARK_SQL_READ_FETCHSIZE)
- .load()
- mediaDataFrame.printSchema()
- mediaDataFrame.createOrReplaceGlobalTempView("media_expire_patch")
-
- val v_FQDN_DF = spark.sql(
- """
- |SELECT
- | media_domain AS FQDN_NAME,
- | MIN( recv_time ) AS FQDN_FIRST_FOUND_TIME,
- | MAX( recv_time ) AS FQDN_LAST_FOUND_TIME,
- | COUNT( * ) AS FQDN_COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | media_domain != ''
- |GROUP BY
- | media_domain
- """.stripMargin
- )
- val time1 = System.currentTimeMillis()
-
- arangoDB = new ArangoDB.Builder()
- .maxConnections(Config.MAXPOOLSIZE)
- .host("192.168.40.127", 8529)
- .user("root")
- .password("111111")
- .build
-
- val dbName = "insert_iplearn_index"
- val collectionName = "V_FQDN"
- val query = "FOR doc IN " + collectionName + " RETURN doc"
- val bindVars = new MapBuilder().get
- val cursor: ArangoCursor[BaseEdgeDocument] = arangoDB.db(dbName).query(query, bindVars, null, classOf[BaseEdgeDocument])
- var cursor_Map = scala.collection.mutable.HashMap[String,BaseEdgeDocument]()
- while (cursor.hasNext){
- val document = cursor.next()
- cursor_Map += (document.getKey -> document)
- }
- val time2 = System.currentTimeMillis()
-
- println((time2 - time1)/1000)
- val docs_Insert = new java.util.ArrayList[BaseDocument]()
- val docs_Update = new java.util.ArrayList[BaseDocument]()
-    // NOTE: this foreach runs in executor closures, so on a cluster the two
-    // driver-side lists above stay empty; local[*] masks the problem.
-    v_FQDN_DF.foreach(row => {
- val fqdn = row.getAs[String]("FQDN_NAME")
- val v_Fqdn_First = row.getAs[Long]("FQDN_FIRST_FOUND_TIME")
- val v_Fqdn_Last = row.getAs[Long]("FQDN_LAST_FOUND_TIME")
- val v_Fqdn_Cnt = row.getAs[Long]("FQDN_COUNT_TOTAL")
-
- val doc = cursor_Map.getOrElse(fqdn, null)
- if (doc != null) {
- val document: BaseDocument = doc
- val fqdn_Cnt = Try(document.getAttribute("FQDN_COUNT_TOTAL")).getOrElse(0).toString.toInt
-        document.addAttribute("FQDN_COUNT_TOTAL", fqdn_Cnt + v_Fqdn_Cnt)
- document.addAttribute("FQDN_LAST_FOUND_TIME", v_Fqdn_Last)
- docs_Update.add(document)
- } else {
- val baseDocument: BaseDocument = new BaseDocument()
- baseDocument.setKey(fqdn)
- baseDocument.addAttribute("FQDN_NAME", fqdn)
- baseDocument.addAttribute("FQDN_FIRST_FOUND_TIME", v_Fqdn_First)
- baseDocument.addAttribute("FQDN_LAST_FOUND_TIME", v_Fqdn_Last)
- baseDocument.addAttribute("FQDN_COUNT_TOTAL", v_Fqdn_Cnt)
- docs_Insert.add(baseDocument)
- }
- })
-
-// Try(v_FQDN_Coll.importDocuments(docs_Insert))
-// Try(v_FQDN_Coll.updateDocuments(docs_Update))
-
-
-
-
- /*
- val db = arangoDB.db("insert_iplearn_index")
- val coll = db.collection("E_ADDRESS_V_FQDN_TO_V_IP")
- val docs = new java.util.ArrayList[BaseEdgeDocument]
- val baseEdgeDocument2 = new BaseEdgeDocument
- baseEdgeDocument2.setKey("test_edge_2.com")
- baseEdgeDocument2.setFrom("V_FQDN/test_edge_2_from")
- baseEdgeDocument2.setTo("V_IP/test_edge_2_to")
- baseEdgeDocument2.addAttribute("e_add_test_str", "1Two3")
- baseEdgeDocument2.addAttribute("e_add_test_num", 4321)
- docs.add(baseEdgeDocument2)
-
- coll.importDocuments(docs)
- arangoDB.shutdown()
-
-*/
-
- /*
- val uri: String = "remote:192.168.40.127/iplearning-insert"
- val pool = new OPartitionedDatabasePool(uri, "root", "111111", 5, 5)
- factory = new OrientGraphFactory(uri, "root", "111111", pool)
- val graph = factory.getNoTx
- val ip = "23.224.224.163"
- import scala.collection.JavaConversions._
- /*
- for (v: Vertex <- graph.getVertices("v_IP.IP", ip)) {
- val update_IP_Last = v.getProperty[Long]("LAST_FOUND_TIME")
- val update_IP_Cnt = v.getProperty[Long]("IP_APPEAR_COUNT")
- val sqlComm = new OCommandSQL(
- s"UPDATE v_IP SET LAST_FOUND_TIME = $update_IP_Last,FQDN_APPEAR_COUNT = 100 "
- + s"WHERE IP == '$ip'")
- Try(graph.command(sqlComm).execute())
- println("update ip:" + ip)
- }
-*/
- val v_IP_Obj = graph.addVertex("class:v_IP", Nil: _*)
-
- v_IP_Obj.setProperty("IP", ip)
- v_IP_Obj.setProperty("IP_LOCATION", "fas")
- v_IP_Obj.setProperty("FIRST_FOUND_TIME", 1)
- v_IP_Obj.setProperty("LAST_FOUND_TIME", 1)
- v_IP_Obj.setProperty("IP_APPEAR_COUNT", 1)
-*/
-
- /*
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .config("arangodb.hosts", "192.168.40.127:8529")
- .config("arangodb.user", "root")
- .config("arangodb.password", "111111")
- .master(Config.MASTER)
- .getOrCreate()
-
- val value: ArangoRDD[BaseDocument] = ArangoSpark
- .load[BaseDocument](spark.sparkContext,"V_FQDN",ReadOptions("insert_iplearn_index"))
-
-// var stringToDocument: Map[String, BaseDocument] = Map[String,BaseDocument]()
- val lstBuffer: ListBuffer[(String, BaseDocument)] = ListBuffer[(String, BaseDocument)]()
- val map: Map[String, BaseDocument] = value.map(doc => (doc.getKey,doc)).collect().toMap
- println(map.size)
-
- spark.close()
-*/
- /*
- arangoDB = new ArangoDB.Builder()
- .maxConnections(10)
- .host("192.168.40.127", 8529)
- .user("root")
- .password("111111")
- .build
-
- val db = arangoDB.db("insert_iplearn_index")
-// db.createCollection("V_FQDN")
-// db.createCollection("V_IP")
-// db.createCollection("E_ADDRESS_V_FQDN_TO_V_IP")
-// db.createCollection("E_VISIT_V_IP_TO_V_FQDN")
- val v_FQDN_Coll = db.collection("E_VISIT_V_IP_TO_V_FQDN")
-*/
-
-// val coll: ArangoCollection = db.collection("V_FQDN")
-// val value = coll.getDocument("test1.com",classOf[BaseDocument])
-// val str = value.getAttribute("v_fqdn_test_str")
-// val num: Int = value.getAttribute("v_fqdn_test_num").toString.toInt
-// println(str+"-"+num)
- /*
- val docs = new util.ArrayList[BaseDocument]
- val baseDocument1 = new BaseDocument
- baseDocument1.setKey("test1.com")
- baseDocument1.addAttribute("v_fqdn_test_str", "one2three")
- baseDocument1.addAttribute("v_fqdn_test_num", 1234)
- docs.add(baseDocument1)
-
- val baseDocument2 = new BaseDocument
- baseDocument2.setKey("test2.com")
- baseDocument2.addAttribute("v_fqdn_test_str", "1Two3")
- baseDocument2.addAttribute("v_fqdn_test_num", 4321)
- docs.add(baseDocument2)
- coll.importDocuments(docs)
-*/
-
-// arangoDB.shutdown()
-
-
- }
-
-}
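One caveat on the foreach above: it fills docs_Insert/docs_Update inside executor closures, so on a real cluster the driver-side lists stay empty, which is likely why the importDocuments/updateDocuments calls are commented out. A driver-side sketch using toLocalIterator, reusing the names from this test (the batch size is illustrative):

    import scala.collection.JavaConversions._

    val inserts = new java.util.ArrayList[BaseDocument]()
    v_FQDN_DF.toLocalIterator().foreach { row =>
      val doc = new BaseDocument()
      doc.setKey(row.getAs[String]("FQDN_NAME"))
      doc.addAttribute("FQDN_FIRST_FOUND_TIME", row.getAs[Long]("FQDN_FIRST_FOUND_TIME"))
      doc.addAttribute("FQDN_LAST_FOUND_TIME", row.getAs[Long]("FQDN_LAST_FOUND_TIME"))
      doc.addAttribute("FQDN_COUNT_TOTAL", row.getAs[Long]("FQDN_COUNT_TOTAL"))
      inserts.add(doc)
      if (inserts.size() == 10000) {   // flush in fixed-size batches
        arangoDB.db(dbName).collection(collectionName).importDocuments(inserts)
        inserts.clear()
      }
    }
    if (!inserts.isEmpty) arangoDB.db(dbName).collection(collectionName).importDocuments(inserts)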
diff --git a/ip-learning/src/main/scala/cn/ac/iie/test/TestSparkJoin.scala b/ip-learning/src/main/scala/cn/ac/iie/test/TestSparkJoin.scala
deleted file mode 100644
index 5c74ab7..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/test/TestSparkJoin.scala
+++ /dev/null
@@ -1,56 +0,0 @@
-package cn.ac.iie.test
-
-import cn.ac.iie.dao.BaseMediaDataLoad
-import cn.ac.iie.etl.CursorTransform
-import cn.ac.iie.pojo.BaseVertexFqdn
-import cn.ac.iie.utils.InitArangoDBPool
-import com.arangodb.ArangoCursor
-import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.slf4j.{Logger, LoggerFactory}
-
-object TestSparkJoin {
- private val logger: Logger = LoggerFactory.getLogger(TestSparkJoin.getClass)
-
- def main(args: Array[String]): Unit = {
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", Config.SPARK_SERIALIZER)
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", Config.SPARK_SQL_SHUFFLE_PARTITIONS)
- .config("spark.executor.memory", Config.SPARK_EXECUTOR_MEMORY)
- .master(Config.MASTER)
- .getOrCreate()
- logger.warn("sparkession获取成功!!!")
- BaseMediaDataLoad.loadMediaDate(spark)
-// val v_FQDN_DF = BaseMediaDataLoad.getFQDNVertexFromMedia(spark)
- BaseMediaDataLoad.getFQDNAddressIPEdgeFromMedia(spark).show(10)
-// v_FQDN_DF.printSchema()
-
-
-/*
- val arangoDB = InitArangoDBPool.arangoDB
- val options = InitArangoDBPool.options
- val bindVars = InitArangoDBPool.bindVars
-
-// val v_FQDN_Query = "FOR doc IN V_FQDN limit 100 RETURN doc"
-// val v_FQDN_Query = "FOR doc IN V_FQDN RETURN doc"
-// val v_FQDN_Cursor: ArangoCursor[BaseVertexFqdn] = arangoDB.db("insert_iplearn_noindex")
-// .query(v_FQDN_Query, bindVars, options, classOf[BaseVertexFqdn])
-// val v_FQDN_Curson_DF: DataFrame = spark.createDataFrame(v_FQDN_Cursor.asListRemaining(),classOf[BaseVertexFqdn])
-// v_FQDN_Curson_DF.printSchema()
-//
- val v_FQDN_Curson_DF = CursorTransform.cursorToDataFrame("V_FQDN",classOf[BaseVertexFqdn],spark)
- val v_Fqdn_Join_Df = v_FQDN_DF.join(v_FQDN_Curson_DF,v_FQDN_DF("new_fqdn_name")===v_FQDN_Curson_DF("key"),"fullouter")
- v_Fqdn_Join_Df.printSchema()
- v_Fqdn_Join_Df
-// .filter(row => row.getAs[String]("new_fqdn_name")!=null)
-// .filter(row => row.getAs[String]("new_fqdn_name")!=null)
- .show(300)
-
- arangoDB.shutdown()
- spark.close()
-*/
- }
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/utils/ConfigUtils.scala b/ip-learning/src/main/scala/cn/ac/iie/utils/ConfigUtils.scala
deleted file mode 100644
index 9c52b57..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/utils/ConfigUtils.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-package cn.ac.iie.utils
-
-import com.typesafe.config.{Config, ConfigFactory}
-
-object ConfigUtils {
- private lazy val config: Config = ConfigFactory.load()
-
- val SPARK_SQL_SHUFFLE_PARTITIONS: String = config.getString("spark.sql.shuffle.partitions")
- val SPARK_SQL_READ_FETCHSIZE: String = config.getString("spark.sql.read.fetchsize")
- val SPARK_EXECUTOR_MEMORY: String = config.getString("spark.executor.memory")
- val SPARK_APP_NAME: String = config.getString("spark.app.name")
- val SPARK_NETWORK_TIMEOUT: String = config.getString("spark.network.timeout")
- val REPARTITION_NUMBER: Int = config.getInt("repartitionNumber")
- val SPARK_SERIALIZER: String = config.getString("spark.serializer")
-
- val NUMPARTITIONS: String = config.getString("numPartitions")
- val MASTER: String = config.getString("master")
- val MAXPOOLSIZE: Int = config.getInt("maxPoolSize")
- val MINTIME: String = config.getString("minTime")
- val MAXTIME: String = config.getString("maxTime")
-
- val ARANGODB_HOST: String= config.getString("arangoDB.host")
- val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
- val ARANGODB_USER: String= config.getString("arangoDB.user")
- val ARANGODB_PASSWORD:String= config.getString("arangoDB.password")
- val ARANGODB_BATCH: Int = config.getInt("arangoDB.batch")
- val ARANGODB_DB_NAME:String= config.getString("arangoDB.DB.name")
- val ARANGODB_TTL: Int = config.getInt("arangoDB.ttl")
- val CLICKHOUSE_SOCKET_TIMEOUT: Int = config.getInt("clickhouse.socket.timeout")
-
- val THREAD_POOL_NUMBER: Int = config.getInt("thread.pool.number")
-
-
-}
diff --git a/ip-learning/src/main/scala/cn/ac/iie/utils/InitArangoDBPool.scala b/ip-learning/src/main/scala/cn/ac/iie/utils/InitArangoDBPool.scala
deleted file mode 100644
index 3a2efdc..0000000
--- a/ip-learning/src/main/scala/cn/ac/iie/utils/InitArangoDBPool.scala
+++ /dev/null
@@ -1,24 +0,0 @@
-package cn.ac.iie.utils
-
-import java.util
-
-import com.arangodb.ArangoDB
-import com.arangodb.model.AqlQueryOptions
-import com.arangodb.util.MapBuilder
-
-object InitArangoDBPool {
-
- @transient
- lazy val arangoDB: ArangoDB = new ArangoDB.Builder()
- .maxConnections(ConfigUtils.MAXPOOLSIZE)
- .host(ConfigUtils.ARANGODB_HOST, ConfigUtils.ARANGODB_PORT)
- .user(ConfigUtils.ARANGODB_USER)
- .password(ConfigUtils.ARANGODB_PASSWORD)
- .build
-
- val bindVars: util.Map[String, AnyRef] = new MapBuilder().get
-
- val options: AqlQueryOptions = new AqlQueryOptions()
- .ttl(ConfigUtils.ARANGODB_TTL)
-
-}
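A minimal usage sketch for the shared pool above (the collection name and query are placeholders):

    import com.arangodb.entity.BaseDocument

    val cursor = InitArangoDBPool.arangoDB
      .db(ConfigUtils.ARANGODB_DB_NAME)
      .query("FOR doc IN V_FQDN LIMIT 10 RETURN doc",
        InitArangoDBPool.bindVars, InitArangoDBPool.options, classOf[BaseDocument])
    while (cursor.hasNext) println(cursor.next().getKey)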
diff --git a/ip-learning/src/test/java/cn/ac/iie/CreateObjectJavaTest.java b/ip-learning/src/test/java/cn/ac/iie/CreateObjectJavaTest.java
deleted file mode 100644
index 8be7c35..0000000
--- a/ip-learning/src/test/java/cn/ac/iie/CreateObjectJavaTest.java
+++ /dev/null
@@ -1,32 +0,0 @@
-package cn.ac.iie;
-
-import com.tinkerpop.blueprints.impls.orient.OrientGraph;
-
-public class CreateObjectJavaTest {
- public static void main(String[] args) {
- String orientDbProtocol = "remote";
- String orientDbHost = "192.168.40.127";
- String orientDbDatabase= "iplearning-insert";
- String orientDbUsername= "root";
- String orientDbPassword= "111111";
-
-// String orientDbUri = "${orientDbProtocol}:${orientDbHost}/${orientDbDatabase}";
- String orientDbUri = orientDbProtocol+":"+orientDbHost+"/"+orientDbDatabase;
- OrientGraph dblpOrientDbGraph = new OrientGraph(orientDbUri, orientDbUsername, orientDbPassword);
-
-// val orientDB: OrientGraph = new OrientGraph(orientDbUri,orientDbUsername, orientDbPassword)
- try {
- System.out.println((dblpOrientDbGraph.isAutoStartTx()));
-            dblpOrientDbGraph.dropEdgeType("author_of");
- dblpOrientDbGraph.commit();
- }catch (Exception ex){
- //catching errors & print out
- System.out.println(ex.getMessage());
- }
- finally {
- //close the current OrientDb's connection
- dblpOrientDbGraph.shutdown();
- }
-
- }
-}
diff --git a/ip-learning/src/test/scala/cn/ac/iie/CreateObjectTest.scala b/ip-learning/src/test/scala/cn/ac/iie/CreateObjectTest.scala
deleted file mode 100644
index 42162d0..0000000
--- a/ip-learning/src/test/scala/cn/ac/iie/CreateObjectTest.scala
+++ /dev/null
@@ -1,157 +0,0 @@
-package cn.ac.iie
-
-import java.lang
-
-import com.orientechnologies.orient.core.command.OCommandRequest
-import com.orientechnologies.orient.core.config.OGlobalConfiguration
-import com.orientechnologies.orient.core.db._
-import com.orientechnologies.orient.core.metadata.schema.{OClass, OType}
-import com.orientechnologies.orient.core.sql.OCommandSQL
-import com.tinkerpop.blueprints.Vertex
-import com.tinkerpop.blueprints.impls.orient.{OrientDynaElementIterable, OrientGraph, OrientGraphFactory}
-
-object CreateObjectTest {
- def main(args: Array[String]): Unit = {
-
- /*
- val orientDbProtocol: String = "remote"
- val orientDbHost: String = "192.168.40.127"
- val orientDbDatabase: String = "iplearning-insert"
- val orientDbUsername: String = "root"
- val orientDbPassword: String = "111111"
-
- val orientDbUri: String = s"${orientDbProtocol}:${orientDbHost}/${orientDbDatabase}"
- val orientDB: OrientGraph = new OrientGraph(orientDbUri, orientDbUsername, orientDbPassword)
-
- try {
- println("---------------")
- println(orientDB.isAutoStartTx)
-      orientDB.dropEdgeType("author-paper")
- orientDB.commit()
- }catch {
- //catching errors & print out
- case ex: Exception => println(ex.getMessage)
- }
- finally {
- //close the current OrientDb's connection
- orientDB.shutdown()
- }
- */
-
- /*
- val poolCfg = OrientDBConfig.builder
- poolCfg.addConfig(OGlobalConfiguration.DB_POOL_MIN, 5)
- poolCfg.addConfig(OGlobalConfiguration.DB_POOL_MAX, 10)
-
- val pool: ODatabasePool = new ODatabasePool(orientDB, "iplearning-insert", "root", "111111", poolCfg.build)
- val session: ODatabaseSession = pool.acquire()
- println(session.isValidationEnabled)
- session.close()
- */
-
- /*
- val info = new Properties()
- info.put("user", "root")
- info.put("password", "111111")
-
- info.put("db.usePool", "true"); // USE THE POOL
- info.put("db.pool.min", "3"); // MINIMUM POOL SIZE
-
-
- val conn: OrientJdbcConnection = DriverManager.getConnection("jdbc:orient:remote:192.168.40.127/iplearing-index", info).asInstanceOf[OrientJdbcConnection]
- println(conn.isValid(1))
- */
-
-
- val uri: String = "remote:192.168.40.127/iplearning-insert"
- val pool = new OPartitionedDatabasePool(uri,"root","111111",20,20)
- val factory: OrientGraphFactory = new OrientGraphFactory(uri,"root","111111",pool)
-// val graph: OrientGraph = factory.getTx()
- val graph = factory.getNoTx
-
- /*
-
- val v_FQDN = graph.createVertexType("v_FQDN")
- v_FQDN.createProperty("FQDN_NAME", OType.STRING)
- v_FQDN.createProperty("FIRST_FOUND_TIME", OType.LONG)
- v_FQDN.createProperty("LAST_FOUND_TIME", OType.LONG)
- v_FQDN.createProperty("FQDN_APPEAR_COUNT", OType.LONG)
-
- val v_IP = graph.createVertexType("v_IP")
- v_IP.createProperty("IP", OType.STRING)
- v_IP.createProperty("FIRST_FOUND_TIME", OType.LONG)
- v_IP.createProperty("LAST_FOUND_TIME", OType.LONG)
- v_IP.createProperty("IP_APPEAR_COUNT", OType.LONG)
-
- val e_FQDN_to_IP = graph.createEdgeType("E_ADDRESS_V_FQDN_TO_V_IP")
- e_FQDN_to_IP.createProperty("FIRST_FOUND_TIME", OType.LONG)
- e_FQDN_to_IP.createProperty("LAST_FOUND_TIME", OType.LONG)
- e_FQDN_to_IP.createProperty("COUNT_TOTAL", OType.LONG)
-
- val e_IP_to_FQDN = graph.createEdgeType("E_VISIT_V_IP_TO_V_FQDN")
- e_IP_to_FQDN.createProperty("FIRST_FOUND_TIME", OType.LONG)
- e_IP_to_FQDN.createProperty("LAST_FOUND_TIME", OType.LONG)
- e_IP_to_FQDN.createProperty("COUNT_TOTAL", OType.LONG)
-*/
-
-
- /*
- val orientDB = new OrientDB("remote:192.168.40.127", "root", "111111", OrientDBConfig.defaultConfig)
-
- val poolCfg = OrientDBConfig.builder
- poolCfg.addConfig(OGlobalConfiguration.DB_POOL_MIN, 5)
- poolCfg.addConfig(OGlobalConfiguration.DB_POOL_MAX, 10)
-
- val pool: ODatabasePool = new ODatabasePool(orientDB, "iplearning-insert", "root", "111111", poolCfg.build)
- val session: ODatabaseSession = pool.acquire()
-
- println(session.isClosed)
-*/
-
- /*
- val vertex = graph.addVertex("class:v_FQDN",Nil: _*)
- vertex.setProperties("FQDN_NAME","123com1234")
- vertex.setProperty("FIRST_FOUND_TIME", 1571241623)
- vertex.setProperty("LAST_FOUND_TIME", 1571241570)
- vertex.setProperty("FQDN_APPEAR_COUNT", 5)
-
- val v_IP = graph.addVertex("class:v_IP",Nil: _*)
- v_IP.setProperty("IP","3.3.3.3")
- v_IP.setProperty("IP_LOCATION","cn")
- v_IP.setProperty("FIRST_FOUND_TIME", 1571241623)
- v_IP.setProperty("LAST_FOUND_TIME", 1571241570)
- v_IP.setProperty("IP_APPEAR_COUNT", 6543)
-
- graph.addEdge(null,vertex,v_IP,"E_ADDRESS_V_FQDN_TO_V_IP")
-*/
- val sqlComm = new OCommandSQL(
- "UPDATE E_ADDRESS_V_FQDN_TO_V_IP SET LAST_FOUND_TIME = 1412,FQDN_APPEAR_COUNT = 1244"
-        + " UPSERT WHERE FQDN_NAME = 'v5-dy.ixigua.com'")
- graph.command(sqlComm).execute()
-
- import scala.collection.JavaConversions._
- /*
- if (graph.getVertices("v_FQDN.FQDN_NAME","v5-dy.ixigua.com111111").nonEmpty){
- for (v <- graph.getVertices("v_FQDN.FQDN_NAME","v5-dy.ixigua.com1")) {
- println(v == null)
- }
- }
-
- val sqlComm = new OCommandSQL(
- "UPDATE v_FQDN SET LAST_FOUND_TIME = 1412,FQDN_APPEAR_COUNT = 1244"
-      + " WHERE FQDN_NAME = 'v5-dy.ixigua.com'")
- graph.command(sqlComm).execute()
- */
-
-
- /*
- println(graphFactory.exists())
- val tx = graphFactory.getTx
- val v_FQDN = tx.createVertexType("V_FQDN")
- v_FQDN.createProperty("FQDN_NAME",OType.STRING)
- v_FQDN.createProperty("FIRST_FOUND_TIME",OType.LONG)
- v_FQDN.createProperty("LAST_FOUND_TIME",OType.LONG)
- */
- }
-
-}
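On the UPDATE ... UPSERT issued above: OrientDB inserts the record when the WHERE clause matches nothing, and the condition should be an indexed equality for that to behave predictably. A corrected sketch against a vertex class (note the explicit space before UPSERT, which the original string concatenation dropped):

    import com.orientechnologies.orient.core.sql.OCommandSQL

    val upsert = new OCommandSQL(
      "UPDATE v_FQDN SET LAST_FOUND_TIME = 1412, FQDN_APPEAR_COUNT = 1244 " +
        "UPSERT WHERE FQDN_NAME = 'v5-dy.ixigua.com'")
    graph.command(upsert).execute()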
diff --git a/ip-learning/src/test/scala/cn/ac/iie/HiveUnionTest.scala b/ip-learning/src/test/scala/cn/ac/iie/HiveUnionTest.scala
deleted file mode 100644
index 3a81125..0000000
--- a/ip-learning/src/test/scala/cn/ac/iie/HiveUnionTest.scala
+++ /dev/null
@@ -1,78 +0,0 @@
-package cn.ac.iie
-
-import org.apache.spark.sql.{DataFrame, SparkSession}
-
-object HiveUnionTest {
- def main(args: Array[String]): Unit = {
-
- val spark: SparkSession = SparkSession
- .builder()
- .appName("test")
- .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
- .config("spark.network.timeout", "300s")
- .config("spark.sql.shuffle.partitions", 50)
- .master("local[*]")
-
- /*
- .config("spark.executor.memory", "30g")
- .config("spark.driver.host", "192.168.41.79")
- .config("spark.jars", "D:\\GITREPO\\ip-learning\\target\\ip-learning-1.0-SNAPSHOT-jar-with-dependencies.jar")
- .master("spark://192.168.40.119:7077")
- */
-
- .getOrCreate()
-
- val sql =
- """
- |(SELECT s1_s_ip AS IP, s1_s_location_region AS location,recv_time FROM av_miner.media_expire_patch limit 100
- |UNION ALL
- |SELECT s1_d_ip AS IP, s1_d_location_region AS location, recv_time FROM av_miner.media_expire_patch limit 100)
- """.stripMargin
-
- val mediaDataFrame: DataFrame = spark.read.format("jdbc")
- .option("url", "jdbc:clickhouse://192.168.40.193:8123")
- .option("dbtable","(select * from av_miner.media_expire_patch limit 100) as media_expire_patch")
-// .option("dbtable", "av_miner.media_expire_patch")
- // .option("dbtable", sql + " as a")
- .option("driver", "ru.yandex.clickhouse.ClickHouseDriver")
- .option("user", "default")
- .option("password", "111111")
- .option("numPartitions", "40")
- .option("fetchsize", "1000000")
- .load()
- mediaDataFrame.printSchema()
- mediaDataFrame.createOrReplaceGlobalTempView("media_expire_patch")
- val frame = spark.sql(
- """
- select IP,location,MIN(recv_time) AS FIRST_FOUND_TIME, MAX(recv_time) AS LAST_FOUND_TIME, COUNT(*) AS COUNT_TOTAL from (
- (SELECT s1_s_ip AS IP, s1_s_location_region AS location,recv_time FROM global_temp.media_expire_patch limit 100)
- UNION ALL
- (SELECT s1_d_ip AS IP, s1_d_location_region AS location, recv_time FROM global_temp.media_expire_patch limit 100)
- ) group by IP,location
- """.stripMargin)
-
- val e_Address_v_FQDN_to_v_IP_DF = spark.sql(
- """
- |SELECT
- | media_domain AS V_FQDN,
- | media_type,
- | s1_d_ip AS V_IP,
- | MIN( recv_time ) AS FIRST_FOUND_TIME,
- | MAX( recv_time ) AS LAST_FOUND_TIME,
- | COUNT( * ) AS COUNT_TOTAL
- |FROM
- | global_temp.media_expire_patch
- |WHERE
- | ( media_domain != '' )
- | AND ( s1_d_ip != '' )
- |GROUP BY
- | s1_d_ip,
- | media_domain,
- | media_type
- """.stripMargin)
- e_Address_v_FQDN_to_v_IP_DF.printSchema()
- e_Address_v_FQDN_to_v_IP_DF.show(200)
-// mediaDataFrame.show(20)
- }
-
-}
diff --git a/ip-learning/src/test/scala/cn/ac/iie/TestMap.scala b/ip-learning/src/test/scala/cn/ac/iie/TestMap.scala
deleted file mode 100644
index 6bb4a97..0000000
--- a/ip-learning/src/test/scala/cn/ac/iie/TestMap.scala
+++ /dev/null
@@ -1,11 +0,0 @@
-package cn.ac.iie
-
-object TestMap {
- def main(args: Array[String]): Unit = {
- var mapTest: Map[String, Int] = Map[String,Int]()
- mapTest += ("1" -> 1)
- println(mapTest.size)
-
- }
-
-}
From 0d02f2066c82424080425df05ef84f8482c46672 Mon Sep 17 00:00:00 2001
From: wanglihui <949764788@qq.com>
Date: Thu, 6 Aug 2020 16:13:59 +0800
Subject: [PATCH 3/6] =?UTF-8?q?IP=20Learning=20tsg=E9=A1=B9=E7=9B=AE=20spa?=
=?UTF-8?q?rk=E7=89=88=E6=9C=AC=E9=A6=96=E6=AC=A1=E6=8F=90=E4=BA=A4?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../java/cn/ac/iie/dao/BaseArangoData.java | 40 ++--
.../service/read/ReadHistoryArangoData.java | 50 ++++-
.../java/cn/ac/iie/utils/ArangoDBConnect.java | 14 +-
.../cn/ac/iie/utils/ExecutorThreadPool.java | 21 +-
.../src/main/resources/application.properties | 46 ++--
.../src/main/resources/log4j.properties | 4 +-
.../cn/ac/iie/config/ApplicationConfig.scala | 46 ++++
.../cn/ac/iie/dao/BaseClickhouseData.scala | 204 ++++++++++++++++++
.../ac/iie/main/IpLearningApplication.scala | 19 +-
.../service/transform/MergeDataFrame.scala | 83 +++++++
.../iie/service/update/UpdateDocHandler.scala | 118 ++++++++++
.../iie/service/update/UpdateDocument.scala | 189 ++++++++++++++++
.../spark/partition/CustomPartitioner.scala | 2 +-
.../cn/ac/iie/utils/SparkSessionUtil.scala | 27 +++
.../ac/iie/dao/BaseClickhouseDataTest.scala | 38 ++++
.../service/update/UpdateDocumentTest.scala | 30 +++
16 files changed, 862 insertions(+), 69 deletions(-)
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java b/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java
index af47dcf..a141219 100644
--- a/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/dao/BaseArangoData.java
@@ -21,37 +21,37 @@ import java.util.concurrent.CountDownLatch;
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
-    static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
+    public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
-    <T extends BaseDocument> void readHistoryData(String table,
+    public <T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
try {
-            LOG.info("Start updating " + table);
+            LOG.warn("Start updating " + table);
long start = System.currentTimeMillis();
- for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
+ for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
historyMap.put(i, new ConcurrentHashMap<>());
}
- CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER);
+ CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
long[] timeRange = getTimeRange(table);
- for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER; i++) {
+ for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
String sql = getQuerySql(timeRange, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
-        LOG.info("Reading " + table + " from arangoDB took: " + (last - start));
+        LOG.warn("Reading " + table + " from arangoDB took: " + (last - start));
} catch (Exception e) {
e.printStackTrace();
}
@@ -62,14 +62,14 @@ public class BaseArangoData {
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
- switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE) {
+ switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
- maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER;
+ maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
@@ -80,13 +80,13 @@ public class BaseArangoData {
}
break;
case 1:
- maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME;
- minTime = ApplicationConfig.READ_ARANGO_MIN_TIME;
+ maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
+ minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
break;
default:
}
long lastTime = System.currentTimeMillis();
-        LOG.info(sql + "\nQuerying the max/min time took: " + (lastTime - startTime));
+        LOG.warn(sql + "\nQuerying the max/min time took: " + (lastTime - startTime));
return new long[]{minTime, maxTime};
}
@@ -94,10 +94,10 @@ public class BaseArangoData {
private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
- long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER;
+ long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER();
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
- return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT + " RETURN doc";
+ return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
}
}
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java b/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java
index 0b4eda5..1ca66d7 100644
--- a/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/service/read/ReadHistoryArangoData.java
@@ -8,18 +8,30 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
-import static cn.ac.iie.service.read.ReadClickhouseData.RECENT_COUNT_HOUR;
/**
* @author wlh
* Reads the full arangoDB history on multiple threads and packs it into maps
*/
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
+ public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
+ static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
+
+    public static final HashSet<String> PROTOCOL_SET;
+
+ static {
+ PROTOCOL_SET = new HashSet<>();
+ PROTOCOL_SET.add("HTTP");
+ PROTOCOL_SET.add("TLS");
+ PROTOCOL_SET.add("DNS");
+ }
private ArangoDBConnect arangoConnect;
private String query;
@@ -52,25 +64,35 @@ public class ReadHistoryArangoData extends Thread {
int i = 0;
for (T doc : baseDocuments) {
String key = doc.getKey();
- int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER;
+ switch (table) {
+ case "R_LOCATE_FQDN2IP":
+ updateProtocolDocument(doc);
+ deleteDistinctClientIpByTime(doc);
+ break;
+ case "R_VISIT_IP2FQDN":
+ updateProtocolDocument(doc);
+ break;
+ default:
+ }
+ int hashCode = Math.abs(key.hashCode()) % ApplicationConfig.THREAD_POOL_NUMBER();
ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
tmpMap.put(key, doc);
i++;
}
long l = System.currentTimeMillis();
-            LOG.info(query + "\nRead " + i + " records, elapsed: " + (l - s));
+            LOG.warn(query + "\nRead " + i + " records, elapsed: " + (l - s));
}
}catch (Exception e){
e.printStackTrace();
}finally {
countDownLatch.countDown();
-            LOG.info("This thread finished reading; remaining threads: "+countDownLatch.getCount());
+            LOG.warn("This thread finished reading; remaining threads: "+countDownLatch.getCount());
}
}
private void updateProtocolDocument(T doc) {
if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
- for (String protocol : ReadClickhouseData.PROTOCOL_SET) {
+ for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
@@ -82,4 +104,22 @@ public class ReadHistoryArangoData extends Thread {
}
}
+ private void deleteDistinctClientIpByTime(T doc) {
+        ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
+        ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
+ distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
+ Collections.sort(distCipTs);
+ int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
+ String[] distCipArr = new String[index];
+ long[] disCipTsArr = new long[index];
+ if (distCip.size() + 1 == distCipTs.size()){
+ for (int i = 0; i < index; i++) {
+ distCipArr[i] = distCip.get(i);
+ disCipTsArr[i] = distCipTs.get(i);
+ }
+ }
+ doc.updateAttribute("DIST_CIP", distCipArr);
+ doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
+ }
+
}
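deleteDistinctClientIpByTime windows the parallel DIST_CIP / DIST_CIP_TS arrays by inserting a sentinel timestamp and sorting. The same split is easier to audit with zip/partition; a sketch, assuming element i of DIST_CIP was recorded at DIST_CIP_TS(i):

    // Returns (entries older than the cutoff, entries inside the recent window).
    def splitAtCutoff(cips: Seq[String], ts: Seq[Long],
                      cutoff: Long): (Seq[String], Seq[String]) = {
      val (older, recent) = cips.zip(ts).partition { case (_, t) => t < cutoff }
      (older.map(_._1), recent.map(_._1))
    }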
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java
index fc62f08..d5fb1b8 100644
--- a/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ArangoDBConnect.java
@@ -28,10 +28,10 @@ public class ArangoDBConnect {
private static void getArangoDatabase(){
arangoDB = new ArangoDB.Builder()
- .maxConnections(ApplicationConfig.THREAD_POOL_NUMBER)
- .host(ApplicationConfig.ARANGODB_HOST, ApplicationConfig.ARANGODB_PORT)
- .user(ApplicationConfig.ARANGODB_USER)
- .password(ApplicationConfig.ARANGODB_PASSWORD)
+ .maxConnections(ApplicationConfig.THREAD_POOL_NUMBER())
+ .host(ApplicationConfig.ARANGODB_HOST(), ApplicationConfig.ARANGODB_PORT())
+ .user(ApplicationConfig.ARANGODB_USER())
+ .password(ApplicationConfig.ARANGODB_PASSWORD())
.build();
}
@@ -43,7 +43,7 @@ public class ArangoDBConnect {
}
private ArangoDatabase getDatabase(){
- return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME);
+ return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME());
}
public void clean(){
@@ -59,7 +59,7 @@ public class ArangoDBConnect {
public <T> ArangoCursor<T> executorQuery(String query, Class<T> type){
ArangoDatabase database = getDatabase();
Map<String, Object> bindVars = new MapBuilder().get();
- AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL);
+ AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL());
try {
return database.query(query, bindVars, options, type);
}catch (Exception e){
@@ -101,7 +101,7 @@ public class ArangoDBConnect {
MultiDocumentEntity<DocumentCreateEntity<BaseDocument>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
-                LOG.debug("arangoDB write error: "+errorEntity.getErrorMessage());
+                LOG.warn("arangoDB write error: "+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
diff --git a/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java
index e3142ae..b9190d3 100644
--- a/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java
+++ b/ip-learning-spark/src/main/java/cn/ac/iie/utils/ExecutorThreadPool.java
@@ -22,7 +22,7 @@ public class ExecutorThreadPool {
.setNameFormat("iplearning-application-pool-%d").build();
//Common Thread Pool
- pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER, ApplicationConfig.THREAD_POOL_NUMBER*2,
+ pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER(), ApplicationConfig.THREAD_POOL_NUMBER()*2,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
@@ -40,28 +40,9 @@ public class ExecutorThreadPool {
pool.execute(command);
}
- @Deprecated
- public void awaitThreadTask(){
- try {
- while (!pool.awaitTermination(ApplicationConfig.THREAD_AWAIT_TERMINATION_TIME, TimeUnit.SECONDS)) {
-                System.out.println("Thread pool has not terminated yet");
- }
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
public void shutdown(){
pool.shutdown();
}
- @Deprecated
- public static Long getThreadNumber(){
- String name = Thread.currentThread().getName();
- String[] split = name.split("-");
- return Long.parseLong(split[3]);
- }
-
-
}
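For reference, the fan-out/await pattern BaseArangoData drives through this pool, as a sketch (the shard-reading body is elided):

    import java.util.concurrent.CountDownLatch

    val pool = ExecutorThreadPool.getInstance()
    val latch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER)
    (0 until ApplicationConfig.THREAD_POOL_NUMBER).foreach { i =>
      pool.executor(new Thread {
        override def run(): Unit =
          try { /* read shard i */ } finally latch.countDown()
      })
    }
    latch.await()   // block until every shard reader has counted down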
diff --git a/ip-learning-spark/src/main/resources/application.properties b/ip-learning-spark/src/main/resources/application.properties
index 87b0bbb..473771f 100644
--- a/ip-learning-spark/src/main/resources/application.properties
+++ b/ip-learning-spark/src/main/resources/application.properties
@@ -1,25 +1,45 @@
#Spark job configuration
-spark.sql.shuffle.partitions=144
-spark.sql.read.fetchsize=10000
-spark.executor.memory=120g
+spark.sql.shuffle.partitions=5
+spark.executor.memory=4g
spark.app.name=test
spark.network.timeout=300s
repartitionNumber=36
spark.serializer=org.apache.spark.serializer.KryoSerializer
master=local[*]
#Spark-to-ClickHouse read configuration
-numPartitions=144
-maxPoolSize=40
-minTime=1571245199
-maxTime=1571284799
+spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3
+spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
+spark.read.clickhouse.user=default
+spark.read.clickhouse.password=111111
+spark.read.clickhouse.numPartitions=144
+spark.read.clickhouse.fetchsize=10000
+spark.read.clickhouse.partitionColumn=common_recv_time
clickhouse.socket.timeout=300000
#arangoDB configuration
-arangoDB.host=192.168.40.127
+arangoDB.host=192.168.40.182
arangoDB.port=8529
-arangoDB.user=root
-arangoDB.password=111111
-arangoDB.DB.name=insert_iplearn_index
-arangoDB.batch=100000
+arangoDB.user=upsert
+arangoDB.password=ceiec2018
+#arangoDB.DB.name=insert_iplearn_index
+arangoDB.DB.name=ip-learning-test-0
arangoDB.ttl=3600
-thread.pool.number=10
+thread.pool.number=5
+
+#ClickHouse time-range mode, 0: read the past hour; 1: use the explicit range below
+clickhouse.time.limit.type=0
+read.clickhouse.max.time=1571245220
+read.clickhouse.min.time=1571245210
+
+#arangoDB time-range mode, 0: normal read; 1: use the explicit range below
+arango.time.limit.type=0
+read.arango.max.time=1571245320
+read.arango.min.time=1571245200
+
+arangoDB.read.limit=
+update.arango.batch=10000
+
+distinct.client.ip.num=10000
+recent.count.hour=24
+
+update.interval=3600
diff --git a/ip-learning-spark/src/main/resources/log4j.properties b/ip-learning-spark/src/main/resources/log4j.properties
index ee350e5..5d836b5 100644
--- a/ip-learning-spark/src/main/resources/log4j.properties
+++ b/ip-learning-spark/src/main/resources/log4j.properties
@@ -6,13 +6,13 @@ log4j.logger.org.apache.http.wire=OFF
log4j.rootLogger=info,console,file
# Console appender configuration
log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.Threshold=info
+log4j.appender.console.Threshold=warn
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# File appender configuration
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.file.Threshold=info
+log4j.appender.file.Threshold=warn
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Log file path; a relative path resolves against the run directory
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala
index 9e72fac..395ea6b 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/config/ApplicationConfig.scala
@@ -1,5 +1,51 @@
package cn.ac.iie.config
+import com.typesafe.config.{Config, ConfigFactory}
+
object ApplicationConfig {
+ private lazy val config: Config = ConfigFactory.load()
+
+ val SPARK_SQL_SHUFFLE_PARTITIONS: Int = config.getInt("spark.sql.shuffle.partitions")
+ val SPARK_EXECUTOR_MEMORY: String = config.getString("spark.executor.memory")
+ val SPARK_APP_NAME: String = config.getString("spark.app.name")
+ val SPARK_NETWORK_TIMEOUT: String = config.getString("spark.network.timeout")
+// val REPARTITION_NUMBER: Int = config.getInt("repartitionNumber")
+ val MASTER: String = config.getString("master")
+ val SPARK_SERIALIZER: String = config.getString("spark.serializer")
+
+ val NUMPARTITIONS: String = config.getString("spark.read.clickhouse.numPartitions")
+ val SPARK_READ_CLICKHOUSE_URL: String = config.getString("spark.read.clickhouse.url")
+ val SPARK_READ_CLICKHOUSE_DRIVER: String = config.getString("spark.read.clickhouse.driver")
+ val SPARK_READ_CLICKHOUSE_USER: String = config.getString("spark.read.clickhouse.user")
+ val SPARK_READ_CLICKHOUSE_PASSWORD: String = config.getString("spark.read.clickhouse.password")
+ val SPARK_READ_CLICKHOUSE_FETCHSIZE: String = config.getString("spark.read.clickhouse.fetchsize")
+ val SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN: String = config.getString("spark.read.clickhouse.partitionColumn")
+
+ val ARANGODB_HOST: String= config.getString("arangoDB.host")
+ val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
+ val ARANGODB_USER: String= config.getString("arangoDB.user")
+ val ARANGODB_PASSWORD:String= config.getString("arangoDB.password")
+ val ARANGODB_DB_NAME:String= config.getString("arangoDB.DB.name")
+ val ARANGODB_TTL: Int = config.getInt("arangoDB.ttl")
+ val CLICKHOUSE_SOCKET_TIMEOUT: Int = config.getInt("clickhouse.socket.timeout")
+
+ val THREAD_POOL_NUMBER: Int = config.getInt("thread.pool.number")
+
+ val CLICKHOUSE_TIME_LIMIT_TYPE: Int = config.getInt("clickhouse.time.limit.type")
+
+ val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
+ val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
+
+ val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type")
+
+ val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
+ val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")
+
+ val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
+ val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
+ val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
+ val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")
+
+ val UPDATE_INTERVAL: Int = config.getInt("update.interval")
}
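ConfigFactory.load() picks up application.properties from the classpath, so the keys above resolve one-to-one against the properties file earlier in this patch. A quick check, assuming the shipped defaults:

    import com.typesafe.config.ConfigFactory

    val cfg = ConfigFactory.load()
    println(cfg.getInt("thread.pool.number"))   // 5 with the properties above
    println(cfg.getString("arangoDB.DB.name"))  // ip-learning-test-0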
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala
index 3a19be9..952c30c 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/dao/BaseClickhouseData.scala
@@ -1,5 +1,209 @@
package cn.ac.iie.dao
+import cn.ac.iie.config.ApplicationConfig
+import cn.ac.iie.utils.SparkSessionUtil.spark
+import org.apache.spark.sql.DataFrame
+import org.slf4j.LoggerFactory
+
object BaseClickhouseData {
+ private val LOG = LoggerFactory.getLogger(BaseClickhouseData.getClass)
+
+ val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
+ private val timeLimit: (Long, Long) = getTimeLimit
+
+ private def initClickhouseData(sql:String): Unit ={
+
+ val dataFrame: DataFrame = spark.read.format("jdbc")
+ .option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
+ .option("dbtable", sql)
+ .option("driver", ApplicationConfig.SPARK_READ_CLICKHOUSE_DRIVER)
+ .option("user", ApplicationConfig.SPARK_READ_CLICKHOUSE_USER)
+ .option("password", ApplicationConfig.SPARK_READ_CLICKHOUSE_PASSWORD)
+ .option("numPartitions", ApplicationConfig.NUMPARTITIONS)
+ .option("partitionColumn", ApplicationConfig.SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN)
+ .option("lowerBound", timeLimit._2)
+ .option("upperBound", timeLimit._1)
+ .option("fetchsize", ApplicationConfig.SPARK_READ_CLICKHOUSE_FETCHSIZE)
+ .option("socket_timeout",ApplicationConfig.CLICKHOUSE_SOCKET_TIMEOUT)
+ .load()
+ dataFrame.printSchema()
+ dataFrame.createOrReplaceGlobalTempView("dbtable")
+ }
+
+ def loadConnectionDataFromCk(): Unit ={
+ val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
+ val sql =
+ s"""
+ |(SELECT
+ | ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
+ |FROM
+ | connection_record_log
+ |WHERE $where) as dbtable
+ """.stripMargin
+
+ LOG.warn(sql)
+ initClickhouseData(sql)
+ }
+
+ private def loadRadiusDataFromCk(): Unit ={
+ val where =
+ s"""
+ | common_recv_time >= ${timeLimit._2}
+ | AND common_recv_time < ${timeLimit._1}
+ | AND common_subscriber_id != ''
+ | AND radius_framed_ip != ''
+ | AND radius_packet_type = 4
+ | AND radius_acct_status_type = 1
+ """.stripMargin
+ val sql =
+ s"""
+ |(SELECT
+ | common_subscriber_id,radius_framed_ip,common_recv_time
+ |FROM
+ | tsg_galaxy_v3.radius_record_log
+ |WHERE
+ | $where) as dbtable
+ """.stripMargin
+ LOG.warn(sql)
+ initClickhouseData(sql)
+ }
+
+ def getVertexFqdnDf: DataFrame ={
+ loadConnectionDataFromCk()
+ val sql =
+ """
+ |SELECT
+ | FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
+ |FROM
+ | (
+ | (SELECT
+ | ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
+ | FROM
+ | global_temp.dbtable
+ | WHERE
+ | common_schema_type = 'SSL' GROUP BY ssl_sni
+ | )
+ | UNION ALL
+ | (SELECT
+ | http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
+ | FROM
+ | global_temp.dbtable
+ | WHERE
+ | common_schema_type = 'HTTP' GROUP BY http_host
+ | )
+ | )
+ |GROUP BY
+ | FQDN
+ |HAVING
+ | FQDN != ''
+ """.stripMargin
+ LOG.warn(sql)
+ val vertexFqdnDf = spark.sql(sql)
+ vertexFqdnDf.printSchema()
+ vertexFqdnDf
+ }
+
+ def getVertexIpDf: DataFrame ={
+ loadConnectionDataFromCk()
+ val sql =
+ """
+ |SELECT
+ | *
+ |FROM
+ | (
+ | (
+ | SELECT
+ | common_client_ip AS IP,
+ | MIN(common_recv_time) AS FIRST_FOUND_TIME,
+ | MAX(common_recv_time) AS LAST_FOUND_TIME,
+ | count(*) as SESSION_COUNT,
+ | sum(common_c2s_byte_num) as BYTES_SUM,
+ | 'client' as ip_type
+ | FROM
+ | global_temp.dbtable
+ | GROUP BY
+ | IP
+ | )
+ | UNION ALL
+ | (
+ | SELECT
+ | common_server_ip AS IP,
+ | MIN(common_recv_time) AS FIRST_FOUND_TIME,
+ | MAX(common_recv_time) AS LAST_FOUND_TIME,
+ | count(*) as SESSION_COUNT,
+ | sum(common_s2c_byte_num) as BYTES_SUM,
+ | 'server' as ip_type
+ | FROM
+ | global_temp.dbtable
+ | GROUP BY
+ | IP
+ | )
+ | )
+ """.stripMargin
+ LOG.warn(sql)
+ val vertexIpDf = spark.sql(sql)
+ vertexIpDf.printSchema()
+ vertexIpDf
+ }
+
+ def getRelationFqdnLocateIpDf: DataFrame ={
+ loadConnectionDataFromCk()
+ val sslSql =
+ """
+ |SELECT
+ | ssl_sni AS FQDN,
+ | common_server_ip,
+ | MAX(common_recv_time) AS LAST_FOUND_TIME,
+ | MIN(common_recv_time) AS FIRST_FOUND_TIME,
+ | COUNT(*) AS COUNT_TOTAL,
+ | collect_set(common_client_ip) AS DIST_CIP_RECENT,
+ | 'TLS' AS schema_type
+ |FROM
+ | global_temp.dbtable
+ |WHERE
+ | common_schema_type = 'SSL'
+ |GROUP BY
+ | ssl_sni,common_server_ip
+ """.stripMargin
+
+ val httpSql =
+ """
+ |SELECT
+ | http_host AS FQDN,
+ | common_server_ip,
+ | MAX(common_recv_time) AS LAST_FOUND_TIME,
+ | MIN(common_recv_time) AS FIRST_FOUND_TIME,
+ | COUNT(*) AS COUNT_TOTAL,
+ | collect_set(common_client_ip) AS DIST_CIP_RECENT,
+ | 'HTTP' AS schema_type
+ |FROM
+ | global_temp.dbtable
+ |WHERE
+ | common_schema_type = 'HTTP'
+ |GROUP BY
+ | http_host,common_server_ip
+ """.stripMargin
+ val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"
+
+ LOG.warn(sql)
+ val relationFqdnLocateIpDf = spark.sql(sql)
+ relationFqdnLocateIpDf.printSchema()
+ relationFqdnLocateIpDf
+ }
+
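+  // Resolves the read window as (maxTime, minTime) in epoch seconds: type 0 covers the UPDATE_INTERVAL ending at the current hour, type 1 uses the fixed range from configuration, anything else leaves (0, 0).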
+ private def getTimeLimit: (Long,Long) ={
+ var maxTime = 0L
+ var minTime = 0L
+ ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE match {
+ case 0 =>
+ maxTime = currentHour
+ minTime = maxTime - ApplicationConfig.UPDATE_INTERVAL
+ case 1 =>
+ maxTime = ApplicationConfig.READ_CLICKHOUSE_MAX_TIME
+ minTime = ApplicationConfig.READ_CLICKHOUSE_MIN_TIME
+ case _ =>
+ }
+ (maxTime, minTime)
+ }
}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala
index 17385f0..e3602d3 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/main/IpLearningApplication.scala
@@ -1,5 +1,22 @@
package cn.ac.iie.main
-object IpLearningApplication {
+import cn.ac.iie.service.update.UpdateDocument._
+import cn.ac.iie.utils.{ExecutorThreadPool, SparkSessionUtil}
+object IpLearningApplication {
+ private val pool = ExecutorThreadPool.getInstance
+
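+  // Run the three update stages in sequence, then release the thread pool, the ArangoDB connection and the Spark session.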
+ def main(args: Array[String]): Unit = {
+ try {
+ updateVertexFqdn()
+ updateVertexIp()
+ updateRelationFqdnLocateIp()
+ }catch {
+ case e:Exception => e.printStackTrace()
+ }finally {
+ pool.shutdown()
+ arangoManger.clean()
+ SparkSessionUtil.closeSpark()
+ }
+ }
}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala
index c7939fe..460caed 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/service/transform/MergeDataFrame.scala
@@ -1,5 +1,88 @@
package cn.ac.iie.service.transform
+import java.util.regex.Pattern
+
+import cn.ac.iie.config.ApplicationConfig
+import cn.ac.iie.dao.BaseClickhouseData
+import cn.ac.iie.spark.partition.CustomPartitioner
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.functions._
+import org.slf4j.LoggerFactory
+
object MergeDataFrame {
+ private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
+ private val pattern = Pattern.compile("^[\\d]*$")
+
+ def mergeVertexFqdn(): RDD[Row] ={
+ BaseClickhouseData.getVertexFqdnDf
+ .rdd.filter(row => isDomain(row.getAs[String](0))).map(row => (row.get(0),row))
+ .partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
+ }
+
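+  // Merges each IP's client-side and server-side rows into one row of index-aligned lists, then repartitions by IP so partitions line up with the broadcast history maps keyed by partition id.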
+ def mergeVertexIp(): RDD[Row]={
+ val vertexIpDf = BaseClickhouseData.getVertexIpDf
+ val frame = vertexIpDf.groupBy("IP").agg(
+ min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
+ max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
+ collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
+ collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
+ collect_list("ip_type").alias("ip_type_list")
+ )
+ val values = frame.rdd.map(row => (row.get(0), row))
+ .partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
+ values
+ }
+
+ def mergeRelationFqdnLocateIp(): RDD[Row] ={
+ val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
+ .groupBy("FQDN", "common_server_ip")
+ .agg(
+ min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
+ max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
+ collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
+ collect_list("schema_type").alias("schema_type_list"),
+ collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
+ )
+ frame.rdd.map(row => {
+ val fqdn = row.getAs[String]("FQDN")
+ val serverIp = row.getAs[String]("common_server_ip")
+ val key = fqdn.concat("-"+serverIp)
+ (key,row)
+ }).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
+
+ }
+
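+  // True when fqdn looks like a domain name; false for null/empty values, IPv6 literals, and dotted-quad IPv4 addresses (optionally carrying a ":port" suffix).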
+  private def isDomain(fqdn: String): Boolean = {
+    try {
+      if (fqdn == null || fqdn.isEmpty) {
+        return false
+      }
+      var host = fqdn
+      if (host.contains(":")) {
+        // More than one ':' means an IPv6 literal rather than a domain name.
+        if (host.indexOf(':') != host.lastIndexOf(':')) {
+          return false
+        }
+        // A single ':' is a "host:port" form; strip the port before inspecting the host.
+        host = host.split(":")(0)
+      }
+      val fqdnArr = host.split("\\.")
+      if (fqdnArr.length != 4) {
+        return true
+      }
+      for (f <- fqdnArr) {
+        if (pattern.matcher(f).matches) {
+          val i = f.toLong
+          if (i < 0 || i > 255) {
+            return true
+          }
+        } else {
+          return true
+        }
+      }
+    } catch {
+      case e: Exception =>
+        LOG.error("Failed to parse domain " + fqdn + ":\n" + e.toString)
+    }
+    // All four labels are numeric and within [0, 255]: an IPv4 address, not a domain.
+    false
+  }
}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala
index 64bed4d..bdf8120 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocHandler.scala
@@ -1,5 +1,123 @@
package cn.ac.iie.service.update
+
+import java.lang
+
+import cn.ac.iie.config.ApplicationConfig
+import cn.ac.iie.service.read.ReadHistoryArangoData
+import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
+
+import scala.collection.mutable
+import scala.collection.mutable.WrappedArray.ofRef
+
object UpdateDocHandler {
+ val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS")
+
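+  // Merge helpers: keep the larger of the stored and incoming value, or add the incoming value onto the stored sum.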
+  def updateMaxAttribute(hisDoc: BaseDocument, newAttribute: Long, attributeName: String): Unit = {
+    var hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
+    if (newAttribute > hisAttribute) {
+      hisAttribute = newAttribute
+    }
+    hisDoc.addAttribute(attributeName, hisAttribute)
+  }
+
+  def updateSumAttribute(hisDoc: BaseDocument, newAttribute: Long, attributeName: String): Unit = {
+    val hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
+    hisDoc.addAttribute(attributeName, newAttribute + hisAttribute)
+  }
+
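+  // Splits the zipped collect_list columns back into per-role counters; the three arrays are index-aligned because they come from the same groupBy row.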
+ def separateAttributeByIpType(ipTypeList:ofRef[String],
+ sessionCountList:ofRef[AnyRef],
+ bytesSumList:ofRef[AnyRef]): (Long,Long,Long,Long) ={
+ var serverSessionCount = 0L
+ var serverBytesSum = 0L
+ var clientSessionCount = 0L
+ var clientBytesSum = 0L
+ if (ipTypeList.length == sessionCountList.length && ipTypeList.length == bytesSumList.length){
+ sessionCountList.zip(bytesSumList).zip(ipTypeList).foreach(t => {
+        t._2 match {
+          case "server" =>
+            serverSessionCount = t._1._1.toString.toLong
+            serverBytesSum = t._1._2.toString.toLong
+          case "client" =>
+            clientSessionCount = t._1._1.toString.toLong
+            clientBytesSum = t._1._2.toString.toLong
+          case _ => // ip_type is only ever 'client' or 'server'; ignore anything else
+        }
+ })
+ }
+ (serverSessionCount, serverBytesSum, clientSessionCount, clientBytesSum)
+ }
+
+ def separateAttributeByProtocol(schemaTypeList:ofRef[AnyRef],countTotalList:ofRef[AnyRef]): Map[String, Long] ={
+ var protocolMap: Map[String, Long] = Map()
+ if (schemaTypeList.length == countTotalList.length){
+ protocolMap = schemaTypeList.zip(countTotalList).map(t => (t._1.toString,t._2.toString.toLong)).toMap
+ }
+ PROTOCOL_SET.foreach(protocol => {
+ if (!protocolMap.contains(protocol)){
+ protocolMap += (protocol -> 0L)
+ }
+ })
+ protocolMap
+ }
+
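+  // Folds new per-protocol counts into a history edge: extends PROTOCOL_TYPE, adds to each *_CNT_TOTAL, and overwrites slot 0 of the *_CNT_RECENT array.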
+  def updateProtocolAttribute(hisDoc: BaseEdgeDocument, protocolMap: Map[String, Long]): Unit = {
+ var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString
+ protocolMap.foreach(t => {
+ if (t._2 > 0 && !protocolType.contains(t._1)){
+ protocolType = protocolType.concat(","+ t._1)
+ }
+ val cntTotalName = t._1.concat("_CNT_TOTAL")
+ val cntRecentName = t._1.concat("_CNT_RECENT")
+ val cntRecent: Array[lang.Long] = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[java.lang.Long]]
+ cntRecent.update(0,t._2)
+ updateSumAttribute(hisDoc,t._2,cntTotalName)
+ hisDoc.addAttribute(cntRecentName,cntRecent)
+ })
+ hisDoc.addAttribute("PROTOCOL_TYPE",protocolType)
+ }
+
+  def putProtocolAttribute(doc: BaseEdgeDocument, protocolMap: Map[String, Long]): Unit = {
+ val protocolTypeBuilder = new mutable.StringBuilder()
+ protocolMap.foreach(t => {
+ if (t._2 > 0){
+ protocolTypeBuilder.append(","+t._1)
+ }
+ val cntTotalName = t._1.concat("_CNT_TOTAL")
+ val cntRecentName = t._1.concat("_CNT_RECENT")
+ val cntRecent: Array[Long] = new Array[Long](ApplicationConfig.RECENT_COUNT_HOUR)
+ cntRecent.update(0,t._2)
+ doc.addAttribute(cntTotalName,t._2)
+ doc.addAttribute(cntRecentName,cntRecent)
+ })
+ doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",",""))
+ }
+
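+  // Flattens the collected per-group client-IP sets and keeps at most DISTINCT_CLIENT_IP_NUM unique addresses.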
+ def mergeDistinctIp(distCipRecent:ofRef[ofRef[String]]): Array[String] ={
+ distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
+ }
+
+ def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
+ val map = newDistinctIp.map(ip => {
+ (ip, ReadHistoryArangoData.currentHour)
+ }).toMap
+ doc.addAttribute("DIST_CIP",map.keys.toArray)
+ doc.addAttribute("DIST_CIP_TS",map.values.toArray)
+ }
+
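+  // Merges new client IPs into the stored DIST_CIP/DIST_CIP_TS pair, stamping them with the current hour and keeping the DISTINCT_CLIENT_IP_NUM most recently seen entries.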
+ def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
+ val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
+ val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[Array[Long]]
+ if (hisDistCip.length == hisDistCipTs.length){
+ val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
+ val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*)
+ newDistinctIp.foreach(cip => {
+ muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour)
+ })
+ val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
+ hisDoc.addAttribute("DIST_CIP",resultMap.keys.toArray)
+ hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray)
+ }
+ }
}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala
index c25c31e..b5f875f 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/service/update/UpdateDocument.scala
@@ -1,5 +1,194 @@
package cn.ac.iie.service.update
+import java.util
+
+import cn.ac.iie.config.ApplicationConfig
+import cn.ac.iie.dao.BaseArangoData
+import cn.ac.iie.dao.BaseArangoData._
+import cn.ac.iie.service.transform.MergeDataFrame._
+import cn.ac.iie.service.update.UpdateDocHandler._
+import cn.ac.iie.utils.ArangoDBConnect
+import cn.ac.iie.utils.SparkSessionUtil.spark
+import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
+import org.apache.spark.TaskContext
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.Row
+import org.slf4j.LoggerFactory
+
+import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocument {
+ val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance()
+ private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
+ private val baseArangoData = new BaseArangoData()
+
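+  // Each stage broadcasts the history documents (keyed by partition id), merges the new hour into them partition by partition, and writes back with UPDATE_ARANGO_BATCH-sized overwrite calls.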
+ def updateVertexFqdn(): Unit ={
+ baseArangoData.readHistoryData("FQDN",historyVertexFqdnMap,classOf[BaseDocument])
+ val hisVerFqdnBc = spark.sparkContext.broadcast(historyVertexFqdnMap)
+ try {
+ val start = System.currentTimeMillis()
+ val mergeVertexFqdnDf: RDD[Row] = mergeVertexFqdn()
+ mergeVertexFqdnDf.foreachPartition(iter => {
+ val partitionId: Int = TaskContext.get.partitionId
+ val hisVerFqdnMapTmp = hisVerFqdnBc.value.get(partitionId)
+ val resultDocumentList: util.ArrayList[BaseDocument] = new util.ArrayList[BaseDocument]
+ var i = 0
+ iter.foreach(row => {
+ val fqdn = row.getAs[String]("FQDN")
+ val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
+ val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
+ var document: BaseDocument = hisVerFqdnMapTmp.getOrDefault(fqdn,null)
+ if (document != null){
+ updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
+ } else{
+ document = new BaseDocument
+ document.setKey(fqdn)
+ document.addAttribute("FQDN_NAME",fqdn)
+ document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
+ document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
+ }
+ resultDocumentList.add(document)
+ i+=1
+ if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
+ arangoManger.overwrite(resultDocumentList, "FQDN")
+          LOG.warn("Updated FQDN docs: " + i)
+ i = 0
+ }
+ })
+ if (i != 0) {
+ arangoManger.overwrite(resultDocumentList, "FQDN")
+        LOG.warn("Updated FQDN docs: " + i)
+ }
+ })
+ val last = System.currentTimeMillis()
+ LOG.warn(s"更新FQDN时间:${last-start}")
+ }catch {
+ case e:Exception => e.printStackTrace()
+ }finally {
+ hisVerFqdnBc.destroy()
+ }
+ }
+
+ def updateVertexIp(): Unit ={
+ baseArangoData.readHistoryData("IP",historyVertexIpMap,classOf[BaseDocument])
+ val hisVerIpBc = spark.sparkContext.broadcast(historyVertexIpMap)
+ try {
+ val start = System.currentTimeMillis()
+ val mergeVertexIpDf = mergeVertexIp()
+ mergeVertexIpDf.foreachPartition(iter => {
+ val partitionId: Int = TaskContext.get.partitionId
+ val hisVerIpMapTmp = hisVerIpBc.value.get(partitionId)
+ val resultDocumentList: util.ArrayList[BaseDocument] = new util.ArrayList[BaseDocument]
+ var i = 0
+ iter.foreach(row => {
+ val ip = row.getAs[String]("IP")
+ val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
+ val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
+ val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
+ val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
+ val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
+ val sepAttributeTuple = separateAttributeByIpType(ipTypeList,sessionCountList,bytesSumList)
+
+ var document = hisVerIpMapTmp.getOrDefault(ip,null)
+ if (document != null){
+ updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
+ updateSumAttribute(document,sepAttributeTuple._1,"SERVER_SESSION_COUNT")
+ updateSumAttribute(document,sepAttributeTuple._2,"SERVER_BYTES_SUM")
+ updateSumAttribute(document,sepAttributeTuple._3,"CLIENT_SESSION_COUNT")
+ updateSumAttribute(document,sepAttributeTuple._4,"CLIENT_BYTES_SUM")
+ } else {
+ document = new BaseDocument
+ document.setKey(ip)
+ document.addAttribute("IP",ip)
+ document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
+ document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
+ document.addAttribute("SERVER_SESSION_COUNT",sepAttributeTuple._1)
+ document.addAttribute("SERVER_BYTES_SUM",sepAttributeTuple._2)
+ document.addAttribute("CLIENT_SESSION_COUNT",sepAttributeTuple._3)
+ document.addAttribute("CLIENT_BYTES_SUM",sepAttributeTuple._4)
+ document.addAttribute("COMMON_LINK_INFO","")
+ }
+ resultDocumentList.add(document)
+ i+=1
+ if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
+ arangoManger.overwrite(resultDocumentList, "IP")
+          LOG.warn("Updated IP docs: " + i)
+ i = 0
+ }
+ })
+ if (i != 0) {
+ arangoManger.overwrite(resultDocumentList, "IP")
+        LOG.warn("Updated IP docs: " + i)
+ }
+ })
+ val last = System.currentTimeMillis()
+ LOG.warn(s"更新IP时间:${last-start}")
+ }catch {
+ case e:Exception => e.printStackTrace()
+ }finally {
+ hisVerIpBc.destroy()
+ }
+ }
+
+ def updateRelationFqdnLocateIp(): Unit ={
+ baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
+ val hisReFqdnLocIpBc = spark.sparkContext.broadcast(historyRelationFqdnAddressIpMap)
+ try {
+ val start = System.currentTimeMillis()
+ val mergeRelationFqdnLocateIpDf = mergeRelationFqdnLocateIp()
+ mergeRelationFqdnLocateIpDf.foreachPartition(iter => {
+ val partitionId: Int = TaskContext.get.partitionId
+ val hisRelaFqdnLocaIpMapTmp = hisReFqdnLocIpBc.value.get(partitionId)
+ val resultDocumentList: util.ArrayList[BaseEdgeDocument] = new util.ArrayList[BaseEdgeDocument]
+ var i = 0
+ iter.foreach(row => {
+ val fqdn = row.getAs[String]("FQDN")
+ val serverIp = row.getAs[String]("common_server_ip")
+ val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
+ val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
+ val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
+ val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
+ val distCipRecent = row.getAs[ofRef[ofRef[String]]]("DIST_CIP_RECENT")
+
+          val sepAttributeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList, countTotalList)
+ val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
+
+ val key = fqdn.concat("-"+serverIp)
+ var document: BaseEdgeDocument = hisRelaFqdnLocaIpMapTmp.getOrDefault(key,null)
+ if (document != null){
+ updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
+            updateProtocolAttribute(document, sepAttributeMap)
+ updateDistinctIp(document,distinctIp)
+ }else {
+ document = new BaseEdgeDocument()
+ document.setKey(key)
+ document.setFrom("FQDN/" + fqdn)
+ document.setTo("IP/" + serverIp)
+ document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
+ document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
+            putProtocolAttribute(document, sepAttributeMap)
+ putDistinctIp(document,distinctIp)
+ }
+ resultDocumentList.add(document)
+ i+=1
+ if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
+ arangoManger.overwrite(resultDocumentList, "R_LOCATE_FQDN2IP")
+          LOG.warn("Updated R_LOCATE_FQDN2IP docs: " + i)
+ i = 0
+ }
+ })
+ if (i != 0) {
+ arangoManger.overwrite(resultDocumentList, "R_LOCATE_FQDN2IP")
+        LOG.warn("Updated R_LOCATE_FQDN2IP docs: " + i)
+ }
+ })
+ val last = System.currentTimeMillis()
+ LOG.warn(s"更新R_LOCATE_FQDN2IP时间:${last-start}")
+ }catch {
+ case e:Exception => e.printStackTrace()
+ }finally {
+ hisReFqdnLocIpBc.destroy()
+ }
+ }
+
}
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala
index a3c26ae..f33a43e 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/spark/partition/CustomPartitioner.scala
@@ -1,4 +1,4 @@
-package cn.ac.iie.service.partition
+package cn.ac.iie.spark.partition
import org.apache.spark.Partitioner
diff --git a/ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala b/ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala
index ce0f417..12cfc86 100644
--- a/ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala
+++ b/ip-learning-spark/src/main/scala/cn/ac/iie/utils/SparkSessionUtil.scala
@@ -1,5 +1,32 @@
package cn.ac.iie.utils
+import cn.ac.iie.config.ApplicationConfig
+import org.apache.spark.sql.SparkSession
+import org.slf4j.LoggerFactory
+
object SparkSessionUtil {
+ private val LOG = LoggerFactory.getLogger(SparkSessionUtil.getClass)
+
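+  // Builds one shared SparkSession from ApplicationConfig the first time this object is referenced.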
+ val spark: SparkSession = getSparkSession
+
+ private def getSparkSession: SparkSession ={
+ val spark: SparkSession = SparkSession
+ .builder()
+ .appName(ApplicationConfig.SPARK_APP_NAME)
+ .config("spark.serializer", ApplicationConfig.SPARK_SERIALIZER)
+ .config("spark.network.timeout", ApplicationConfig.SPARK_NETWORK_TIMEOUT)
+ .config("spark.sql.shuffle.partitions", ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
+ .config("spark.executor.memory", ApplicationConfig.SPARK_EXECUTOR_MEMORY)
+ .master(ApplicationConfig.MASTER)
+ .getOrCreate()
+ LOG.warn("sparkession获取成功!!!")
+ spark
+ }
+
+ def closeSpark(): Unit ={
+ if (spark != null){
+ spark.stop()
+ }
+ }
}
diff --git a/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala b/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala
index 7e73a98..608fb2d 100644
--- a/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala
+++ b/ip-learning-spark/src/test/scala/cn/ac/iie/dao/BaseClickhouseDataTest.scala
@@ -1,5 +1,43 @@
package cn.ac.iie.dao
+import cn.ac.iie.utils.SparkSessionUtil
+import org.apache.spark.sql.SparkSession
+
+
object BaseClickhouseDataTest {
+ private val spark: SparkSession = SparkSessionUtil.spark
+ def main(args: Array[String]): Unit = {
+    BaseClickhouseData.loadConnectionDataFromCk()
+ val sql =
+ """
+ |SELECT
+ | FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
+ |FROM
+ | (
+ | (SELECT
+ | ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
+ | FROM
+ | global_temp.dbtable
+ | WHERE
+ | common_schema_type = 'SSL' GROUP BY ssl_sni
+ | )
+ | UNION ALL
+ | (SELECT
+ | http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
+ | FROM
+ | global_temp.dbtable
+ | WHERE
+ | common_schema_type = 'HTTP' GROUP BY http_host
+ | )
+ | )
+ |GROUP BY
+ | FQDN
+ |HAVING
+ | FQDN != ''
+ """.stripMargin
+ println(sql)
+ val vertexFqdnDf = spark.sql(sql)
+ vertexFqdnDf.show(10)
+ }
}
diff --git a/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala b/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala
index 28828b9..67590ff 100644
--- a/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala
+++ b/ip-learning-spark/src/test/scala/cn/ac/iie/service/update/UpdateDocumentTest.scala
@@ -1,5 +1,35 @@
package cn.ac.iie.service.update
+import java.util.concurrent.ConcurrentHashMap
+
+import cn.ac.iie.dao.BaseArangoData
+import cn.ac.iie.dao.BaseArangoData._
+import com.arangodb.entity.BaseEdgeDocument
+
object UpdateDocumentTest {
+ def main(args: Array[String]): Unit = {
+ val baseArangoData = new BaseArangoData()
+ baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
+
+    val partitionIds = historyRelationFqdnAddressIpMap.keys()
+    while (partitionIds.hasMoreElements) {
+      val partitionId: Integer = partitionIds.nextElement()
+      val docMap: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(partitionId)
+      val docKeys = docMap.keys()
+      while (docKeys.hasMoreElements) {
+        val key = docKeys.nextElement()
+        val edgeDocument = docMap.get(key)
+        val distCip = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
+        val dnsCntRecent = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
+        println(dnsCntRecent.mkString(",") + "---" + distCip.mkString(","))
+      }
+    }
+ }
}
From e63abc31aac51f1a1c2c5ef83124a028fe6748b8 Mon Sep 17 00:00:00 2001
From: wanglihui <949764788@qq.com>
Date: Thu, 6 Aug 2020 16:29:37 +0800
Subject: [PATCH 4/6] IP Learning tsg project: initial commit of the Spark version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
ip-learning-spark/.gitignore | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/ip-learning-spark/.gitignore b/ip-learning-spark/.gitignore
index 5db5dd3..f5a5d20 100644
--- a/ip-learning-spark/.gitignore
+++ b/ip-learning-spark/.gitignore
@@ -3,7 +3,11 @@
### Example user template
# IntelliJ project files
-.idea
+.idea/
*.iml
target
logs/
+pom.xml
+spark-warehouse/
+src/main/java/cn/ac/iie/config/
+src/test/java/
From bfdaae96a1bbfffbf6b29672d46a840baa21368a Mon Sep 17 00:00:00 2001
From: wanglihui <949764788@qq.com>
Date: Thu, 6 Aug 2020 16:32:57 +0800
Subject: [PATCH 5/6] IP Learning tsg project: initial commit of the Spark version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../.idea/libraries/scala_sdk_2_11_7.xml | 139 +++++++++++++++++-
1 file changed, 138 insertions(+), 1 deletion(-)
diff --git a/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml b/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml
index 9a0159e..96b8d93 100644
--- a/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml
+++ b/ip-learning-spark/.idea/libraries/scala_sdk_2_11_7.xml
@@ -20,6 +20,143 @@
[hunk body elided: the 138 added library entries of scala_sdk_2_11_7.xml were stripped to empty lines during extraction and are not recoverable]
From f44beda912b6514caf41f1489e96474e96f5757d Mon Sep 17 00:00:00 2001
From: wanglihui <949764788@qq.com>
Date: Thu, 6 Aug 2020 16:47:17 +0800
Subject: [PATCH 6/6] IP Learning tsg project: initial commit of the Spark version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
ip-learning-spark/.gitignore | 1 -
ip-learning-spark/pom.xml | 154 +++++++++++++++++++++++++++++++++++
2 files changed, 154 insertions(+), 1 deletion(-)
create mode 100644 ip-learning-spark/pom.xml
diff --git a/ip-learning-spark/.gitignore b/ip-learning-spark/.gitignore
index f5a5d20..5b9ff5c 100644
--- a/ip-learning-spark/.gitignore
+++ b/ip-learning-spark/.gitignore
@@ -7,7 +7,6 @@
*.iml
target
logs/
-pom.xml
spark-warehouse/
src/main/java/cn/ac/iie/config/
src/test/java/
diff --git a/ip-learning-spark/pom.xml b/ip-learning-spark/pom.xml
new file mode 100644
index 0000000..204fa68
--- /dev/null
+++ b/ip-learning-spark/pom.xml
@@ -0,0 +1,154 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <groupId>cn.ac.iie</groupId>
+  <artifactId>ip-learning-spark</artifactId>
+  <version>1.0-SNAPSHOT</version>
+
+  <dependencies>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+      <version>3.0.1</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
+      <version>4.5.2</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpcore</artifactId>
+      <version>4.4.6</version>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>19.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_2.11</artifactId>
+      <version>2.2.3</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_2.11</artifactId>
+      <version>2.2.3</version>
+    </dependency>
+
+    <dependency>
+      <groupId>ru.yandex.clickhouse</groupId>
+      <artifactId>clickhouse-jdbc</artifactId>
+      <version>0.1.54</version>
+    </dependency>
+
+    <dependency>
+      <groupId>com.typesafe</groupId>
+      <artifactId>config</artifactId>
+      <version>1.2.1</version>
+    </dependency>
+
+    <dependency>
+      <groupId>com.arangodb</groupId>
+      <artifactId>arangodb-java-driver</artifactId>
+      <version>6.6.3</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+      <version>2.11.8</version>
+    </dependency>
+
+    <dependency>
+      <groupId>net.alchim31.maven</groupId>
+      <artifactId>scala-maven-plugin</artifactId>
+      <version>3.2.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.scala-tools</groupId>
+      <artifactId>maven-scala-plugin</artifactId>
+      <version>2.15.2</version>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.2</version>
+        <configuration>
+          <source>1.8</source>
+          <target>1.8</target>
+          <encoding>UTF-8</encoding>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>compile</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>net.alchim31.maven</groupId>
+        <artifactId>scala-maven-plugin</artifactId>
+        <version>3.2.1</version>
+        <executions>
+          <execution>
+            <id>scala-compile-first</id>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>add-source</goal>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>2.6</version>
+        <configuration>
+          <archive>
+            <manifest>
+              <mainClass>cn.ac.iie.config.ApplicationConfig</mainClass>
+            </manifest>
+          </archive>
+          <descriptorRefs>
+            <descriptorRef>jar-with-dependencies</descriptorRef>
+          </descriptorRefs>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file