Merge branch 'master' into ip-learning-graph-datacenter

# Conflicts:
#	IP-learning-graph/src/main/resources/clickhouse.properties
wanglihui
2020-08-06 16:51:47 +08:00
22 changed files with 1455 additions and 10 deletions

View File

@@ -105,6 +105,7 @@ public class ReadClickhouseData {
 newDoc.setKey(subscriberId);
 newDoc.addAttribute("FIRST_FOUND_TIME", firstFoundTime);
 newDoc.addAttribute("LAST_FOUND_TIME", lastFoundTime);
+newDoc.addAttribute("SUBSCRIBER_ID",subscriberId);
 } catch (Exception e) {
 e.printStackTrace();
 }

View File

@@ -1,6 +1,6 @@
 #arangoDB parameter configuration
-#arangoDB.host=192.168.40.182
-arangoDB.host=192.168.40.224
+arangoDB.host=192.168.40.182
+#arangoDB.host=192.168.40.224
 arangoDB.port=8529
 arangoDB.user=root
 arangoDB.password=111111
@@ -17,9 +17,9 @@ thread.await.termination.time=10
 #ClickHouse read time-range mode: 0 = read the past hour, 1 = explicit range
-time.limit.type=0
-read.clickhouse.max.time=1595833062
-read.clickhouse.min.time=1595833060
+time.limit.type=1
+read.clickhouse.max.time=1596684142
+read.clickhouse.min.time=1596425769
 update.interval=3600
 distinct.client.ip.num=10000

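The time.limit.type switch above mirrors the clickhouse.time.limit.type handling implemented by BaseClickhouseData.getTimeLimit later in this commit: mode 0 derives the window from the current hour and update.interval, mode 1 uses the fixed epoch-second bounds from the file. A minimal sketch of that selection logic (TimeWindowSketch and timeWindow are illustrative names, not part of this commit):

object TimeWindowSketch {
  // 0 = the past update.interval seconds ending at the current hour; 1 = fixed range
  def timeWindow(limitType: Int, currentHour: Long, interval: Long,
                 fixedMin: Long, fixedMax: Long): (Long, Long) = limitType match {
    case 0 => (currentHour - interval, currentHour)
    case 1 => (fixedMin, fixedMax)
    case _ => (0L, 0L)
  }

  def main(args: Array[String]): Unit = {
    val currentHour = System.currentTimeMillis / 1000 / 3600 * 3600
    println(timeWindow(1, currentHour, 3600L, 1596425769L, 1596684142L))
  }
}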
View File

@@ -1,9 +1,9 @@
 drivers=ru.yandex.clickhouse.ClickHouseDriver
 mdb.user=default
-#db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=3600000
-#mdb.password=111111
-db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=3600000
-mdb.password=ceiec2019
+db.id=192.168.40.186:8123/tsg_galaxy_v3?socket_timeout=300000
+mdb.password=111111
+#db.id=192.168.40.224:8123/tsg_galaxy_v3?socket_timeout=300000
+#mdb.password=ceiec2019
 initialsize=1
 minidle=1
 maxactive=50

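These keys look like input to a JDBC connection-pool wrapper (drivers, db.id, mdb.user/mdb.password, plus initialsize/minidle/maxactive pool limits); the pool class itself is not part of this diff. A minimal sketch of reading the file from the classpath (ClickhousePropsSketch is a hypothetical name):

import java.util.Properties

object ClickhousePropsSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    val in = getClass.getClassLoader.getResourceAsStream("clickhouse.properties")
    props.load(in)
    in.close()
    // db.id carries host:port/database plus the socket_timeout query parameter
    val url = "jdbc:clickhouse://" + props.getProperty("db.id")
    println(url + " as user " + props.getProperty("mdb.user"))
  }
}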
View File

@@ -0,0 +1,10 @@
package cn.ac.iie;
import cn.ac.iie.dao.BaseArangoData;
import com.arangodb.entity.BaseEdgeDocument;
public class readHistoryDataTest {
public static void main(String[] args) {
BaseArangoData baseArangoData = new BaseArangoData();
}
}

View File

@@ -3,8 +3,10 @@
 ### Example user template
 # IntelliJ project files
-.idea
+.idea/
 *.iml
 target
 logs/
 spark-warehouse/
+src/main/java/cn/ac/iie/config/
+src/test/java/

View File

@@ -0,0 +1,162 @@
<component name="libraryTable">
<library name="scala-sdk-2.11.7" type="Scala">
<properties>
<language-level>Scala_2_11</language-level>
<compiler-classpath>
<root url="file://D:/soft/scala/scala-2.11.7/lib/scala-compiler.jar" />
<root url="file://D:/soft/scala/scala-2.11.7/lib/scala-library.jar" />
<root url="file://D:/soft/scala/scala-2.11.7/lib/scala-reflect.jar" />
</compiler-classpath>
</properties>
<CLASSES>
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-actors-2.11.0.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-actors-migration_2.11-1.1.0.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-library.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-parser-combinators_2.11-1.0.4.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-reflect.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-swing_2.11-1.0.2.jar!/" />
<root url="jar://D:/soft/scala/scala-2.11.7/lib/scala-xml_2.11-1.0.4.jar!/" />
</CLASSES>
<JAVADOC>
<root url="http://www.scala-lang.org/api/2.11.7/" />
</JAVADOC>
<SOURCES>
<root url="file://D:/tar/scala-2.11.7" />
<root url="file://D:/tar/scala-2.11.7/src/actors" />
<root url="file://D:/tar/scala-2.11.7/src/forkjoin" />
<root url="file://D:/tar/scala-2.11.7/src/library" />
<root url="file://D:/tar/scala-2.11.7/src/partest-extras" />
<root url="file://D:/tar/scala-2.11.7/src/partest-javaagent" />
<root url="file://D:/tar/scala-2.11.7/src/repl" />
<root url="file://D:/tar/scala-2.11.7/test/disabled/pos/t1737" />
<root url="file://D:/tar/scala-2.11.7/test/disabled/presentation/akka/src" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/deprecation" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/duration-java" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t1143-2" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t1342" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t1464" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2163" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2470" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2570" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t2585" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t3003" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t3415" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/t7253" />
<root url="file://D:/tar/scala-2.11.7/test/files/jvm/varargs" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/abstract-class-error" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/primitive-sigs-1" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/raw-types-stubs" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t0673" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t2442" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t4851" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t6013" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t6289" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t750" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t750b" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t8244" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t8244e" />
<root url="file://D:/tar/scala-2.11.7/test/files/neg/t8376" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/ilya2" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/super" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t0695" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1101" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1102" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1150" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1152" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1176" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1186" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1196" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1197" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1230" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1231" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1232" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1235" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1254" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1409" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1642" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1711" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1745" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1751" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1782" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t1836" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2377" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2409" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2433" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2464" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t294" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2940" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t2956" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3120" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3249" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3349" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3404" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3429" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3486" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3521" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3567" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3642" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3938" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t3943" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t4603" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t4744" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t5165" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t5703" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t5957" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t6169" />
<root url="file://D:/tar/scala-2.11.7/test/files/pos/t942" />
<root url="file://D:/tar/scala-2.11.7/test/files/presentation/ide-bug-1000469/src" />
<root url="file://D:/tar/scala-2.11.7/test/files/presentation/ide-bug-1000531/src" />
<root url="file://D:/tar/scala-2.11.7/test/files/res/t6613" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/bcodeInlinerMixed" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/reflection-fancy-java-classes" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/reflection-java-annotations" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/reflection-java-crtp" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452a" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452b-bcode" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452d" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452e" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3452g" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t3897" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4238" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4317" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4729" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4788" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4788-separate-compilation" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t4891" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6168" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6168b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6240a" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6240b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t6548" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7008" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7246" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7246b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7359" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7374" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7439" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7455" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7741a" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t7741b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t8442" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t8601e" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9268" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9298" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9298b" />
<root url="file://D:/tar/scala-2.11.7/test/files/run/t9359" />
<root url="file://D:/tar/scala-2.11.7/test/files/t8449" />
<root url="file://D:/tar/scala-2.11.7/test/flaky/pos/t2868" />
<root url="file://D:/tar/scala-2.11.7/test/instrumented/library" />
<root url="file://D:/tar/scala-2.11.7/test/junit" />
<root url="file://D:/tar/scala-2.11.7/test/pending/jvm/t2705" />
<root url="file://D:/tar/scala-2.11.7/test/pending/pos/misc" />
<root url="file://D:/tar/scala-2.11.7/test/pending/pos/t3943" />
<root url="file://D:/tar/scala-2.11.7/test/pending/pos/t7778" />
<root url="file://D:/tar/scala-2.11.7/test/pending/run/t3899" />
<root url="file://D:/tar/scala-2.11.7/test/pending/run/t4713" />
<root url="file://D:/tar/scala-2.11.7/test/support/annotations" />
</SOURCES>
</library>
</component>

View File

@@ -0,0 +1,103 @@
package cn.ac.iie.dao;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.service.read.ReadHistoryArangoData;
import cn.ac.iie.utils.ArangoDBConnect;
import cn.ac.iie.utils.ExecutorThreadPool;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import com.arangodb.entity.BaseEdgeDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
 * Fetches historical data from arangoDB
 *
 * @author wlh
 */
public class BaseArangoData {
private static final Logger LOG = LoggerFactory.getLogger(BaseArangoData.class);
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseDocument>> historyVertexSubscriberMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnAddressIpMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationIpVisitFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationFqdnSameFqdnMap = new ConcurrentHashMap<>();
public static ConcurrentHashMap<Integer, ConcurrentHashMap<String, BaseEdgeDocument>> historyRelationSubsciberLocateIpMap = new ConcurrentHashMap<>();
private static ArangoDBConnect arangoDBConnect = ArangoDBConnect.getInstance();
private ExecutorThreadPool threadPool = ExecutorThreadPool.getInstance();
public <T extends BaseDocument> void readHistoryData(String table,
ConcurrentHashMap<Integer, ConcurrentHashMap<String, T>> historyMap,
Class<T> type) {
try {
LOG.warn("开始更新" + table);
long start = System.currentTimeMillis();
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
historyMap.put(i, new ConcurrentHashMap<>());
}
CountDownLatch countDownLatch = new CountDownLatch(ApplicationConfig.THREAD_POOL_NUMBER());
long[] timeRange = getTimeRange(table);
for (int i = 0; i < ApplicationConfig.THREAD_POOL_NUMBER(); i++) {
String sql = getQuerySql(timeRange, i, table);
ReadHistoryArangoData<T> readHistoryArangoData = new ReadHistoryArangoData<>(arangoDBConnect, sql, historyMap, type, table, countDownLatch);
threadPool.executor(readHistoryArangoData);
}
countDownLatch.await();
long last = System.currentTimeMillis();
LOG.warn("读取" + table + " arangoDB 共耗时:" + (last - start));
} catch (Exception e) {
e.printStackTrace();
}
}
private long[] getTimeRange(String table) {
long minTime = 0L;
long maxTime = 0L;
long startTime = System.currentTimeMillis();
String sql = "LET doc = (FOR doc IN " + table + " RETURN doc) return {max_time:MAX(doc[*].FIRST_FOUND_TIME),min_time:MIN(doc[*].FIRST_FOUND_TIME)}";
switch (ApplicationConfig.ARANGO_TIME_LIMIT_TYPE()) {
case 0:
ArangoCursor<BaseDocument> timeDoc = arangoDBConnect.executorQuery(sql, BaseDocument.class);
try {
if (timeDoc != null) {
while (timeDoc.hasNext()) {
BaseDocument doc = timeDoc.next();
// pad the upper bound so integer division of the range never drops boundary documents
maxTime = Long.parseLong(doc.getAttribute("max_time").toString()) + ApplicationConfig.THREAD_POOL_NUMBER();
minTime = Long.parseLong(doc.getAttribute("min_time").toString());
}
} else {
LOG.warn("获取ArangoDb时间范围为空");
}
} catch (Exception e) {
e.printStackTrace();
}
break;
case 1:
maxTime = ApplicationConfig.READ_ARANGO_MAX_TIME();
minTime = ApplicationConfig.READ_ARANGO_MIN_TIME();
break;
default:
}
long lastTime = System.currentTimeMillis();
LOG.warn(sql + "\nQuerying max/min time took " + (lastTime - startTime));
return new long[]{minTime, maxTime};
}
private String getQuerySql(long[] timeRange, int threadNumber, String table) {
long minTime = timeRange[0];
long maxTime = timeRange[1];
long diffTime = (maxTime - minTime) / ApplicationConfig.THREAD_POOL_NUMBER();
long maxThreadTime = minTime + (threadNumber + 1) * diffTime;
long minThreadTime = minTime + threadNumber * diffTime;
return "FOR doc IN " + table + " filter doc.FIRST_FOUND_TIME >= " + minThreadTime + " and doc.FIRST_FOUND_TIME <= " + maxThreadTime + " " + ApplicationConfig.ARANGODB_READ_LIMIT() + " RETURN doc";
}
}

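For reference, this is how the class is driven elsewhere in this commit (UpdateDocument and UpdateDocumentTest): one readHistoryData call per collection, each fanning the FIRST_FOUND_TIME range out over THREAD_POOL_NUMBER() time-sliced AQL queries. A condensed usage sketch:

import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}

object ReadHistorySketch {
  def main(args: Array[String]): Unit = {
    val baseArangoData = new BaseArangoData()
    // fills the per-thread buckets of the static history maps
    baseArangoData.readHistoryData("FQDN", historyVertexFqdnMap, classOf[BaseDocument])
    baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
  }
}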
View File

@@ -0,0 +1,125 @@
package cn.ac.iie.service.read;
import cn.ac.iie.config.ApplicationConfig;
import cn.ac.iie.utils.ArangoDBConnect;
import com.arangodb.ArangoCursor;
import com.arangodb.entity.BaseDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
/**
 * @author wlh
 * Reads the full arangoDB history with multiple threads and buckets it into a map
 */
public class ReadHistoryArangoData<T extends BaseDocument> extends Thread {
// current hour, truncated, in epoch seconds
public static long currentHour = System.currentTimeMillis() / (60 * 60 * 1000) * 60 * 60;
private static final Logger LOG = LoggerFactory.getLogger(ReadHistoryArangoData.class);
static final Integer RECENT_COUNT_HOUR = ApplicationConfig.RECENT_COUNT_HOUR();
public static final HashSet<String> PROTOCOL_SET;
static {
PROTOCOL_SET = new HashSet<>();
PROTOCOL_SET.add("HTTP");
PROTOCOL_SET.add("TLS");
PROTOCOL_SET.add("DNS");
}
private ArangoDBConnect arangoConnect;
private String query;
private ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map;
private Class<T> type;
private String table;
private CountDownLatch countDownLatch;
public ReadHistoryArangoData(ArangoDBConnect arangoConnect,
String query,
ConcurrentHashMap<Integer,ConcurrentHashMap<String, T>> map,
Class<T> type,
String table,
CountDownLatch countDownLatch) {
this.arangoConnect = arangoConnect;
this.query = query;
this.map = map;
this.type = type;
this.table = table;
this.countDownLatch = countDownLatch;
}
@Override
public void run() {
try {
long s = System.currentTimeMillis();
ArangoCursor<T> docs = arangoConnect.executorQuery(query, type);
if (docs != null) {
List<T> baseDocuments = docs.asListRemaining();
int i = 0;
for (T doc : baseDocuments) {
String key = doc.getKey();
switch (table) {
case "R_LOCATE_FQDN2IP":
updateProtocolDocument(doc);
deleteDistinctClientIpByTime(doc);
break;
case "R_VISIT_IP2FQDN":
updateProtocolDocument(doc);
break;
default:
}
// floorMod keeps the bucket index in [0, N) even for Integer.MIN_VALUE hash codes
int hashCode = Math.floorMod(key.hashCode(), ApplicationConfig.THREAD_POOL_NUMBER());
ConcurrentHashMap<String, T> tmpMap = map.get(hashCode);
tmpMap.put(key, doc);
i++;
}
long l = System.currentTimeMillis();
LOG.warn(query + "\nRead " + i + " documents, elapsed: " + (l - s));
}
}catch (Exception e){
e.printStackTrace();
}finally {
countDownLatch.countDown();
LOG.warn("本线程读取完毕,剩余线程数量:"+countDownLatch.getCount());
}
}
// Age the per-protocol hourly *_CNT_RECENT counters: shift right by one slot and zero the newest hour
private void updateProtocolDocument(T doc) {
if (doc.getProperties().containsKey("PROTOCOL_TYPE")) {
for (String protocol : PROTOCOL_SET) {
String protocolRecent = protocol + "_CNT_RECENT";
ArrayList<Long> cntRecent = (ArrayList<Long>) doc.getAttribute(protocolRecent);
Long[] cntRecentsSrc = cntRecent.toArray(new Long[cntRecent.size()]);
Long[] cntRecentsDst = new Long[RECENT_COUNT_HOUR];
System.arraycopy(cntRecentsSrc, 0, cntRecentsDst, 1, cntRecentsSrc.length - 1);
cntRecentsDst[0] = 0L;
doc.addAttribute(protocolRecent, cntRecentsDst);
}
}
}
// Rebuild DIST_CIP / DIST_CIP_TS, splitting the sorted timestamp list at the
// (currentHour - RECENT_COUNT_HOUR hours) cutoff
private void deleteDistinctClientIpByTime(T doc) {
ArrayList<String> distCip = (ArrayList<String>) doc.getAttribute("DIST_CIP");
ArrayList<Long> distCipTs = (ArrayList<Long>) doc.getAttribute("DIST_CIP_TS");
distCipTs.add(currentHour - RECENT_COUNT_HOUR * 3600);
Collections.sort(distCipTs);
int index = distCipTs.indexOf(currentHour - RECENT_COUNT_HOUR * 3600);
String[] distCipArr = new String[index];
long[] disCipTsArr = new long[index];
if (distCip.size() + 1 == distCipTs.size()){
for (int i = 0; i < index; i++) {
distCipArr[i] = distCip.get(i);
disCipTsArr[i] = distCipTs.get(i);
}
}
doc.updateAttribute("DIST_CIP", distCipArr);
doc.updateAttribute("DIST_CIP_TS", disCipTsArr);
}
}

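updateProtocolDocument ages the per-protocol counters by one hour: slot 0 becomes the new (empty) current hour and the oldest value falls off the end. A plain-Scala rendering of that array shift (RecentShiftSketch is illustrative only; RECENT_COUNT_HOUR is 24 in the conf below):

object RecentShiftSketch {
  // equivalent of the System.arraycopy in updateProtocolDocument: slide the
  // hourly counters right by one slot, zero the newest hour, drop the oldest
  def shift(recent: Array[Long], hours: Int): Array[Long] = {
    val dst = new Array[Long](hours)
    System.arraycopy(recent, 0, dst, 1, math.min(recent.length, hours) - 1)
    dst(0) = 0L
    dst
  }

  def main(args: Array[String]): Unit =
    println(shift(Array(5L, 3L, 1L), 3).mkString(",")) // prints 0,5,3
}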
View File

@@ -0,0 +1,116 @@
package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import com.arangodb.ArangoCollection;
import com.arangodb.ArangoCursor;
import com.arangodb.ArangoDB;
import com.arangodb.ArangoDatabase;
import com.arangodb.entity.DocumentCreateEntity;
import com.arangodb.entity.ErrorEntity;
import com.arangodb.entity.MultiDocumentEntity;
import com.arangodb.model.AqlQueryOptions;
import com.arangodb.model.DocumentCreateOptions;
import com.arangodb.util.MapBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
public class ArangoDBConnect {
private static final Logger LOG = LoggerFactory.getLogger(ArangoDBConnect.class);
private static ArangoDB arangoDB = null;
private static ArangoDBConnect conn = null;
static {
getArangoDatabase();
}
private static void getArangoDatabase(){
arangoDB = new ArangoDB.Builder()
.maxConnections(ApplicationConfig.THREAD_POOL_NUMBER())
.host(ApplicationConfig.ARANGODB_HOST(), ApplicationConfig.ARANGODB_PORT())
.user(ApplicationConfig.ARANGODB_USER())
.password(ApplicationConfig.ARANGODB_PASSWORD())
.build();
}
public static synchronized ArangoDBConnect getInstance(){
if (null == conn){
conn = new ArangoDBConnect();
}
return conn;
}
private ArangoDatabase getDatabase(){
return arangoDB.db(ApplicationConfig.ARANGODB_DB_NAME());
}
public void clean(){
try {
if (arangoDB != null){
arangoDB.shutdown();
}
}catch (Exception e){
e.printStackTrace();
}
}
public <T> ArangoCursor<T> executorQuery(String query,Class<T> type){
ArangoDatabase database = getDatabase();
Map<String, Object> bindVars = new MapBuilder().get();
AqlQueryOptions options = new AqlQueryOptions().ttl(ApplicationConfig.ARANGODB_TTL());
try {
return database.query(query, bindVars, options, type);
}catch (Exception e){
e.printStackTrace();
return null;
}finally {
bindVars.clear();
}
}
@Deprecated
public <T> void insertAndUpdate(ArrayList<T> docInsert,ArrayList<T> docUpdate,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docInsert.isEmpty()){
collection.importDocuments(docInsert);
}
if (!docUpdate.isEmpty()){
collection.replaceDocuments(docUpdate);
}
}catch (Exception e){
System.out.println("更新失败");
e.printStackTrace();
}finally {
docInsert.clear();
docUpdate.clear();
}
}
public <T> void overwrite(ArrayList<T> docOverwrite,String collectionName){
ArangoDatabase database = getDatabase();
try {
ArangoCollection collection = database.collection(collectionName);
if (!docOverwrite.isEmpty()){
DocumentCreateOptions documentCreateOptions = new DocumentCreateOptions();
documentCreateOptions.overwrite(true);
documentCreateOptions.silent(true);
MultiDocumentEntity<DocumentCreateEntity<T>> documentCreateEntityMultiDocumentEntity = collection.insertDocuments(docOverwrite, documentCreateOptions);
Collection<ErrorEntity> errors = documentCreateEntityMultiDocumentEntity.getErrors();
for (ErrorEntity errorEntity:errors){
LOG.warn("写入arangoDB异常"+errorEntity.getErrorMessage());
}
}
}catch (Exception e){
System.out.println("更新失败:"+e.toString());
}finally {
docOverwrite.clear();
}
}
}

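The overwrite() path is what the Spark jobs later in this commit use for batched upserts: insertDocuments with overwrite=true replaces any existing document with the same _key, and the batch list is cleared inside the call. A minimal usage sketch (OverwriteSketch is illustrative; connection settings come from the conf):

import java.util
import cn.ac.iie.utils.ArangoDBConnect
import com.arangodb.entity.BaseDocument

object OverwriteSketch {
  def main(args: Array[String]): Unit = {
    val conn = ArangoDBConnect.getInstance()
    val batch = new util.ArrayList[BaseDocument]()
    val doc = new BaseDocument()
    doc.setKey("example.com")
    doc.addAttribute("FQDN_NAME", "example.com")
    batch.add(doc)
    conn.overwrite(batch, "FQDN") // batch is cleared inside overwrite()
    conn.clean()
  }
}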
View File

@@ -0,0 +1,48 @@
package cn.ac.iie.utils;
import cn.ac.iie.config.ApplicationConfig;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.*;
/**
 * Thread pool manager
 * @author wlh
 */
public class ExecutorThreadPool {
private static ExecutorService pool = null ;
private static ExecutorThreadPool poolExecutor = null;
static {
getThreadPool();
}
private static void getThreadPool(){
ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
.setNameFormat("iplearning-application-pool-%d").build();
//Common Thread Pool
pool = new ThreadPoolExecutor(ApplicationConfig.THREAD_POOL_NUMBER(), ApplicationConfig.THREAD_POOL_NUMBER()*2,
0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
// pool = Executors.newFixedThreadPool(ApplicationConfig.THREAD_POOL_NUMBER);
}
public static ExecutorThreadPool getInstance(){
if (null == poolExecutor){
poolExecutor = new ExecutorThreadPool();
}
return poolExecutor;
}
public void executor(Runnable command){
pool.execute(command);
}
public void shutdown(){
pool.shutdown();
}
}

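The pool is consumed exactly as BaseArangoData does it: submit Runnables via executor() and gate on a CountDownLatch. A small sketch (PoolSketch is illustrative; thread.pool.number must be set in the conf):

import java.util.concurrent.CountDownLatch
import cn.ac.iie.utils.ExecutorThreadPool

object PoolSketch {
  def main(args: Array[String]): Unit = {
    val pool = ExecutorThreadPool.getInstance()
    val latch = new CountDownLatch(2)
    for (i <- 0 until 2) pool.executor(new Runnable {
      override def run(): Unit = {
        println(s"task $i on ${Thread.currentThread().getName}")
        latch.countDown()
      }
    })
    latch.await()
    pool.shutdown()
  }
}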
View File

@@ -0,0 +1,45 @@
#Spark job configuration
spark.sql.shuffle.partitions=5
spark.executor.memory=4g
spark.app.name=test
spark.network.timeout=300s
repartitionNumber=36
spark.serializer=org.apache.spark.serializer.KryoSerializer
master=local[*]
#Spark-to-ClickHouse read configuration
spark.read.clickhouse.url=jdbc:clickhouse://192.168.40.186:8123/tsg_galaxy_v3
spark.read.clickhouse.driver=ru.yandex.clickhouse.ClickHouseDriver
spark.read.clickhouse.user=default
spark.read.clickhouse.password=111111
spark.read.clickhouse.numPartitions=144
spark.read.clickhouse.fetchsize=10000
spark.read.clickhouse.partitionColumn=common_recv_time
clickhouse.socket.timeout=300000
#arangoDB configuration
arangoDB.host=192.168.40.182
arangoDB.port=8529
arangoDB.user=upsert
arangoDB.password=ceiec2018
#arangoDB.DB.name=insert_iplearn_index
arangoDB.DB.name=ip-learning-test-0
arangoDB.ttl=3600
thread.pool.number=5
#ClickHouse read time-range mode: 0 = read the past hour, 1 = explicit range
clickhouse.time.limit.type=0
read.clickhouse.max.time=1571245220
read.clickhouse.min.time=1571245210
#arangoDB read time-range mode: 0 = normal full read, 1 = explicit range
arango.time.limit.type=0
read.arango.max.time=1571245320
read.arango.min.time=1571245200
arangoDB.read.limit=
update.arango.batch=10000
distinct.client.ip.num=10000
recent.count.hour=24
update.interval=3600

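This file is read through Typesafe Config (see ApplicationConfig further down in this commit). A standard property of that library, worth noting for deployment: passing -Dconfig.file=/path/to/application.conf overrides the bundled resource, so the test values above can be swapped without rebuilding. Minimal load sketch (ConfSketch is a hypothetical name):

import com.typesafe.config.{Config, ConfigFactory}

object ConfSketch {
  def main(args: Array[String]): Unit = {
    val config: Config = ConfigFactory.load()
    println(config.getInt("thread.pool.number"))   // 5 with the values above
    println(config.getString("arangoDB.DB.name"))  // ip-learning-test-0
  }
}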
View File

@@ -0,0 +1,25 @@
######################### logger ##############################
log4j.logger.org.apache.http=OFF
log4j.logger.org.apache.http.wire=OFF
#Log4j
log4j.rootLogger=info,console,file
# Console log output
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=warn
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# File log output
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=warn
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Log file path: change it to the corresponding directory when deploying
#log4j.appender.file.file=/home/ceiec/iplearning/logs/ip-learning-application.log
#log4j.appender.file.file=/home/ceiec/iplearning/testLog/ip-learning-application.log
log4j.appender.file.file=./logs/ip-learning-application.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n

View File

@@ -0,0 +1,51 @@
package cn.ac.iie.config
import com.typesafe.config.{Config, ConfigFactory}
object ApplicationConfig {
private lazy val config: Config = ConfigFactory.load()
val SPARK_SQL_SHUFFLE_PARTITIONS: Int = config.getInt("spark.sql.shuffle.partitions")
val SPARK_EXECUTOR_MEMORY: String = config.getString("spark.executor.memory")
val SPARK_APP_NAME: String = config.getString("spark.app.name")
val SPARK_NETWORK_TIMEOUT: String = config.getString("spark.network.timeout")
// val REPARTITION_NUMBER: Int = config.getInt("repartitionNumber")
val MASTER: String = config.getString("master")
val SPARK_SERIALIZER: String = config.getString("spark.serializer")
val NUMPARTITIONS: String = config.getString("spark.read.clickhouse.numPartitions")
val SPARK_READ_CLICKHOUSE_URL: String = config.getString("spark.read.clickhouse.url")
val SPARK_READ_CLICKHOUSE_DRIVER: String = config.getString("spark.read.clickhouse.driver")
val SPARK_READ_CLICKHOUSE_USER: String = config.getString("spark.read.clickhouse.user")
val SPARK_READ_CLICKHOUSE_PASSWORD: String = config.getString("spark.read.clickhouse.password")
val SPARK_READ_CLICKHOUSE_FETCHSIZE: String = config.getString("spark.read.clickhouse.fetchsize")
val SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN: String = config.getString("spark.read.clickhouse.partitionColumn")
val ARANGODB_HOST: String= config.getString("arangoDB.host")
val ARANGODB_PORT: Int = config.getInt("arangoDB.port")
val ARANGODB_USER: String= config.getString("arangoDB.user")
val ARANGODB_PASSWORD:String= config.getString("arangoDB.password")
val ARANGODB_DB_NAME:String= config.getString("arangoDB.DB.name")
val ARANGODB_TTL: Int = config.getInt("arangoDB.ttl")
val CLICKHOUSE_SOCKET_TIMEOUT: Int = config.getInt("clickhouse.socket.timeout")
val THREAD_POOL_NUMBER: Int = config.getInt("thread.pool.number")
val CLICKHOUSE_TIME_LIMIT_TYPE: Int = config.getInt("clickhouse.time.limit.type")
val READ_CLICKHOUSE_MAX_TIME: Long = config.getLong("read.clickhouse.max.time")
val READ_CLICKHOUSE_MIN_TIME: Long = config.getLong("read.clickhouse.min.time")
val ARANGO_TIME_LIMIT_TYPE: Int = config.getInt("arango.time.limit.type")
val READ_ARANGO_MAX_TIME: Long = config.getLong("read.arango.max.time")
val READ_ARANGO_MIN_TIME: Long = config.getLong("read.arango.min.time")
val ARANGODB_READ_LIMIT: String = config.getString("arangoDB.read.limit")
val UPDATE_ARANGO_BATCH: Int = config.getInt("update.arango.batch")
val RECENT_COUNT_HOUR: Int = config.getInt("recent.count.hour")
val DISTINCT_CLIENT_IP_NUM: Int = config.getInt("distinct.client.ip.num")
val UPDATE_INTERVAL: Int = config.getInt("update.interval")
}

View File

@@ -0,0 +1,209 @@
package cn.ac.iie.dao
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.utils.SparkSessionUtil.spark
import org.apache.spark.sql.DataFrame
import org.slf4j.LoggerFactory
object BaseClickhouseData {
private val LOG = LoggerFactory.getLogger(BaseClickhouseData.getClass)
val currentHour: Long = System.currentTimeMillis / (60 * 60 * 1000) * 60 * 60
// note the order: _1 is the upper bound (max), _2 the lower bound (min)
private val timeLimit: (Long, Long) = getTimeLimit
private def initClickhouseData(sql:String): Unit ={
val dataFrame: DataFrame = spark.read.format("jdbc")
.option("url", ApplicationConfig.SPARK_READ_CLICKHOUSE_URL)
.option("dbtable", sql)
.option("driver", ApplicationConfig.SPARK_READ_CLICKHOUSE_DRIVER)
.option("user", ApplicationConfig.SPARK_READ_CLICKHOUSE_USER)
.option("password", ApplicationConfig.SPARK_READ_CLICKHOUSE_PASSWORD)
.option("numPartitions", ApplicationConfig.NUMPARTITIONS)
.option("partitionColumn", ApplicationConfig.SPARK_READ_CLICKHOUSE_PARTITIONCOLUMN)
.option("lowerBound", timeLimit._2)
.option("upperBound", timeLimit._1)
.option("fetchsize", ApplicationConfig.SPARK_READ_CLICKHOUSE_FETCHSIZE)
.option("socket_timeout",ApplicationConfig.CLICKHOUSE_SOCKET_TIMEOUT)
.load()
dataFrame.printSchema()
dataFrame.createOrReplaceGlobalTempView("dbtable")
}
def loadConnectionDataFromCk(): Unit ={
val where = "common_recv_time >= " + timeLimit._2 + " AND common_recv_time < " + timeLimit._1
val sql =
s"""
|(SELECT
| ssl_sni,http_host,common_client_ip,common_server_ip,common_recv_time,common_c2s_byte_num,common_s2c_byte_num,common_schema_type
|FROM
| connection_record_log
|WHERE $where) as dbtable
""".stripMargin
LOG.warn(sql)
initClickhouseData(sql)
}
private def loadRadiusDataFromCk(): Unit ={
val where =
s"""
| common_recv_time >= ${timeLimit._2}
| AND common_recv_time < ${timeLimit._1}
| AND common_subscriber_id != ''
| AND radius_framed_ip != ''
| AND radius_packet_type = 4
| AND radius_acct_status_type = 1
""".stripMargin
val sql =
s"""
|(SELECT
| common_subscriber_id,radius_framed_ip,common_recv_time
|FROM
| tsg_galaxy_v3.radius_record_log
|WHERE
| $where) as dbtable
""".stripMargin
LOG.warn(sql)
initClickhouseData(sql)
}
def getVertexFqdnDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| )
| UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| )
| )
|GROUP BY
| FQDN
|HAVING
| FQDN != ''
""".stripMargin
LOG.warn(sql)
val vertexFqdnDf = spark.sql(sql)
vertexFqdnDf.printSchema()
vertexFqdnDf
}
def getVertexIpDf: DataFrame ={
loadConnectionDataFromCk()
val sql =
"""
|SELECT
| *
|FROM
| (
| (
| SELECT
| common_client_ip AS IP,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(common_c2s_byte_num) as BYTES_SUM,
| 'client' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| UNION ALL
| (
| SELECT
| common_server_ip AS IP,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| count(*) as SESSION_COUNT,
| sum(common_s2c_byte_num) as BYTES_SUM,
| 'server' as ip_type
| FROM
| global_temp.dbtable
| GROUP BY
| IP
| )
| )
""".stripMargin
LOG.warn(sql)
val vertexIpDf = spark.sql(sql)
vertexIpDf.printSchema()
vertexIpDf
}
def getRelationFqdnLocateIpDf: DataFrame ={
loadConnectionDataFromCk()
val sslSql =
"""
|SELECT
| ssl_sni AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'TLS' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'SSL'
|GROUP BY
| ssl_sni,common_server_ip
""".stripMargin
val httpSql =
"""
|SELECT
| http_host AS FQDN,
| common_server_ip,
| MAX(common_recv_time) AS LAST_FOUND_TIME,
| MIN(common_recv_time) AS FIRST_FOUND_TIME,
| COUNT(*) AS COUNT_TOTAL,
| collect_set(common_client_ip) AS DIST_CIP_RECENT,
| 'HTTP' AS schema_type
|FROM
| global_temp.dbtable
|WHERE
| common_schema_type = 'HTTP'
|GROUP BY
| http_host,common_server_ip
""".stripMargin
val sql = s"SELECT * FROM (($sslSql) UNION ALL ($httpSql)) WHERE FQDN != ''"
LOG.warn(sql)
val relationFqdnLocateIpDf = spark.sql(sql)
relationFqdnLocateIpDf.printSchema()
relationFqdnLocateIpDf
}
// returns (maxTime, minTime)
private def getTimeLimit: (Long,Long) ={
var maxTime = 0L
var minTime = 0L
ApplicationConfig.CLICKHOUSE_TIME_LIMIT_TYPE match {
case 0 =>
maxTime = currentHour
minTime = maxTime - ApplicationConfig.UPDATE_INTERVAL
case 1 =>
maxTime = ApplicationConfig.READ_CLICKHOUSE_MAX_TIME
minTime = ApplicationConfig.READ_CLICKHOUSE_MIN_TIME
case _ =>
}
(maxTime, minTime)
}
}

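One thing worth noting when reading initClickhouseData above: timeLimit is (maxTime, minTime), so ._1 feeds upperBound and ._2 feeds lowerBound. Spark's JDBC source then splits [lowerBound, upperBound) on common_recv_time into numPartitions roughly equal ranges; a back-of-envelope sketch with illustrative values from the configs in this commit:

object JdbcStrideSketch {
  def main(args: Array[String]): Unit = {
    val (upper, lower, numPartitions) = (1596684142L, 1596425769L, 144)
    val stride = (upper - lower) / numPartitions
    println(s"each partition scans roughly $stride seconds of common_recv_time")
  }
}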
View File

@@ -0,0 +1,22 @@
package cn.ac.iie.main
import cn.ac.iie.service.update.UpdateDocument._
import cn.ac.iie.utils.{ExecutorThreadPool, SparkSessionUtil}
object IpLearningApplication {
private val pool = ExecutorThreadPool.getInstance
def main(args: Array[String]): Unit = {
try {
updateVertexFqdn()
updateVertexIp()
updateRelationFqdnLocateIp()
}catch {
case e:Exception => e.printStackTrace()
}finally {
pool.shutdown()
arangoManger.clean()
SparkSessionUtil.closeSpark()
}
}
}

View File

@@ -0,0 +1,88 @@
package cn.ac.iie.service.transform
import java.util.regex.Pattern
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseClickhouseData
import cn.ac.iie.spark.partition.CustomPartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.slf4j.LoggerFactory
object MergeDataFrame {
private val LOG = LoggerFactory.getLogger(MergeDataFrame.getClass)
private val pattern = Pattern.compile("^[\\d]*$")
def mergeVertexFqdn(): RDD[Row] ={
BaseClickhouseData.getVertexFqdnDf
.rdd.filter(row => isDomain(row.getAs[String](0))).map(row => (row.get(0),row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}
def mergeVertexIp(): RDD[Row]={
val vertexIpDf = BaseClickhouseData.getVertexIpDf
val frame = vertexIpDf.groupBy("IP").agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("SESSION_COUNT").alias("SESSION_COUNT_LIST"),
collect_list("BYTES_SUM").alias("BYTES_SUM_LIST"),
collect_list("ip_type").alias("ip_type_list")
)
val values = frame.rdd.map(row => (row.get(0), row))
.partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
values
}
def mergeRelationFqdnLocateIp(): RDD[Row] ={
val frame = BaseClickhouseData.getRelationFqdnLocateIpDf.filter(row => isDomain(row.getAs[String]("FQDN")))
.groupBy("FQDN", "common_server_ip")
.agg(
min("FIRST_FOUND_TIME").alias("FIRST_FOUND_TIME"),
max("LAST_FOUND_TIME").alias("LAST_FOUND_TIME"),
collect_list("COUNT_TOTAL").alias("COUNT_TOTAL_LIST"),
collect_list("schema_type").alias("schema_type_list"),
collect_set("DIST_CIP_RECENT").alias("DIST_CIP_RECENT")
)
frame.rdd.map(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val key = fqdn.concat("-"+serverIp)
(key,row)
}).partitionBy(new CustomPartitioner(ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)).values
}
// Treat the string as a domain unless it is a dotted-quad IPv4 literal.
// An optional single ":port" suffix is stripped first; strings with several
// colons (e.g. bare IPv6 literals) are rejected outright.
private def isDomain(fqdn: String): Boolean = {
try {
if (fqdn == null || fqdn.isEmpty) {
return false
}
var host = fqdn
if (host.contains(":")) {
val colonParts = host.split(":")
if (colonParts.length > 2) {
return false
}
host = colonParts(0)
}
val fqdnArr = host.split("\\.")
if (fqdnArr.length != 4) {
return true
}
for (f <- fqdnArr) {
if (pattern.matcher(f).matches) {
val i = f.toLong
if (i < 0 || i > 255) {
return true
}
} else {
return true
}
}
} catch {
case e: Exception =>
LOG.error("Failed to parse domain " + fqdn + ":\n" + e.toString)
}
false
}
}

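A quick truth table for the isDomain heuristic, re-implemented locally for illustration only: a string counts as a "domain" unless it is a dotted-quad IPv4 literal, with an optional single :port suffix stripped first.

object IsDomainTable {
  private def isDomain(fqdn: String): Boolean = {
    if (fqdn == null || fqdn.isEmpty) return false
    val colonParts = fqdn.split(":")
    if (colonParts.length > 2) return false // e.g. bare IPv6
    val host = colonParts(0)
    val parts = host.split("\\.")
    if (parts.length != 4) return true
    !parts.forall(p => p.matches("\\d+") && p.toLong <= 255)
  }

  def main(args: Array[String]): Unit =
    Seq("example.com", "1.2.3.4", "1.2.3.4:443", "300.1.2.3", "")
      .foreach(s => println(s"'$s' -> ${isDomain(s)}"))
}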
View File

@@ -0,0 +1,123 @@
package cn.ac.iie.service.update
import java.lang
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.service.read.ReadHistoryArangoData
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import scala.collection.mutable
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocHandler {
val PROTOCOL_SET: Set[String] = Set("HTTP","TLS","DNS")
def updateMaxAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
var hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
if (newAttribute > hisAttribute){
hisAttribute = newAttribute
}
hisDoc.addAttribute(attributeName,hisAttribute)
}
def updateSumAttribute(hisDoc: BaseDocument,newAttribute:Long,attributeName:String): Unit ={
val hisAttribute = hisDoc.getAttribute(attributeName).toString.toLong
hisDoc.addAttribute(attributeName,newAttribute+hisAttribute)
}
def separateAttributeByIpType(ipTypeList:ofRef[String],
sessionCountList:ofRef[AnyRef],
bytesSumList:ofRef[AnyRef]): (Long,Long,Long,Long) ={
var serverSessionCount = 0L
var serverBytesSum = 0L
var clientSessionCount = 0L
var clientBytesSum = 0L
if (ipTypeList.length == sessionCountList.length && ipTypeList.length == bytesSumList.length){
sessionCountList.zip(bytesSumList).zip(ipTypeList).foreach(t => {
t._2 match {
case "server" =>
serverSessionCount = t._1._1.toString.toLong
serverBytesSum = t._1._2.toString.toLong
case "client" =>
clientSessionCount = t._1._1.toString.toLong
clientBytesSum = t._1._2.toString.toLong
}
})
}
(serverSessionCount, serverBytesSum, clientSessionCount, clientBytesSum)
}
def separateAttributeByProtocol(schemaTypeList:ofRef[AnyRef],countTotalList:ofRef[AnyRef]): Map[String, Long] ={
var protocolMap: Map[String, Long] = Map()
if (schemaTypeList.length == countTotalList.length){
protocolMap = schemaTypeList.zip(countTotalList).map(t => (t._1.toString,t._2.toString.toLong)).toMap
}
PROTOCOL_SET.foreach(protocol => {
if (!protocolMap.contains(protocol)){
protocolMap += (protocol -> 0L)
}
})
protocolMap
}
def updateProtocolAttribute(hisDoc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
var protocolType = hisDoc.getAttribute("PROTOCOL_TYPE").toString
protocolMap.foreach(t => {
if (t._2 > 0 && !protocolType.contains(t._1)){
protocolType = protocolType.concat(","+ t._1)
}
val cntTotalName = t._1.concat("_CNT_TOTAL")
val cntRecentName = t._1.concat("_CNT_RECENT")
val cntRecent: Array[lang.Long] = hisDoc.getAttribute(cntRecentName).asInstanceOf[Array[java.lang.Long]]
cntRecent.update(0,t._2)
updateSumAttribute(hisDoc,t._2,cntTotalName)
hisDoc.addAttribute(cntRecentName,cntRecent)
})
hisDoc.addAttribute("PROTOCOL_TYPE",protocolType)
}
def putProtocolAttribute(doc:BaseEdgeDocument, protocolMap: Map[String, Long]): Unit ={
val protocolTypeBuilder = new mutable.StringBuilder()
protocolMap.foreach(t => {
if (t._2 > 0){
protocolTypeBuilder.append(","+t._1)
}
val cntTotalName = t._1.concat("_CNT_TOTAL")
val cntRecentName = t._1.concat("_CNT_RECENT")
val cntRecent: Array[Long] = new Array[Long](ApplicationConfig.RECENT_COUNT_HOUR)
cntRecent.update(0,t._2)
doc.addAttribute(cntTotalName,t._2)
doc.addAttribute(cntRecentName,cntRecent)
})
doc.addAttribute("PROTOCOL_TYPE",protocolTypeBuilder.toString().replaceFirst(",",""))
}
def mergeDistinctIp(distCipRecent:ofRef[ofRef[String]]): Array[String] ={
distCipRecent.flatten.distinct.take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toArray
}
def putDistinctIp(doc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val map = newDistinctIp.map(ip => {
(ip, ReadHistoryArangoData.currentHour)
}).toMap
doc.addAttribute("DIST_CIP",map.keys.toArray)
doc.addAttribute("DIST_CIP_TS",map.values.toArray)
}
def updateDistinctIp(hisDoc:BaseEdgeDocument,newDistinctIp:Array[String]): Unit ={
val hisDistCip = hisDoc.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val hisDistCipTs = hisDoc.getAttribute("DIST_CIP_TS").asInstanceOf[Array[Long]]
if (hisDistCip.length == hisDistCipTs.length){
val distCipToTsMap: Map[String, Long] = hisDistCip.zip(hisDistCipTs).toMap
val muDistCipToTsMap: mutable.Map[String, Long] = mutable.Map(distCipToTsMap.toSeq:_*)
newDistinctIp.foreach(cip => {
muDistCipToTsMap.put(cip,ReadHistoryArangoData.currentHour)
})
val resultMap = muDistCipToTsMap.toList.sortBy(-_._2).take(ApplicationConfig.DISTINCT_CLIENT_IP_NUM).toMap
hisDoc.addAttribute("DIST_CIP",resultMap.keys.toArray)
hisDoc.addAttribute("DIST_CIP_TS",resultMap.values.toArray)
}
}
}

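separateAttributeByProtocol zips the collected schema types with their counts and guarantees a (possibly zero) entry for each of HTTP/TLS/DNS, which is what lets the put/update helpers above iterate unconditionally. A plain-Scala equivalent (ProtocolMapSketch is illustrative only):

object ProtocolMapSketch {
  val PROTOCOL_SET: Set[String] = Set("HTTP", "TLS", "DNS")

  def main(args: Array[String]): Unit = {
    val schemaTypes = Seq("HTTP", "TLS")
    val counts = Seq(12L, 7L)
    var protocolMap = schemaTypes.zip(counts).toMap
    PROTOCOL_SET.foreach(p => if (!protocolMap.contains(p)) protocolMap += (p -> 0L))
    println(protocolMap) // e.g. Map(HTTP -> 12, TLS -> 7, DNS -> 0), ordering may vary
  }
}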
View File

@@ -0,0 +1,194 @@
package cn.ac.iie.service.update
import java.util
import cn.ac.iie.config.ApplicationConfig
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import cn.ac.iie.service.transform.MergeDataFrame._
import cn.ac.iie.service.update.UpdateDocHandler._
import cn.ac.iie.utils.ArangoDBConnect
import cn.ac.iie.utils.SparkSessionUtil.spark
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.slf4j.LoggerFactory
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocument {
val arangoManger: ArangoDBConnect = ArangoDBConnect.getInstance()
private val LOG = LoggerFactory.getLogger(UpdateDocument.getClass)
private val baseArangoData = new BaseArangoData()
def updateVertexFqdn(): Unit ={
baseArangoData.readHistoryData("FQDN",historyVertexFqdnMap,classOf[BaseDocument])
val hisVerFqdnBc = spark.sparkContext.broadcast(historyVertexFqdnMap)
try {
val start = System.currentTimeMillis()
val mergeVertexFqdnDf: RDD[Row] = mergeVertexFqdn()
mergeVertexFqdnDf.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val hisVerFqdnMapTmp = hisVerFqdnBc.value.get(partitionId)
val resultDocumentList: util.ArrayList[BaseDocument] = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val fqdn = row.getAs[String]("FQDN")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
var document: BaseDocument = hisVerFqdnMapTmp.getOrDefault(fqdn,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
} else{
document = new BaseDocument
document.setKey(fqdn)
document.addAttribute("FQDN_NAME",fqdn)
document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
}
resultDocumentList.add(document)
i+=1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
arangoManger.overwrite(resultDocumentList, "FQDN")
LOG.warn("更新FQDN:" + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "FQDN")
LOG.warn("更新FQDN:" + i)
}
})
val last = System.currentTimeMillis()
LOG.warn(s"更新FQDN时间${last-start}")
}catch {
case e:Exception => e.printStackTrace()
}finally {
hisVerFqdnBc.destroy()
}
}
def updateVertexIp(): Unit ={
baseArangoData.readHistoryData("IP",historyVertexIpMap,classOf[BaseDocument])
val hisVerIpBc = spark.sparkContext.broadcast(historyVertexIpMap)
try {
val start = System.currentTimeMillis()
val mergeVertexIpDf = mergeVertexIp()
mergeVertexIpDf.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val hisVerIpMapTmp = hisVerIpBc.value.get(partitionId)
val resultDocumentList: util.ArrayList[BaseDocument] = new util.ArrayList[BaseDocument]
var i = 0
iter.foreach(row => {
val ip = row.getAs[String]("IP")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val sessionCountList = row.getAs[ofRef[AnyRef]]("SESSION_COUNT_LIST")
val bytesSumList = row.getAs[ofRef[AnyRef]]("BYTES_SUM_LIST")
val ipTypeList = row.getAs[ofRef[String]]("ip_type_list")
val sepAttributeTuple = separateAttributeByIpType(ipTypeList,sessionCountList,bytesSumList)
var document = hisVerIpMapTmp.getOrDefault(ip,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
updateSumAttribute(document,sepAttributeTuple._1,"SERVER_SESSION_COUNT")
updateSumAttribute(document,sepAttributeTuple._2,"SERVER_BYTES_SUM")
updateSumAttribute(document,sepAttributeTuple._3,"CLIENT_SESSION_COUNT")
updateSumAttribute(document,sepAttributeTuple._4,"CLIENT_BYTES_SUM")
} else {
document = new BaseDocument
document.setKey(ip)
document.addAttribute("IP",ip)
document.addAttribute("FIRST_FOUND_TIME",firstFoundTime)
document.addAttribute("LAST_FOUND_TIME",lastFoundTime)
document.addAttribute("SERVER_SESSION_COUNT",sepAttributeTuple._1)
document.addAttribute("SERVER_BYTES_SUM",sepAttributeTuple._2)
document.addAttribute("CLIENT_SESSION_COUNT",sepAttributeTuple._3)
document.addAttribute("CLIENT_BYTES_SUM",sepAttributeTuple._4)
document.addAttribute("COMMON_LINK_INFO","")
}
resultDocumentList.add(document)
i+=1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn("更新IP:" + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "IP")
LOG.warn("更新IP:" + i)
}
})
val last = System.currentTimeMillis()
LOG.warn(s"更新IP时间${last-start}")
}catch {
case e:Exception => e.printStackTrace()
}finally {
hisVerIpBc.destroy()
}
}
def updateRelationFqdnLocateIp(): Unit ={
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
val hisReFqdnLocIpBc = spark.sparkContext.broadcast(historyRelationFqdnAddressIpMap)
try {
val start = System.currentTimeMillis()
val mergeRelationFqdnLocateIpDf = mergeRelationFqdnLocateIp()
mergeRelationFqdnLocateIpDf.foreachPartition(iter => {
val partitionId: Int = TaskContext.get.partitionId
val hisRelaFqdnLocaIpMapTmp = hisReFqdnLocIpBc.value.get(partitionId)
val resultDocumentList: util.ArrayList[BaseEdgeDocument] = new util.ArrayList[BaseEdgeDocument]
var i = 0
iter.foreach(row => {
val fqdn = row.getAs[String]("FQDN")
val serverIp = row.getAs[String]("common_server_ip")
val firstFoundTime = row.getAs[Long]("FIRST_FOUND_TIME")
val lastFoundTime = row.getAs[Long]("LAST_FOUND_TIME")
val countTotalList = row.getAs[ofRef[AnyRef]]("COUNT_TOTAL_LIST")
val schemaTypeList = row.getAs[ofRef[AnyRef]]("schema_type_list")
val distCipRecent = row.getAs[ofRef[ofRef[String]]]("DIST_CIP_RECENT")
val sepAttributeMap: Map[String, Long] = separateAttributeByProtocol(schemaTypeList,countTotalList)
val distinctIp: Array[String] = mergeDistinctIp(distCipRecent)
val key = fqdn.concat("-"+serverIp)
var document: BaseEdgeDocument = hisRelaFqdnLocaIpMapTmp.getOrDefault(key,null)
if (document != null){
updateMaxAttribute(document,lastFoundTime,"LAST_FOUND_TIME")
updateProtocolAttribute(document,sepAttributeMap)
updateDistinctIp(document,distinctIp)
}else {
document = new BaseEdgeDocument()
document.setKey(key)
document.setFrom("FQDN/" + fqdn)
document.setTo("IP/" + serverIp)
document.addAttribute("FIRST_FOUND_TIME", firstFoundTime)
document.addAttribute("LAST_FOUND_TIME", lastFoundTime)
putProtocolAttribute(document,sepAttributeMap)
putDistinctIp(document,distinctIp)
}
resultDocumentList.add(document)
i+=1
if (i >= ApplicationConfig.UPDATE_ARANGO_BATCH){
arangoManger.overwrite(resultDocumentList, "R_LOCATE_FQDN2IP")
LOG.warn("更新R_LOCATE_FQDN2IP:" + i)
i = 0
}
})
if (i != 0) {
arangoManger.overwrite(resultDocumentList, "R_LOCATE_FQDN2IP")
LOG.warn("更新R_LOCATE_FQDN2IP:" + i)
}
})
val last = System.currentTimeMillis()
LOG.warn(s"更新R_LOCATE_FQDN2IP时间${last-start}")
}catch {
case e:Exception => e.printStackTrace()
}finally {
hisReFqdnLocIpBc.destroy()
}
}
}

View File

@@ -0,0 +1,11 @@
package cn.ac.iie.spark.partition
import org.apache.spark.Partitioner
class CustomPartitioner(numPartition: Int) extends Partitioner{
override def numPartitions: Int = numPartition
override def getPartition(key: Any): Int = {
// floorMod keeps the result in [0, numPartition), matching the bucket hashing in ReadHistoryArangoData
Math.floorMod(key.hashCode(), numPartition)
}
}

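This partitioner mirrors the bucketing in ReadHistoryArangoData (hashCode modulo THREAD_POOL_NUMBER over the same string keys), so when spark.sql.shuffle.partitions equals thread.pool.number, each Spark partition sees exactly the history-map bucket with the same index. A sketch of the wiring used in MergeDataFrame (PartitionAlignSketch is illustrative only):

import cn.ac.iie.spark.partition.CustomPartitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object PartitionAlignSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("sketch").master("local[*]").getOrCreate()
    val keys = spark.sparkContext.parallelize(Seq("a.com", "b.com", "c.com"))
    // key each record, repartition with the custom partitioner, keep the values
    val parts: RDD[(String, Int)] = keys.map(k => (k, 1))
      .partitionBy(new CustomPartitioner(5))
      .mapPartitionsWithIndex((idx, it) => it.map(kv => (kv._1, idx)))
    parts.collect().foreach(println) // (key, partition id) pairs
    spark.stop()
  }
}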
View File

@@ -0,0 +1,32 @@
package cn.ac.iie.utils
import cn.ac.iie.config.ApplicationConfig
import org.apache.spark.sql.SparkSession
import org.slf4j.LoggerFactory
object SparkSessionUtil {
private val LOG = LoggerFactory.getLogger(SparkSessionUtil.getClass)
val spark: SparkSession = getSparkSession
private def getSparkSession: SparkSession ={
val spark: SparkSession = SparkSession
.builder()
.appName(ApplicationConfig.SPARK_APP_NAME)
.config("spark.serializer", ApplicationConfig.SPARK_SERIALIZER)
.config("spark.network.timeout", ApplicationConfig.SPARK_NETWORK_TIMEOUT)
.config("spark.sql.shuffle.partitions", ApplicationConfig.SPARK_SQL_SHUFFLE_PARTITIONS)
.config("spark.executor.memory", ApplicationConfig.SPARK_EXECUTOR_MEMORY)
.master(ApplicationConfig.MASTER)
.getOrCreate()
LOG.warn("sparkession获取成功")
spark
}
def closeSpark(): Unit ={
if (spark != null){
spark.stop()
}
}
}

View File

@@ -0,0 +1,43 @@
package cn.ac.iie.dao
import cn.ac.iie.utils.SparkSessionUtil
import org.apache.spark.sql.SparkSession
object BaseClickhouseDataTest {
private val spark: SparkSession = SparkSessionUtil.spark
def main(args: Array[String]): Unit = {
BaseClickhouseData.loadConnectionDataFromCk()
val sql =
"""
|SELECT
| FQDN,MAX( LAST_FOUND_TIME ) AS LAST_FOUND_TIME,MIN( FIRST_FOUND_TIME ) AS FIRST_FOUND_TIME
|FROM
| (
| (SELECT
| ssl_sni AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'SSL' GROUP BY ssl_sni
| )
| UNION ALL
| (SELECT
| http_host AS FQDN,MAX( common_recv_time ) AS LAST_FOUND_TIME,MIN( common_recv_time ) AS FIRST_FOUND_TIME
| FROM
| global_temp.dbtable
| WHERE
| common_schema_type = 'HTTP' GROUP BY http_host
| )
| )
|GROUP BY
| FQDN
|HAVING
| FQDN != ''
""".stripMargin
println(sql)
val vertexFqdnDf = spark.sql(sql)
vertexFqdnDf.show(10)
}
}

View File

@@ -0,0 +1,35 @@
package cn.ac.iie.service.update
import java.util
import java.util.ArrayList
import java.util.concurrent.ConcurrentHashMap
import cn.ac.iie.dao.BaseArangoData
import cn.ac.iie.dao.BaseArangoData._
import com.arangodb.entity.{BaseDocument, BaseEdgeDocument}
import scala.collection.mutable.WrappedArray.ofRef
object UpdateDocumentTest {
def main(args: Array[String]): Unit = {
val baseArangoData = new BaseArangoData()
baseArangoData.readHistoryData("R_LOCATE_FQDN2IP", historyRelationFqdnAddressIpMap, classOf[BaseEdgeDocument])
val value = BaseArangoData.historyRelationFqdnAddressIpMap.keys()
while (value.hasMoreElements) {
val integer: Integer = value.nextElement()
val map: ConcurrentHashMap[String, BaseEdgeDocument] = historyRelationFqdnAddressIpMap.get(integer)
val unit = map.keys()
while (unit.hasMoreElements) {
val key = unit.nextElement()
val edgeDocument = map.get(key)
// val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[util.ArrayList[Long]]
// val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[util.ArrayList[String]]
val strings = edgeDocument.getAttribute("DIST_CIP").asInstanceOf[Array[String]]
val longs = edgeDocument.getAttribute("DNS_CNT_RECENT").asInstanceOf[Array[java.lang.Long]]
println(longs.mkString(",") + "---" + strings.mkString(","))
}
}
}
}