36 Commits

Author SHA1 Message Date
unknown
0662d265dd GAL-224 Remove SSL verification; add HDFS high-availability settings 2022-11-28 16:42:56 +08:00
unknown
87fe11dc93 Improve the standalone-mode deployment approach 2022-11-28 15:38:36 +08:00
unknown
9a2a5b3957 GAL-224 Support dynamic loading of the knowledge base in DoS detection 2022-11-23 15:30:24 +08:00
fy
c58acdcfc9 Initial groundwork for the Flink knowledge-base integration 2022-11-14 14:34:00 +08:00
wanglihui
0a6f36393c Support dynamic configuration of the DoS Detection Bifang access token 2022-07-19 17:20:09 +08:00
unknown
84a1e6879a Fix dynamic retrieval of the Nacos configuration 2022-06-28 11:15:49 +08:00
unknown
ab8f6aba81 Fix dynamic retrieval of the Nacos configuration 2022-06-28 11:14:54 +08:00
unknown
94e8fb807a Integrate Nacos; fix dependency conflicts and a startup error 2022-06-22 15:58:49 +08:00
wanglihui
cead1d4d99 Merge branch 'tsg-22.06' of git.mesalab.cn:bigdata/tsg/flink-dos-detection 2022-06-20 18:15:24 +08:00
徐鹏飞
2d98c3b6e6 Change the static-threshold retrieval logic to use the default vsysId 2022-06-13 18:00:57 +08:00
徐鹏飞
3dc29a07be Add retrieval of static thresholds from Bifang by vsysId 2022-05-31 18:00:36 +08:00
wanglihui
1fcdb79739 Correct the rate-based Conditions attribute values in DoS event logs 2022-05-17 13:54:43 +08:00
wanglihui
3d974217d9 Integrate Nacos and move part of the configuration into Nacos. 2022-04-08 10:16:29 +08:00
wanglihui
db17064f73 Upgrade the utility library to 1.0.8 for the new MMDB geolocation database 2022-02-11 16:06:24 +08:00
wanglihui
065e5abb09 Add jasypt-based configuration encryption 2021-12-20 13:50:18 +08:00
wanglihui
75bbdd2962 Update the DoS detection job: read the baseline TTL configuration.
Fix a logic problem in the Conditions threshold description language for DoS detection.
2021-12-08 13:51:09 +08:00
wanglihui
c46a395d9b Adjust the static-threshold and upper/lower-bound configuration. 2021-11-26 09:46:53 +08:00
wanglihui
cc3f0cf620 Fix a NullPointerException thrown when the fetched configuration is empty. 2021-11-05 19:01:12 +08:00
wanglihui
0617b1e614 Change the baseline-value strategy: when the baseline type is 3 and the baseline is below the static sensitivity threshold, replace it. 2021-10-29 18:43:05 +08:00
wanglihui
0125b031dd Format code; override equals and hashCode in entity classes. 2021-10-22 18:38:29 +08:00
wanglihui
177e7461cc Improve how the baseline is built 2021-10-21 18:27:48 +08:00
wanglihui
be916531fb Key the threshold RangeMap by attack type to avoid IP conflicts. 2021-10-20 18:23:12 +08:00
wanglihui
c692112445 Add packets and bits as criteria for static-threshold detection.
Fix IP conflicts in the static configuration.
2021-10-19 18:39:13 +08:00
wanglihui
b03ab9642d Fix a conflict between static and baseline conditions 2021-09-28 16:11:52 +08:00
wanglihui
c44250bf73 Add IP-conflict detection when reading DoS Detection Profiles
Fix DoS event logs whose end_time exceeded the current time
2021-09-26 18:41:36 +08:00
wanglihui
77bc6a844e Fix IP conflicts when reading the configuration 2021-09-23 18:36:27 +08:00
wanglihui
e930fa23ed Add the baseline sensitivity threshold as a detection condition. 2021-09-16 18:47:00 +08:00
wanglihui
8cd4dea19e Use the default Bifang token when fetching the static-threshold list
Fix the baseline index calculation
2021-09-15 10:08:17 +08:00
wanglihui
62f3c65d66 Real-time detection based on the DoS sketch 1-tuple 2021-09-14 18:46:23 +08:00
wanglihui
8cfb442c44 Add the 1-tuple as a data source for baseline generation 2021-09-13 14:14:58 +08:00
wanglihui
4f8807dfa1 Compute rates as total sessions divided by the time window 2021-09-13 09:46:02 +08:00
wanglihui
81f6499458 Add a sensitivity threshold to filter alerts
Compute averages by aggregating first, then averaging
2021-09-09 10:46:50 +08:00
wanglihui
b4237bb4a9 Add Kafka SASL authentication 2021-09-06 16:19:33 +08:00
wanglihui
c5943298bd Fix severity misclassification caused by double-precision error 2021-08-26 18:42:28 +08:00
wanglihui
b4f919647a Add logic to detect DoS attacks against static thresholds
Add a scheduler to periodically fetch static thresholds and baselines
2021-08-24 16:35:31 +08:00
wanglihui
55af33b508 Add static-threshold parsing 2021-08-20 18:34:40 +08:00
41 changed files with 3304 additions and 238 deletions

105
pom.xml
View File

@@ -12,6 +12,8 @@
<flink.version>1.13.1</flink.version>
<hive.version>2.1.1</hive.version>
<hadoop.version>2.7.1</hadoop.version>
<scala.binary.version>2.11</scala.binary.version>
<jsonpath.version>2.4.0</jsonpath.version>
</properties>
<repositories>
@@ -102,6 +104,13 @@
<dependencies>
<!-- https://mvnrepository.com/artifact/org.jasypt/jasypt -->
<dependency>
<groupId>org.jasypt</groupId>
<artifactId>jasypt</artifactId>
<version>1.9.3</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
@@ -114,9 +123,15 @@
<version>1.7.21</version>
</dependency>
<dependency>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>${jsonpath.version}</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka_2.11</artifactId>
<artifactId>flink-connector-kafka_2.12</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
@@ -124,7 +139,7 @@
<!-- CLI dependencies -->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-clients_2.11</artifactId>
<artifactId>flink-clients_2.12</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
@@ -141,14 +156,26 @@
<artifactId>jdk.tools</artifactId>
<groupId>jdk.tools</groupId>
</exclusion>
<exclusion>
<artifactId>guava</artifactId>
<groupId>com.google.guava</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>2.2.3</version>
<!--<scope>provided</scope>-->
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
@@ -189,10 +216,16 @@
<version>5.5.2</version>
</dependency>
<dependency>
<groupId>com.github.seancfoley</groupId>
<artifactId>ipaddress</artifactId>
<version>5.3.3</version>
</dependency>
<dependency>
<groupId>com.zdjizhi</groupId>
<artifactId>galaxy</artifactId>
<version>1.0.6</version>
<version>1.1.1</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
@@ -208,6 +241,72 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.alibaba.nacos</groupId>
<artifactId>nacos-client</artifactId>
<version>1.2.0</version>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-codec/commons-codec -->
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.11</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-databind -->
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>2.9.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.google.guava/guava -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>22.0</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.2</version>
</dependency>
<dependency>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>2.4.0</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
</dependency>
</dependencies>
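
Note: the detection code in this diff reads live settings through a NacosUtils helper that is not itself part of the change set. A minimal sketch of such a helper against the stock nacos-client 1.2.0 API (the server address, data id, and group below are placeholders; the real values come from the nacos.* keys in CommonConfig):

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import java.io.StringReader;
import java.util.Properties;

// Hypothetical NacosUtils-style helper; the actual implementation is not shown in this diff.
public final class NacosUtilsSketch {
    private static final Properties CACHE = new Properties();

    static {
        try {
            Properties props = new Properties();
            props.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848"); // placeholder address
            ConfigService configService = NacosFactory.createConfigService(props);
            // fetch the whole properties-style config once; a listener could refresh it
            String content = configService.getConfig("dos-detection", "DEFAULT_GROUP", 5000);
            CACHE.load(new StringReader(content));
        } catch (Exception e) {
            // leave the cache empty; getters fall back to defaults
        }
    }

    public static int getIntProperty(String key) {
        return Integer.parseInt(CACHE.getProperty(key, "0"));
    }

    public static double getDoubleProperty(String key) {
        return Double.parseDouble(CACHE.getProperty(key, "0"));
    }

    public static String getStringProperty(String key) {
        return CACHE.getProperty(key, "");
    }
}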

View File

@@ -1,12 +1,26 @@
package com.zdjizhi.common;
import com.zdjizhi.utils.CommonConfigurations;
import com.zdjizhi.utils.NacosUtils;
import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
/**
* Created by wk on 2021/1/6.
* @author wlh
* @date 2021/1/6
*/
public class CommonConfig {
/**
* Default separator for the geolocation database
*/
public static final String LOCATION_SEPARATOR = ".";
private static StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
static {
encryptor.setPassword("galaxy");
}
public static final int STREAM_EXECUTION_ENVIRONMENT_PARALLELISM = CommonConfigurations.getIntProperty("stream.execution.environment.parallelism");
public static final String STREAM_EXECUTION_JOB_NAME = CommonConfigurations.getStringProperty("stream.execution.job.name");
@@ -19,8 +33,6 @@ public class CommonConfig {
public static final String KAFKA_OUTPUT_METRIC_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.output.metric.topic.name");
public static final int KAFKA_OUTPUT_EVENT_PARALLELISM = CommonConfigurations.getIntProperty("kafka.output.event.parallelism");
public static final String KAFKA_OUTPUT_EVENT_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.output.event.topic.name");
public static final String KAFKA_OUTPUT_SKETCH_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.output.sketch.topic.name");
public static final int KAFKA_OUTPUT_SKETCH_PARALLELISM = CommonConfigurations.getIntProperty("kafka.output.sketch.parallelism");
public static final String KAFKA_OUTPUT_BOOTSTRAP_SERVERS = CommonConfigurations.getStringProperty("kafka.output.bootstrap.servers");
public static final String HBASE_ZOOKEEPER_QUORUM = CommonConfigurations.getStringProperty("hbase.zookeeper.quorum");
@@ -29,6 +41,7 @@ public class CommonConfig {
public static final String HBASE_BASELINE_TABLE_NAME = CommonConfigurations.getStringProperty("hbase.baseline.table.name");
public static final int HBASE_BASELINE_TOTAL_NUM = CommonConfigurations.getIntProperty("hbase.baseline.total.num");
public static final int HBASE_BASELINE_TTL = CommonConfigurations.getIntProperty("hbase.baseline.ttl");
public static final int FLINK_FIRST_AGG_PARALLELISM = CommonConfigurations.getIntProperty("flink.first.agg.parallelism");
public static final int FLINK_DETECTION_MAP_PARALLELISM = CommonConfigurations.getIntProperty("flink.detection.map.parallelism");
@@ -41,14 +54,66 @@ public class CommonConfig {
public static final String IP_MMDB_PATH = CommonConfigurations.getStringProperty("ip.mmdb.path");
public static final int SENSITIVITY_THRESHOLD = CommonConfigurations.getIntProperty("sensitivity.threshold");
// public static final int STATIC_SENSITIVITY_THRESHOLD = NacosUtils.getIntProperty("static.sensitivity.threshold");
// public static final double BASELINE_SENSITIVITY_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sensitivity.threshold");
//
// public static final double BASELINE_SESSIONS_MINOR_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.minor.threshold");
// public static final double BASELINE_SESSIONS_WARNING_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.warning.threshold");
// public static final double BASELINE_SESSIONS_MAJOR_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.major.threshold");
// public static final double BASELINE_SESSIONS_SEVERE_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.severe.threshold");
// public static final double BASELINE_SESSIONS_CRITICAL_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.critical.threshold");
public static final double BASELINE_SESSIONS_MINOR_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.minor.threshold");
public static final double BASELINE_SESSIONS_WARNING_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.warning.threshold");
public static final double BASELINE_SESSIONS_MAJOR_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.major.threshold");
public static final double BASELINE_SESSIONS_SEVERE_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.severe.threshold");
public static final double BASELINE_SESSIONS_CRITICAL_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.critical.threshold");
public static final String BIFANG_SERVER_URI = CommonConfigurations.getStringProperty("bifang.server.uri");
public static final String BIFANG_SERVER_TOKEN = CommonConfigurations.getStringProperty("bifang.server.token");
public static final String BIFANG_SERVER_ENCRYPTPWD_PATH = CommonConfigurations.getStringProperty("bifang.server.encryptpwd.path");
public static final String BIFANG_SERVER_LOGIN_PATH = CommonConfigurations.getStringProperty("bifang.server.login.path");
public static final String BIFANG_SERVER_POLICY_THRESHOLD_PATH = CommonConfigurations.getStringProperty("bifang.server.policy.threshold.path");
public static final String BIFANG_SERVER_POLICY_VSYSID_PATH = CommonConfigurations.getStringProperty("bifang.server.policy.vaysid.path");
public static final int HTTP_POOL_MAX_CONNECTION = CommonConfigurations.getIntProperty("http.pool.max.connection");
public static final int HTTP_POOL_MAX_PER_ROUTE = CommonConfigurations.getIntProperty("http.pool.max.per.route");
public static final int HTTP_POOL_REQUEST_TIMEOUT = CommonConfigurations.getIntProperty("http.pool.request.timeout");
public static final int HTTP_POOL_CONNECT_TIMEOUT = CommonConfigurations.getIntProperty("http.pool.connect.timeout");
public static final int HTTP_POOL_RESPONSE_TIMEOUT = CommonConfigurations.getIntProperty("http.pool.response.timeout");
public static final int STATIC_THRESHOLD_SCHEDULE_MINUTES = CommonConfigurations.getIntProperty("static.threshold.schedule.minutes");
public static final int BASELINE_THRESHOLD_SCHEDULE_DAYS = CommonConfigurations.getIntProperty("baseline.threshold.schedule.days");
public static final String SASL_JAAS_CONFIG_USER = CommonConfigurations.getStringProperty("sasl.jaas.config.user");
public static final String SASL_JAAS_CONFIG_PASSWORD = encryptor.decrypt(CommonConfigurations.getStringProperty("sasl.jaas.config.password"));
public static final int SASL_JAAS_CONFIG_FLAG = CommonConfigurations.getIntProperty("sasl.jaas.config.flag");
public static final String NACOS_SERVER_ADDR = CommonConfigurations.getStringProperty("nacos.server.addr");
public static final String NACOS_USERNAME = CommonConfigurations.getStringProperty("nacos.username");
public static final String NACOS_PASSWORD = CommonConfigurations.getStringProperty("nacos.password");
public static final String NACOS_DATA_ID = CommonConfigurations.getStringProperty("nacos.data.id");
public static final String NACOS_GROUP = CommonConfigurations.getStringProperty("nacos.group");
public static final int NACOS_READ_TIMEOUT = CommonConfigurations.getIntProperty("nacos.read.timeout");
public static final String HOS_TOKEN = CommonConfigurations.getStringProperty("hos.token");
public static final String CLUSTER_OR_SINGLE = CommonConfigurations.getStringProperty("cluster.or.single");
public static final String HDFS_URI_NS1 = CommonConfigurations.getStringProperty("hdfs.uri.nn1");
public static final String HDFS_URI_NS2 = CommonConfigurations.getStringProperty("hdfs.uri.nn2");
public static final String HDFS_PATH = CommonConfigurations.getStringProperty("hdfs.path");
public static final String HDFS_USER = CommonConfigurations.getStringProperty("hdfs.user");
public static final String DOWNLOAD_PATH = CommonConfigurations.getStringProperty("download.path");
public static void main(String[] args) {
StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
// set the password/salt used for encryption and decryption
encryptor.setPassword("galaxy");
// encrypt the raw password, e.g. -> S5kR+Y7CI8k7MaecZpde25yK8NKUnd6p
// String password = "galaxy2019";
String password = "nacos";
String encPwd = encryptor.encrypt(password);
System.out.println(encPwd);
// decrypt back to the raw password
String rawPwd = encryptor.decrypt(encPwd);
System.out.println(rawPwd);
}
}
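
The new HDFS_URI_NS1/HDFS_URI_NS2 constants back the HDFS high-availability support from commit 0662d265dd. A sketch of how an HA client Configuration is typically assembled from two NameNode RPC addresses; the nameservice id "ns" and the property names are standard Hadoop HA settings assumed here, not taken from this diff:

// Hypothetical HA client setup from the two configured NameNode addresses.
org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
conf.set("fs.defaultFS", "hdfs://ns");
conf.set("dfs.nameservices", "ns");
conf.set("dfs.ha.namenodes.ns", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.ns.nn1", CommonConfig.HDFS_URI_NS1);
conf.set("dfs.namenode.rpc-address.ns.nn2", CommonConfig.HDFS_URI_NS2);
conf.set("dfs.client.failover.proxy.provider.ns",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
// FileSystem.get(conf) then fails over between nn1 and nn2 transparently.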

View File

@@ -0,0 +1,26 @@
package com.zdjizhi.common;
import java.io.Serializable;
public class CustomFile implements Serializable {
String fileName;
byte[] content;
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
public byte[] getContent() {
return content;
}
public void setContent(byte[] content) {
this.content = content;
}
}

View File

@@ -0,0 +1,63 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Objects;
public class DosBaselineThreshold implements Serializable {
private ArrayList<Integer> session_rate;
private Integer session_rate_baseline_type;
private Integer session_rate_default_value;
public ArrayList<Integer> getSession_rate() {
return session_rate;
}
public void setSession_rate(ArrayList<Integer> session_rate) {
this.session_rate = session_rate;
}
public Integer getSession_rate_baseline_type() {
return session_rate_baseline_type;
}
public void setSession_rate_baseline_type(Integer session_rate_baseline_type) {
this.session_rate_baseline_type = session_rate_baseline_type;
}
public Integer getSession_rate_default_value() {
return session_rate_default_value;
}
public void setSession_rate_default_value(Integer session_rate_default_value) {
this.session_rate_default_value = session_rate_default_value;
}
@Override
public String toString() {
return "DosBaselineThreshold{" +
"session_rate=" + session_rate +
", session_rate_baseline_type=" + session_rate_baseline_type +
", session_rate_default_value=" + session_rate_default_value +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosBaselineThreshold)) {
return false;
}
DosBaselineThreshold that = (DosBaselineThreshold) o;
return Objects.equals(getSession_rate(), that.getSession_rate()) &&
Objects.equals(getSession_rate_baseline_type(), that.getSession_rate_baseline_type()) &&
Objects.equals(getSession_rate_default_value(), that.getSession_rate_default_value());
}
@Override
public int hashCode() {
return Objects.hash(getSession_rate(), getSession_rate_baseline_type(), getSession_rate_default_value());
}
}

View File

@@ -0,0 +1,121 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Objects;
/**
* @author wlh
*/
public class DosDetectionThreshold implements Serializable {
private String profileId;
private String attackType;
private ArrayList<String> serverIpList;
private String serverIpAddr;
private long packetsPerSec;
private long bitsPerSec;
private long sessionsPerSec;
private int isValid;
@Override
public String toString() {
return "DosDetectionThreshold{" +
"profileId='" + profileId + '\'' +
", attackType='" + attackType + '\'' +
", serverIpList=" + serverIpList +
", serverIpAddr='" + serverIpAddr + '\'' +
", packetsPerSec=" + packetsPerSec +
", bitsPerSec=" + bitsPerSec +
", sessionsPerSec=" + sessionsPerSec +
", isValid=" + isValid +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DosDetectionThreshold threshold = (DosDetectionThreshold) o;
return packetsPerSec == threshold.packetsPerSec &&
bitsPerSec == threshold.bitsPerSec &&
sessionsPerSec == threshold.sessionsPerSec &&
isValid == threshold.isValid &&
Objects.equals(profileId, threshold.profileId) &&
Objects.equals(attackType, threshold.attackType) &&
Objects.equals(serverIpList, threshold.serverIpList) &&
Objects.equals(serverIpAddr, threshold.serverIpAddr);
}
@Override
public int hashCode() {
return Objects.hash(profileId, attackType, serverIpList, serverIpAddr, packetsPerSec, bitsPerSec, sessionsPerSec, isValid);
}
public String getProfileId() {
return profileId;
}
public void setProfileId(String profileId) {
this.profileId = profileId;
}
public String getAttackType() {
return attackType;
}
public void setAttackType(String attackType) {
this.attackType = attackType;
}
public ArrayList<String> getServerIpList() {
return serverIpList;
}
public void setServerIpList(ArrayList<String> serverIpList) {
this.serverIpList = serverIpList;
}
public String getServerIpAddr() {
return serverIpAddr;
}
public void setServerIpAddr(String serverIpAddr) {
this.serverIpAddr = serverIpAddr;
}
public long getPacketsPerSec() {
return packetsPerSec;
}
public void setPacketsPerSec(long packetsPerSec) {
this.packetsPerSec = packetsPerSec;
}
public long getBitsPerSec() {
return bitsPerSec;
}
public void setBitsPerSec(long bitsPerSec) {
this.bitsPerSec = bitsPerSec;
}
public long getSessionsPerSec() {
return sessionsPerSec;
}
public void setSessionsPerSec(long sessionsPerSec) {
this.sessionsPerSec = sessionsPerSec;
}
public int getIsValid() {
return isValid;
}
public void setIsValid(int isValid) {
this.isValid = isValid;
}
}

View File

@@ -1,6 +1,7 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.Objects;
public class DosEventLog implements Serializable {
@@ -140,4 +141,33 @@ public class DosEventLog implements Serializable {
", bit_rate=" + bit_rate +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosEventLog)) {
return false;
}
DosEventLog that = (DosEventLog) o;
return getLog_id() == that.getLog_id() &&
getStart_time() == that.getStart_time() &&
getEnd_time() == that.getEnd_time() &&
getSession_rate() == that.getSession_rate() &&
getPacket_rate() == that.getPacket_rate() &&
getBit_rate() == that.getBit_rate() &&
Objects.equals(getAttack_type(), that.getAttack_type()) &&
Objects.equals(getSeverity(), that.getSeverity()) &&
Objects.equals(getConditions(), that.getConditions()) &&
Objects.equals(getDestination_ip(), that.getDestination_ip()) &&
Objects.equals(getDestination_country(), that.getDestination_country()) &&
Objects.equals(getSource_ip_list(), that.getSource_ip_list()) &&
Objects.equals(getSource_country_list(), that.getSource_country_list());
}
@Override
public int hashCode() {
return Objects.hash(getLog_id(), getStart_time(), getEnd_time(), getAttack_type(), getSeverity(), getConditions(), getDestination_ip(), getDestination_country(), getSource_ip_list(), getSource_country_list(), getSession_rate(), getPacket_rate(), getBit_rate());
}
}

View File

@@ -1,6 +1,7 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.Objects;
public class DosMetricsLog implements Serializable {
@@ -80,4 +81,27 @@ public class DosMetricsLog implements Serializable {
", partition_num=" + partition_num +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosMetricsLog)) {
return false;
}
DosMetricsLog that = (DosMetricsLog) o;
return getSketch_start_time() == that.getSketch_start_time() &&
getSession_rate() == that.getSession_rate() &&
getPacket_rate() == that.getPacket_rate() &&
getBit_rate() == that.getBit_rate() &&
getPartition_num() == that.getPartition_num() &&
Objects.equals(getAttack_type(), that.getAttack_type()) &&
Objects.equals(getDestination_ip(), that.getDestination_ip());
}
@Override
public int hashCode() {
return Objects.hash(getSketch_start_time(), getAttack_type(), getDestination_ip(), getSession_rate(), getPacket_rate(), getBit_rate(), getPartition_num());
}
}

View File

@@ -1,6 +1,7 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.Objects;
public class DosSketchLog implements Serializable {
@@ -31,6 +32,32 @@ public class DosSketchLog implements Serializable {
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosSketchLog)) {
return false;
}
DosSketchLog sketchLog = (DosSketchLog) o;
return getSketch_start_time() == sketchLog.getSketch_start_time() &&
getSketch_duration() == sketchLog.getSketch_duration() &&
getSketch_sessions() == sketchLog.getSketch_sessions() &&
getSketch_packets() == sketchLog.getSketch_packets() &&
getSketch_bytes() == sketchLog.getSketch_bytes() &&
Objects.equals(getCommon_sled_ip(), sketchLog.getCommon_sled_ip()) &&
Objects.equals(getCommon_data_center(), sketchLog.getCommon_data_center()) &&
Objects.equals(getAttack_type(), sketchLog.getAttack_type()) &&
Objects.equals(getSource_ip(), sketchLog.getSource_ip()) &&
Objects.equals(getDestination_ip(), sketchLog.getDestination_ip());
}
@Override
public int hashCode() {
return Objects.hash(getCommon_sled_ip(), getCommon_data_center(), getSketch_start_time(), getSketch_duration(), getAttack_type(), getSource_ip(), getDestination_ip(), getSketch_sessions(), getSketch_packets(), getSketch_bytes());
}
public String getCommon_sled_ip() {
return common_sled_ip;
}

View File

@@ -0,0 +1,22 @@
package com.zdjizhi.common;
import java.util.Objects;
public class DosVsysId {
private int vsysId;
public int getVsysId() {
return vsysId;
}
public void setVsysId(int vsysId) {
this.vsysId = vsysId;
}
@Override
public String toString() {
return "DosVsysId{" +
"vsysId=" + vsysId +
'}';
}
}

View File

@@ -0,0 +1,91 @@
package com.zdjizhi.common;
public class KnowledgeLog {
public String id;
public String name;
public String path;
public Long size;
public String format;
public String sha256;
public String version;
public String updateTime;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
public Long getSize() {
return size;
}
public void setSize(Long size) {
this.size = size;
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public String getSha256() {
return sha256;
}
public void setSha256(String sha256) {
this.sha256 = sha256;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getUpdateTime() {
return updateTime;
}
public void setUpdateTime(String updateTime) {
this.updateTime = updateTime;
}
@Override
public String toString() {
return "KnowledgeLog{" +
"id='" + id + '\'' +
", name='" + name + '\'' +
", path='" + path + '\'' +
", size=" + size +
", format='" + format + '\'' +
", sha256='" + sha256 + '\'' +
", version='" + version + '\'' +
", updateTime='" + updateTime + '\'' +
'}';
}
}

View File

@@ -1,16 +1,15 @@
package com.zdjizhi.etl;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.utils.HbaseUtils;
import com.zdjizhi.utils.IpUtils;
import com.zdjizhi.utils.SnowflakeId;
import com.zdjizhi.common.*;
import com.zdjizhi.utils.*;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.text.StrBuilder;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -27,21 +26,32 @@ import java.util.concurrent.TimeUnit;
public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
private static final Logger logger = LoggerFactory.getLogger(DosDetection.class);
private static Map<String, Map<String, Tuple2<ArrayList<Integer>, Integer>>> baselineMap;
private final static int BASELINE_SIZE = 144;
private static Map<String, Map<String, DosBaselineThreshold>> baselineMap = new HashMap<>();
private final static NumberFormat PERCENT_INSTANCE = NumberFormat.getPercentInstance();
private HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> thresholdRangeMap;
private final static int BASELINE_SIZE = 144;
private final static int STATIC_CONDITION_TYPE = 1;
private final static int BASELINE_CONDITION_TYPE = 2;
private final static int SENSITIVITY_CONDITION_TYPE = 3;
private final static String SESSIONS_TAG = "sessions";
private final static String PACKETS_TAG = "packets";
private final static String BITS_TAG = "bits";
private final static int OTHER_BASELINE_TYPE = 3;
@Override
public void open(Configuration parameters) {
ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(2,
new BasicThreadFactory.Builder().namingPattern("Dos-Detection-%d").daemon(true).build());
try {
executorService.scheduleAtFixedRate(() -> {
// periodically reload the baseline map from HBase
baselineMap = HbaseUtils.readFromHbase();
}, 0, CommonConfig.BASELINE_THRESHOLD_SCHEDULE_DAYS, TimeUnit.DAYS);
executorService.scheduleAtFixedRate(() -> thresholdRangeMap = ParseStaticThreshold.createStaticThreshold(), 0,
CommonConfig.STATIC_THRESHOLD_SCHEDULE_MINUTES, TimeUnit.MINUTES);
}catch (Exception e){
executorService.scheduleAtFixedRate(() -> baselineMap = ParseBaselineThreshold.readFromHbase(), 0,
CommonConfig.BASELINE_THRESHOLD_SCHEDULE_DAYS, TimeUnit.DAYS);
} catch (Exception e) {
logger.error("定时器任务执行失败", e);
}
PERCENT_INSTANCE.setMinimumFractionDigits(2);
@@ -49,43 +59,100 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
@Override
public DosEventLog map(DosSketchLog value) {
DosEventLog finalResult = null;
try {
String destinationIp = value.getDestination_ip();
String attackType = value.getAttack_type();
IPAddress destinationIpAddress = new IPAddressString(destinationIp).getAddress();
DosDetectionThreshold threshold = thresholdRangeMap.getOrDefault(attackType, TreeRangeMap.create()).get(destinationIpAddress);
logger.debug("当前判断IP{}, 类型: {}", destinationIp, attackType);
long sketchSessions = value.getSketch_sessions();
if (sketchSessions > CommonConfig.SENSITIVITY_THRESHOLD && baselineMap.containsKey(destinationIp)) {
Tuple2<ArrayList<Integer>, Integer> floodTypeTup = baselineMap.get(destinationIp).get(attackType);
Integer base = getBaseValue(floodTypeTup, value);
long diff = sketchSessions - base;
if (diff > 0 && base != 0) {
double percent = getDiffPercent(diff, base);
Severity severity = judgeSeverity(percent);
if (severity != Severity.NORMAL) {
DosEventLog result = getResult(value, severity, percent);
logger.info("检测到当前server IP {} 存在 {} 异常,超出基线{} {}倍,日志详情\n {}", destinationIp, attackType, base, percent, result);
return result;
} else {
logger.debug("当前server IP{} 未出现 {} 异常,日志详情 {}", destinationIp, attackType, value);
}
}
if (threshold == null && baselineMap.containsKey(destinationIp)) {
finalResult = getDosEventLogByBaseline(value);
} else if (threshold == null && !baselineMap.containsKey(destinationIp)) {
finalResult = getDosEventLogBySensitivityThreshold(value);
} else if (threshold != null) {
finalResult = getDosEventLogByStaticThreshold(value, threshold);
} else {
logger.debug("未获取到当前server IP{} 类型 {} baseline数据", destinationIp, attackType);
logger.debug("未获取到当前server IP{} 类型 {} 静态阈值 和 baseline", destinationIp, attackType);
}
} catch (Exception e) {
logger.error("判定失败\n {} \n{}", value, e);
}
return null;
return finalResult;
}
private DosEventLog getResult(DosSketchLog value, Severity severity, double percent) {
private DosEventLog getDosEventLogBySensitivityThreshold(DosSketchLog value) {
DosEventLog result = null;
long sketchSessions = value.getSketch_sessions();
if (sketchSessions > NacosUtils.getIntProperty("static.sensitivity.threshold")) {
long diff = sketchSessions - NacosUtils.getIntProperty("static.sensitivity.threshold");
result = getDosEventLog(value, NacosUtils.getIntProperty("static.sensitivity.threshold"), diff, SENSITIVITY_CONDITION_TYPE, SESSIONS_TAG);
result.setSeverity(Severity.MAJOR.severity);
}
return result;
}
private DosEventLog getDosEventLogByBaseline(DosSketchLog value) {
DosEventLog result = null;
String destinationIp = value.getDestination_ip();
String attackType = value.getAttack_type();
long sketchSessions = value.getSketch_sessions();
if (sketchSessions > NacosUtils.getIntProperty("static.sensitivity.threshold")) {
DosBaselineThreshold dosBaselineThreshold = baselineMap.get(destinationIp).get(attackType);
Integer base = getBaseValue(dosBaselineThreshold, value);
long diff = sketchSessions - base;
result = getDosEventLog(value, base, diff, BASELINE_CONDITION_TYPE, SESSIONS_TAG);
}
return result;
}
private DosEventLog getDosEventLogByStaticThreshold(DosSketchLog value, DosDetectionThreshold threshold) {
long base = threshold.getSessionsPerSec();
long diff = value.getSketch_sessions() - base;
DosEventLog result = getDosEventLog(value, base, diff, STATIC_CONDITION_TYPE, SESSIONS_TAG);
if (result == null) {
base = threshold.getPacketsPerSec();
diff = value.getSketch_packets() - base;
result = getDosEventLog(value, base, diff, STATIC_CONDITION_TYPE, PACKETS_TAG);
if (result == null) {
base = threshold.getBitsPerSec();
diff = value.getSketch_bytes() - base;
result = getDosEventLog(value, base, diff, STATIC_CONDITION_TYPE, BITS_TAG);
}
}
return result;
}
private DosEventLog getDosEventLog(DosSketchLog value, long base, long diff, int type, String tag) {
DosEventLog result = null;
String destinationIp = value.getDestination_ip();
String attackType = value.getAttack_type();
if (diff > 0 && base != 0) {
double percent = getDiffPercent(diff, base);
Severity severity = judgeSeverity(percent);
if (severity != Severity.NORMAL) {
if (type == BASELINE_CONDITION_TYPE && percent < NacosUtils.getDoubleProperty("baseline.sensitivity.threshold")) {
logger.debug("当前server IP:{},类型:{},基线值{}百分比{}未超过基线敏感阈值,日志详情\n{}", destinationIp, attackType, base, percent, value);
} else {
result = getResult(value, base, severity, percent+1, type, tag);
logger.info("检测到当前server IP {} 存在 {} 异常,超出基线{} {}倍,基于{}:{}检测,日志详情\n {}", destinationIp,attackType,base,percent,type,tag,result);
}
} else {
logger.debug("当前server IP:{} 未出现 {} 异常,日志详情 {}", destinationIp, attackType, value);
}
}
return result;
}
private DosEventLog getResult(DosSketchLog value, long base, Severity severity, double percent, int type, String tag) {
DosEventLog dosEventLog = new DosEventLog();
dosEventLog.setLog_id(SnowflakeId.generateId());
dosEventLog.setStart_time(value.getSketch_start_time());
dosEventLog.setEnd_time(value.getSketch_start_time() + CommonConfig.FLINK_WINDOW_MAX_TIME);
dosEventLog.setEnd_time(value.getSketch_start_time() + value.getSketch_duration());
dosEventLog.setAttack_type(value.getAttack_type());
dosEventLog.setSeverity(severity.toString());
dosEventLog.setConditions(getConditions(PERCENT_INSTANCE.format(percent)));
dosEventLog.setSeverity(severity.severity);
dosEventLog.setConditions(getConditions(PERCENT_INSTANCE.format(percent), base, value.getSketch_sessions(), type, tag));
dosEventLog.setDestination_ip(value.getDestination_ip());
dosEventLog.setDestination_country(IpUtils.ipLookup.countryLookup(value.getDestination_ip()));
String ipList = value.getSource_ip();
@@ -97,12 +164,13 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
return dosEventLog;
}
private Integer getBaseValue(Tuple2<ArrayList<Integer>, Integer> floodTypeTup, DosSketchLog value) {
private Integer getBaseValue(DosBaselineThreshold dosBaselineThreshold, DosSketchLog value) {
Integer base = 0;
try {
if (floodTypeTup != null) {
ArrayList<Integer> baselines = floodTypeTup.f0;
Integer defaultVaule = floodTypeTup.f1;
if (dosBaselineThreshold != null) {
ArrayList<Integer> baselines = dosBaselineThreshold.getSession_rate();
Integer defaultVaule = dosBaselineThreshold.getSession_rate_default_value();
Integer sessionRateBaselineType = dosBaselineThreshold.getSession_rate_baseline_type();
if (baselines != null && baselines.size() == BASELINE_SIZE) {
int timeIndex = getCurrentTimeIndex(value.getSketch_start_time());
base = baselines.get(timeIndex);
@@ -110,6 +178,9 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
logger.debug("获取到当前IP: {},类型: {} baseline值为0,替换为P95观测值{}", value.getDestination_ip(), value.getAttack_type(), defaultVaule);
base = defaultVaule;
}
if (sessionRateBaselineType == OTHER_BASELINE_TYPE && base < NacosUtils.getIntProperty("static.sensitivity.threshold")){
base = NacosUtils.getIntProperty("static.sensitivity.threshold");
}
}
}
} catch (Exception e) {
@@ -118,47 +189,87 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
return base;
}
private String getConditions(String percent) {
return "sessions > " + percent + " of baseline";
private String getConditions(String percent, long base, long sessions, int type, String tag) {
switch (type) {
case STATIC_CONDITION_TYPE:
return new StrBuilder()
.append("Rate > ")
.append(base).append(" ")
.append(tag).append("/s")
.toString();
case BASELINE_CONDITION_TYPE:
return new StrBuilder()
.append(tag).append(" > ")
.append(percent).append(" of baseline")
.toString();
case SENSITIVITY_CONDITION_TYPE:
return new StrBuilder()
.append(sessions).append(" ")
.append(tag).append("/s Unusually high ")
.append(StringUtils.capitalize(tag))
.toString();
default:
throw new IllegalArgumentException("Illegal Argument type:" + type + ", known types = [1,2,3]");
}
}
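For reference, the three branches render strings of the following shapes (illustrative values, tag = "sessions"; the percent string comes from PERCENT_INSTANCE with two fraction digits):

// Sample outputs of getConditions per condition type (illustrative values):
//   type 1 (static threshold): "Rate > 1000 sessions/s"
//   type 2 (baseline):         "sessions > 150.00% of baseline"
//   type 3 (sensitivity):      "2500 sessions/s Unusually high Sessions"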
private String getSourceCountryList(String sourceIpList) {
String[] ipArr = sourceIpList.split(",");
HashSet<String> countrySet = new HashSet<>();
for (String ip : ipArr) {
countrySet.add(IpUtils.ipLookup.countryLookup(ip));
if (StringUtil.isNotBlank(sourceIpList)) {
String countryList;
try {
String[] ipArr = sourceIpList.split(",");
HashSet<String> countrySet = new HashSet<>();
for (String ip : ipArr) {
String country = IpUtils.ipLookup.countryLookup(ip);
if (StringUtil.isNotBlank(country)){
countrySet.add(country);
}
}
countryList = StringUtils.join(countrySet, ", ");
return countryList;
} catch (Exception e) {
logger.error("{} source IP lists 获取国家失败", sourceIpList, e);
return StringUtil.EMPTY;
}
} else {
throw new IllegalArgumentException("Illegal Argument sourceIpList = null");
}
return StringUtils.join(countrySet, ",");
}
private int getCurrentTimeIndex(long sketchStartTime) {
long currentDayTime = sketchStartTime / (60 * 60 * 24) * 60 * 60 * 24;
long indexLong = (sketchStartTime - currentDayTime) / 600;
return Integer.parseInt(Long.toString(indexLong));
int index = 0;
try {
long currentDayTime = DateUtils.getTimeFloor(new Date(sketchStartTime * 1000L), "P1D").getTime() / 1000;
long indexLong = (sketchStartTime - currentDayTime) / (86400 / BASELINE_SIZE);
index = Integer.parseInt(Long.toString(indexLong));
} catch (Exception e) {
logger.error("获取time index失败", e);
}
return index;
}
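The baseline array has 144 slots, one per 600-second interval of the day (86400 / 144 = 600), so the index is the number of elapsed 10-minute buckets since midnight. A standalone check of the arithmetic, assuming sketch_start_time is in epoch seconds and the day floor is taken in UTC (DateUtils.getTimeFloor with "P1D" may apply a local-time floor instead):

// 144 buckets of 600 s each: index = seconds-since-midnight / 600
long sketchStartTime = 1_632_873_600L + 3_723L;    // 01:02:03 past a UTC midnight
long dayFloor = sketchStartTime / 86_400 * 86_400; // UTC midnight of that day
int index = (int) ((sketchStartTime - dayFloor) / (86_400 / 144));
System.out.println(index);                         // prints 6: the 01:00-01:10 bucket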
public static void main(String[] args) {
// System.out.println(new DosDetection().getSourceCountryList("192.0.2.3,138.199.14.31,255.255.255.255,121.14.89.209," +
// "23.200.74.224,161.117.68.253"));
// DosDetection dosDetection = new DosDetection();
// System.out.println(dosDetection.judgeSeverity(dosDetection.getDiffPercent(499, 1000)));
}
private Double getDiffPercent(long diff, long base) {
return BigDecimal.valueOf((float) diff / base).setScale(4, BigDecimal.ROUND_HALF_UP).doubleValue();
}
public static void main(String[] args) {
DosDetection dosDetection = new DosDetection();
double diffPercent = dosDetection.getDiffPercent(135, 17);
System.out.println(diffPercent);
System.out.println(dosDetection.judgeSeverity(4.2857142857142856E14));
System.out.println(BigDecimal.valueOf((float) 10 / 3).setScale(4, BigDecimal.ROUND_HALF_UP).doubleValue());
}
private Severity judgeSeverity(double diffPercent) {
if (diffPercent >= CommonConfig.BASELINE_SESSIONS_MINOR_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_WARNING_THRESHOLD) {
if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.minor.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.warning.threshold")) {
return Severity.MINOR;
} else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_WARNING_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_MAJOR_THRESHOLD) {
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.warning.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.major.threshold")) {
return Severity.WARNING;
} else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_MAJOR_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_SEVERE_THRESHOLD) {
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.major.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.severe.threshold")) {
return Severity.MAJOR;
} else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_SEVERE_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_CRITICAL_THRESHOLD) {
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.severe.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.critical.threshold")) {
return Severity.SEVERE;
} else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_CRITICAL_THRESHOLD) {
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.critical.threshold")) {
return Severity.CRITICAL;
} else {
return Severity.NORMAL;

View File

@@ -21,6 +21,9 @@ import static com.zdjizhi.sink.OutputStreamSink.outputTag;
public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosSketchLog, Tuple2<String,String>, TimeWindow> {
private static final Logger logger = LoggerFactory.getLogger(EtlProcessFunction.class);
private static final String EMPTY_SOURCE_IP_IPV4 = "0.0.0.0";
private static final String EMPTY_SOURCE_IP_IPV6 = "::";
@Override
public void process(Tuple2<String, String> keys,
Context context, Iterable<DosSketchLog> elements,
@@ -60,29 +63,32 @@ public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosS
}
private Tuple6<Long, Long, Long,String,Long,Long> sketchAggregate(Iterable<DosSketchLog> elements){
int cnt = 1;
long sessions = 0;
long packets = 0 ;
long bytes = 0;
long startTime = 0;
long startTime = System.currentTimeMillis()/1000;
long endTime = System.currentTimeMillis()/1000;
long duration = 0;
HashSet<String> sourceIpSet = new HashSet<>();
try {
for (DosSketchLog newSketchLog : elements){
sessions += newSketchLog.getSketch_sessions();
packets += newSketchLog.getSketch_packets();
bytes += newSketchLog.getSketch_bytes();
startTime = newSketchLog.getSketch_start_time();
duration = newSketchLog.getSketch_duration();
cnt += 1;
if (sourceIpSet.size() < CommonConfig.SOURCE_IP_LIST_LIMIT){
sourceIpSet.add(newSketchLog.getSource_ip());
String sourceIp = newSketchLog.getSource_ip();
if (StringUtils.equals(sourceIp,EMPTY_SOURCE_IP_IPV4) || StringUtils.equals(sourceIp,EMPTY_SOURCE_IP_IPV6)){
sessions += newSketchLog.getSketch_sessions();
packets += newSketchLog.getSketch_packets();
bytes += newSketchLog.getSketch_bytes();
startTime = newSketchLog.getSketch_start_time() > startTime ? startTime : newSketchLog.getSketch_start_time();
endTime = newSketchLog.getSketch_start_time() > endTime ? newSketchLog.getSketch_start_time() : endTime;
duration = endTime - startTime == 0 ? 5 : endTime - startTime;
}else {
if (sourceIpSet.size() < CommonConfig.SOURCE_IP_LIST_LIMIT){
sourceIpSet.add(sourceIp);
}
}
}
String sourceIpList = StringUtils.join(sourceIpSet, ",");
// return Tuple6.of(sessions/cnt/duration,packets/cnt/duration,bytes/cnt/duration,sourceIpList,startTime,duration);
return Tuple6.of(sessions/CommonConfig.FLINK_WINDOW_MAX_TIME,packets/CommonConfig.FLINK_WINDOW_MAX_TIME,
bytes/CommonConfig.FLINK_WINDOW_MAX_TIME,sourceIpList,startTime,duration);
bytes*8/CommonConfig.FLINK_WINDOW_MAX_TIME,sourceIpList,startTime,duration);
}catch (Exception e){
logger.error("聚合中间结果集失败 {}",e);
}
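
Two behavioral changes above are easy to miss: byte counts are converted to bits (bytes*8) before the rate division, and the divisor is the fixed window length rather than the per-record duration. A quick check of the arithmetic, assuming FLINK_WINDOW_MAX_TIME is 60 seconds (the actual value comes from configuration):

// Rates are window totals divided by the window length (assumed 60 s here).
long windowSeconds = 60;                     // stand-in for CommonConfig.FLINK_WINDOW_MAX_TIME
long sessions = 1_200, packets = 90_000, bytes = 7_500_000;
long sessionRate = sessions / windowSeconds; // 20 sessions/s
long packetRate = packets / windowSeconds;   // 1_500 packets/s
long bitRate = bytes * 8 / windowSeconds;    // 1_000_000 bits/s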

View File

@@ -0,0 +1,109 @@
package com.zdjizhi.etl;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosBaselineThreshold;
import com.zdjizhi.utils.DateUtils;
import com.zdjizhi.utils.HbaseUtils;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
public class ParseBaselineThreshold {
private static final Logger logger = LoggerFactory.getLogger(ParseBaselineThreshold.class);
private static ArrayList<String> floodTypeList = new ArrayList<>();
private static Table table = null;
private static Scan scan = null;
static {
floodTypeList.add("TCP SYN Flood");
floodTypeList.add("UDP Flood");
floodTypeList.add("ICMP Flood");
floodTypeList.add("DNS Flood");
}
private static void prepareHbaseEnv() throws IOException {
org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
config.set("hbase.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
config.set("hbase.client.retries.number", "3");
config.set("hbase.bulkload.retries.number", "3");
config.set("zookeeper.recovery.retry", "3");
config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, CommonConfig.HBASE_CLIENT_OPERATION_TIMEOUT);
config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CommonConfig.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
TableName tableName = TableName.valueOf(CommonConfig.HBASE_BASELINE_TABLE_NAME);
Connection conn = ConnectionFactory.createConnection(config);
table = conn.getTable(tableName);
long currentTimeMillis = System.currentTimeMillis();
scan = new Scan()
.setAllowPartialResults(true)
.setTimeRange(DateUtils.getSomeDate(new Date(currentTimeMillis), Math.negateExact(CommonConfig.HBASE_BASELINE_TTL)).getTime(), currentTimeMillis)
.setLimit(CommonConfig.HBASE_BASELINE_TOTAL_NUM);
logger.info("连接hbase成功正在读取baseline数据");
}
static Map<String, Map<String, DosBaselineThreshold>> readFromHbase() {
Map<String, Map<String, DosBaselineThreshold>> baselineMap = new HashMap<>();
try {
prepareHbaseEnv();
logger.info("开始读取baseline数据");
ResultScanner rs = table.getScanner(scan);
for (Result result : rs) {
Map<String, DosBaselineThreshold> floodTypeMap = new HashMap<>();
String rowkey = Bytes.toString(result.getRow());
for (String type:floodTypeList){
DosBaselineThreshold baselineThreshold = new DosBaselineThreshold();
ArrayList<Integer> sessionRate = HbaseUtils.getArraylist(result, type, "session_rate");
if (sessionRate != null && !sessionRate.isEmpty()){
Integer defaultValue = HbaseUtils.getIntegerValue(result, type, "session_rate_default_value");
Integer rateBaselineType = HbaseUtils.getIntegerValue(result, type, "session_rate_baseline_type");
baselineThreshold.setSession_rate(sessionRate);
baselineThreshold.setSession_rate_default_value(defaultValue);
baselineThreshold.setSession_rate_baseline_type(rateBaselineType);
floodTypeMap.put(type,baselineThreshold);
}
}
baselineMap.put(rowkey, floodTypeMap);
}
logger.info("格式化baseline数据成功读取IP共{}", baselineMap.size());
} catch (Exception e) {
logger.error("读取hbase数据失败", e);
}
return baselineMap;
}
public static void main(String[] args) {
long currentTimeMillis = System.currentTimeMillis();
long p200D = DateUtils.getSomeDate(new Date(currentTimeMillis), Math.negateExact(CommonConfig.HBASE_BASELINE_TTL)).getTime();
System.out.println(p200D);
System.out.println(currentTimeMillis);
System.out.println(currentTimeMillis - p200D);
Map<String, Map<String, DosBaselineThreshold>> baselineMap = readFromHbase();
Set<String> keySet = baselineMap.keySet();
for (String key : keySet) {
Map<String, DosBaselineThreshold> stringTuple2Map = baselineMap.get(key);
Set<String> strings = stringTuple2Map.keySet();
for (String s:strings){
DosBaselineThreshold dosBaselineThreshold = stringTuple2Map.get(s);
System.out.println(key+"---"+s+"---"+dosBaselineThreshold);
}
}
System.out.println(baselineMap.size());
}
}
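
readFromHbase keys the outer map by rowkey (the destination IP) and the inner map by flood type. The implied row layout sketched below is an inference from the reads above, not a documented schema:

// Assumed HBase row shape (inferred from the getScanner loop above):
//   rowkey:        destination IP, e.g. "192.0.2.10"
//   column family: one per flood type, e.g. "TCP SYN Flood"
//   qualifiers:    session_rate (144-entry array),
//                  session_rate_default_value, session_rate_baseline_type
Map<String, Map<String, DosBaselineThreshold>> baselineMap = ParseBaselineThreshold.readFromHbase();
DosBaselineThreshold syn = baselineMap
        .getOrDefault("192.0.2.10", Collections.emptyMap())
        .get("TCP SYN Flood");   // null when no baseline row exists for that IP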

View File

@@ -1,21 +1,31 @@
package com.zdjizhi.etl;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.fasterxml.jackson.databind.JavaType;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.function.BroadcastProcessFunc;
import com.zdjizhi.source.DosSketchSource;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.*;
/**
* @author wlh
@@ -23,13 +33,38 @@ import java.util.HashMap;
public class ParseSketchLog {
private static Logger logger = LoggerFactory.getLogger(ParseSketchLog.class);
private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
private static JavaType hashmapJsonType = jsonMapperInstance.createCollectionType(HashMap.class, String.class, Object.class);
private static JavaType listType = jsonMapperInstance.createCollectionType(ArrayList.class, HashMap.class);
public static SingleOutputStreamOperator<DosSketchLog> getSketchSource(){
return flatSketchSource().assignTimestampsAndWatermarks(createWatermarkStrategy());
}
private static SingleOutputStreamOperator<DosSketchLog> flatSketchSource(){
return DosSketchSource.createDosSketchSource().flatMap(new FlatSketchLog());
DataStreamSource<Map<String, byte[]>> broadcastSource = null;
Properties nacosProperties = new Properties();
nacosProperties.put(PropertyKeyConst.SERVER_ADDR, CommonConfig.NACOS_SERVER_ADDR);
nacosProperties.setProperty(PropertyKeyConst.USERNAME, CommonConfig.NACOS_USERNAME);
nacosProperties.setProperty(PropertyKeyConst.PASSWORD, CommonConfig.NACOS_PASSWORD);
if ("CLUSTER".equals(CommonConfig.CLUSTER_OR_SINGLE)) {
broadcastSource = DosSketchSource.broadcastSource(nacosProperties, CommonConfig.HDFS_PATH);
} else {
broadcastSource = DosSketchSource.singleBroadcastSource(nacosProperties);
}
MapStateDescriptor<String,Map> descriptor =
new MapStateDescriptor<>("descriptorTest", Types.STRING, TypeInformation.of(Map.class));
BroadcastStream<Map<String, byte[]>> broadcast = broadcastSource.broadcast(descriptor);
// BroadcastConnectedStream<String, List<CustomFile>> connect = DosSketchSource.createDosSketchSource().connect(broadcast);
return DosSketchSource.createDosSketchSource()
.connect(broadcast).process(new BroadcastProcessFunc());
// .flatMap(new FlatSketchLog());
}
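
BroadcastProcessFunc, used above, does not appear in this diff. A minimal outline of what such a function plausibly looks like, assuming it emits DosSketchLog records on the data side (as FlatSketchLog below does) and caches knowledge-base files arriving on the broadcast side:

// Hypothetical outline of BroadcastProcessFunc; the real class is not shown in this diff.
public class BroadcastProcessFuncSketch
        extends BroadcastProcessFunction<String, Map<String, byte[]>, DosSketchLog> {

    private Map<String, byte[]> knowledgeFiles = new HashMap<>();

    @Override
    public void processElement(String json, ReadOnlyContext ctx, Collector<DosSketchLog> out) {
        // parse the sketch JSON and emit one DosSketchLog per report_ip_list entry,
        // mirroring FlatSketchLog.flatMap below
    }

    @Override
    public void processBroadcastElement(Map<String, byte[]> files, Context ctx, Collector<DosSketchLog> out) {
        // replace the cached knowledge-base files pushed from Nacos/HDFS
        knowledgeFiles = files;
    }
}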
private static WatermarkStrategy<DosSketchLog> createWatermarkStrategy(){
@@ -43,11 +78,11 @@ public class ParseSketchLog {
public void flatMap(String s, Collector<DosSketchLog> collector) {
try {
if (StringUtil.isNotBlank(s)){
HashMap<String, Object> sketchSource = (HashMap<String, Object>) JsonMapper.fromJsonString(s, Object.class);
HashMap<String, Object> sketchSource = jsonMapperInstance.fromJson(s, hashmapJsonType);
long sketchStartTime = Long.parseLong(sketchSource.get("sketch_start_time").toString());
long sketchDuration = Long.parseLong(sketchSource.get("sketch_duration").toString());
String attackType = sketchSource.get("attack_type").toString();
ArrayList<HashMap<String, Object>> reportIpList = (ArrayList<HashMap<String, Object>>) sketchSource.get("report_ip_list");
ArrayList<HashMap<String, Object>> reportIpList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(sketchSource.get("report_ip_list")), listType);
for (HashMap<String, Object> obj : reportIpList) {
DosSketchLog dosSketchLog = new DosSketchLog();
dosSketchLog.setSketch_start_time(sketchStartTime);

View File

@@ -0,0 +1,275 @@
package com.zdjizhi.etl;
import com.fasterxml.jackson.databind.JavaType;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosDetectionThreshold;
import com.zdjizhi.common.DosVsysId;
import com.zdjizhi.utils.HttpClientUtils;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.NacosUtils;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import org.apache.flink.shaded.guava18.com.google.common.collect.Range;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
* @author wlh
*/
public class ParseStaticThreshold {
private static Logger logger = LoggerFactory.getLogger(ParseStaticThreshold.class);
private static String encryptpwd;
private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
private static JavaType hashmapJsonType = jsonMapperInstance.createCollectionType(HashMap.class, String.class, Object.class);
private static JavaType thresholdType = jsonMapperInstance.createCollectionType(ArrayList.class, DosDetectionThreshold.class);
private static JavaType vsysIDType = jsonMapperInstance.createCollectionType(ArrayList.class, DosVsysId.class);
static {
// load the encrypted login password
encryptpwd = getEncryptpwd();
}
/**
* Fetch the encrypted login password
*/
private static String getEncryptpwd() {
String psw = HttpClientUtils.ERROR_MESSAGE;
try {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("password", "admin");
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_ENCRYPTPWD_PATH, parms);
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build());
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
psw = data.get("encryptpwd").toString();
} else {
logger.error(msg);
}
}
} catch (URISyntaxException e) {
logger.error("构造URI异常", e);
} catch (Exception e) {
logger.error("获取encryptpwd失败", e);
}
return psw;
}
/**
* Log in to the Bifang server and obtain a token
*
* @return token
*/
private static String loginBifangServer() {
String token = HttpClientUtils.ERROR_MESSAGE;
try {
if (!HttpClientUtils.ERROR_MESSAGE.equals(encryptpwd)) {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("username", "admin");
parms.put("password", encryptpwd);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_LOGIN_PATH, parms);
String resposeJsonStr = HttpClientUtils.httpPost(uriBuilder.build(), null);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
token = data.get("token").toString();
} else {
logger.error(msg);
}
}
}
} catch (Exception e) {
logger.error("登录失败,未获取到token ", e);
}
return token;
}
/**
* Fetch the list of configured vsysIds
*
* @return vsysIdList
*/
private static ArrayList<DosVsysId> getVsysId() {
ArrayList<DosVsysId> vsysIdList = null;
try {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("pageSize", -1);
parms.put("orderBy", "vsysId desc");
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_VSYSID_PATH, parms);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
BasicHeader authorization = new BasicHeader("Authorization", token);
BasicHeader authorization1 = new BasicHeader("Content-Type", "application/x-www-form-urlencoded");
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build(), authorization, authorization1);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
Object list = data.get("list");
if (list != null) {
vsysIdList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), vsysIDType);
logger.info("获取到vsysId{}条", vsysIdList.size());
} else {
logger.warn("vsysIdList为空");
}
} else {
logger.error(msg);
}
}
}
} catch (Exception e) {
logger.error("获取vsysId失败,请检查bifang服务或登录配置信息 ", e);
}
return vsysIdList;
}
/**
* Fetch the static-threshold configuration list by vsysId
*
* @return thresholds
*/
private static ArrayList<DosDetectionThreshold> getDosDetectionThreshold() {
ArrayList<DosDetectionThreshold> thresholds = null;
// ArrayList<DosVsysId> vsysId = getVsysId();
try {
// if (vsysId != null){
// for (DosVsysId dosVsysId : vsysId) {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> params = new HashMap<>();
params.put("pageSize", -1);
params.put("orderBy", "profileId asc");
params.put("isValid", 1);
// params.put("vsysId", dosVsysId.getVsysId());
params.put("vsysId", 1);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_THRESHOLD_PATH, params);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
BasicHeader authorization = new BasicHeader("Authorization", token);
BasicHeader contentType = new BasicHeader("Content-Type", "application/x-www-form-urlencoded");
String responseJsonStr = HttpClientUtils.httpGet(uriBuilder.build(), authorization, contentType);
if (!HttpClientUtils.ERROR_MESSAGE.equals(responseJsonStr)) {
HashMap<String, Object> responseMap = jsonMapperInstance.fromJson(responseJsonStr, hashmapJsonType);
boolean success = (boolean) responseMap.get("success");
String msg = responseMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(responseMap.get("data")), hashmapJsonType);
Object list = data.get("list");
if (list != null) {
thresholds = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), thresholdType);
logger.info("Fetched {} static-threshold configs", thresholds.size());
} else {
logger.warn("Static-threshold config is empty");
}
} else {
logger.error(msg);
}
}
}
// }
// }
} catch (Exception e) {
logger.error("Failed to fetch static-threshold configs; check the bifang service or login configuration", e);
}
return thresholds;
}
/**
* Build the threshold RangeMap from static thresholds (key: IP range or single IP, value: threshold config)
*
* @return threshold RangeMap
*/
static HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> createStaticThreshold() {
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> thresholdRangeMap = new HashMap<>(4);
try {
ArrayList<DosDetectionThreshold> dosDetectionThreshold = getDosDetectionThreshold();
if (dosDetectionThreshold != null && !dosDetectionThreshold.isEmpty()) {
for (DosDetectionThreshold threshold : dosDetectionThreshold) {
String attackType = threshold.getAttackType();
TreeRangeMap<IPAddress, DosDetectionThreshold> treeRangeMap = thresholdRangeMap.getOrDefault(attackType, TreeRangeMap.create());
ArrayList<String> serverIpList = threshold.getServerIpList();
for (String sip : serverIpList) {
IPAddressString ipAddressString = new IPAddressString(sip);
if (ipAddressString.isIPAddress()) {
IPAddress address = ipAddressString.getAddress();
if (address.isPrefixed()) {
IPAddress lower = address.getLower();
IPAddress upper = address.getUpper();
if (!address.isMultiple()) {
lower = address.adjustPrefixLength(address.getBitCount());
upper = address.toMaxHost().withoutPrefixLength();
}
Map.Entry<Range<IPAddress>, DosDetectionThreshold> lowerEntry = treeRangeMap.getEntry(lower);
Map.Entry<Range<IPAddress>, DosDetectionThreshold> upperEntry = treeRangeMap.getEntry(upper);
if (lowerEntry != null && upperEntry == null) {
Range<IPAddress> lowerEntryKey = lowerEntry.getKey();
DosDetectionThreshold lowerEntryValue = lowerEntry.getValue();
treeRangeMap.put(Range.closedOpen(lowerEntryKey.lowerEndpoint(), lower), lowerEntryValue);
treeRangeMap.put(Range.closed(lower, upper), threshold);
} else if (lowerEntry == null && upperEntry != null) {
Range<IPAddress> upperEntryKey = upperEntry.getKey();
DosDetectionThreshold upperEntryValue = upperEntry.getValue();
treeRangeMap.put(Range.openClosed(upper, upperEntryKey.upperEndpoint()), upperEntryValue);
treeRangeMap.put(Range.closed(lower, upper), threshold);
} else {
treeRangeMap.put(Range.closed(lower, upper), threshold);
}
} else {
treeRangeMap.put(Range.closed(address, address), threshold);
}
}
}
thresholdRangeMap.put(attackType, treeRangeMap);
}
}
} catch (Exception e) {
logger.error("构建threshold RangeMap失败", e);
}
return thresholdRangeMap;
}
public static void main(String[] args) {
ArrayList<DosDetectionThreshold> dosDetectionThreshold = getDosDetectionThreshold();
dosDetectionThreshold.forEach(System.out::println);
System.out.println("------------------------");
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> staticThreshold = createStaticThreshold();
System.out.println("------------------------");
for (String type : staticThreshold.keySet()) {
Map<Range<IPAddress>, DosDetectionThreshold> asMapOfRanges = staticThreshold.get(type).asMapOfRanges();
for (Range<IPAddress> range : asMapOfRanges.keySet()) {
DosDetectionThreshold threshold = asMapOfRanges.get(range);
System.out.println(type + "---" + range + "---" + threshold);
}
System.out.println("------------------------");
}
// String s = loginBifangServer();
// System.out.println(s);
}
}
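The range-splitting in createStaticThreshold is easiest to see in isolation. Below is a minimal, hypothetical sketch (assuming Guava and the ipaddress library on the classpath; a plain String stands in for DosDetectionThreshold) of how a closed IP range maps to a value and how a point lookup resolves it:
import com.google.common.collect.Range;
import com.google.common.collect.TreeRangeMap;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
public class RangeMapSketch {
public static void main(String[] args) {
// String values stand in for DosDetectionThreshold in this sketch.
TreeRangeMap<IPAddress, String> map = TreeRangeMap.create();
IPAddress net = new IPAddressString("192.168.50.0/24").getAddress();
// Same shape as the Range.closed(lower, upper) entries built above.
map.put(Range.closed(net.getLower().withoutPrefixLength(), net.getUpper().withoutPrefixLength()), "profile-A");
IPAddress probe = new IPAddressString("192.168.50.23").getAddress();
System.out.println(map.get(probe)); // -> profile-A
}
}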

View File

@@ -0,0 +1,76 @@
package com.zdjizhi.function;
import com.fasterxml.jackson.databind.JavaType;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.etl.ParseSketchLog;
import com.zdjizhi.utils.IpUtils;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class BroadcastProcessFunc extends BroadcastProcessFunction<String, Map<String, byte[]>, DosSketchLog> {
private static Logger logger = LoggerFactory.getLogger(BroadcastProcessFunc.class);
private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
private static JavaType hashmapJsonType = jsonMapperInstance.createCollectionType(HashMap.class, String.class, Object.class);
private static JavaType listType = jsonMapperInstance.createCollectionType(ArrayList.class, HashMap.class);
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
System.out.println("begin init");
IpUtils.loadIpLook();
System.out.println("init over");
}
@Override
public void processElement(String value, ReadOnlyContext ctx, Collector<DosSketchLog> out) throws Exception {
try {
if (StringUtil.isNotBlank(value)){
HashMap<String, Object> sketchSource = jsonMapperInstance.fromJson(value, hashmapJsonType);
long sketchStartTime = Long.parseLong(sketchSource.get("sketch_start_time").toString());
long sketchDuration = Long.parseLong(sketchSource.get("sketch_duration").toString());
String attackType = sketchSource.get("attack_type").toString();
ArrayList<HashMap<String, Object>> reportIpList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(sketchSource.get("report_ip_list")), listType);
for (HashMap<String, Object> obj : reportIpList) {
DosSketchLog dosSketchLog = new DosSketchLog();
dosSketchLog.setSketch_start_time(sketchStartTime);
dosSketchLog.setSketch_duration(sketchDuration);
dosSketchLog.setAttack_type(attackType);
String sourceIp = obj.get("source_ip").toString();
String destinationIp = obj.get("destination_ip").toString();
long sketchSessions = Long.parseLong(obj.get("sketch_sessions").toString());
long sketchPackets = Long.parseLong(obj.get("sketch_packets").toString());
long sketchBytes = Long.parseLong(obj.get("sketch_bytes").toString());
dosSketchLog.setSource_ip(sourceIp);
dosSketchLog.setDestination_ip(destinationIp);
dosSketchLog.setSketch_sessions(sketchSessions);
dosSketchLog.setSketch_packets(sketchPackets);
dosSketchLog.setSketch_bytes(sketchBytes);
out.collect(dosSketchLog);
logger.debug("数据解析成功:{}",dosSketchLog.toString());
}
}
} catch (Exception e) {
logger.error("数据解析错误:{} \n{}",value,e);
}
}
@Override
public void processBroadcastElement(Map<String, byte[]> value, Context ctx, Collector<DosSketchLog> out) throws Exception {
IpUtils.updateIpLook(value);
}
}
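For context, a minimal wiring sketch (not part of this change) of how BroadcastProcessFunc is typically attached in Flink: rawSketchStream stands for the Kafka string source, nacosProperties for the nacos connection properties, and the descriptor name is illustrative.
MapStateDescriptor<String, byte[]> knowledgeDescriptor =
new MapStateDescriptor<>("knowledge-files", String.class, byte[].class);
BroadcastStream<Map<String, byte[]>> knowledgeBroadcast =
DosSketchSource.singleBroadcastSource(nacosProperties).broadcast(knowledgeDescriptor);
SingleOutputStreamOperator<DosSketchLog> parsed =
rawSketchStream.connect(knowledgeBroadcast).process(new BroadcastProcessFunc());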

View File

@@ -1,21 +0,0 @@
package com.zdjizhi.sink;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.KafkaUtils;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import java.util.Objects;
class DosSketchSink {
static void dosSketchOutputSink(SingleOutputStreamOperator<DosSketchLog> sketchSource){
sketchSource
.filter(Objects::nonNull)
.map(JsonMapper::toJsonString)
.addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_SKETCH_TOPIC_NAME))
.setParallelism(CommonConfig.KAFKA_OUTPUT_SKETCH_PARALLELISM);
}
}

View File

@@ -27,9 +27,6 @@ public class OutputStreamSink {
public static void finalOutputSink(){
try {
SingleOutputStreamOperator<DosSketchLog> sketchSource = ParseSketchLog.getSketchSource();
DosSketchSink.dosSketchOutputSink(sketchSource);
SingleOutputStreamOperator<DosSketchLog> middleStream = getMiddleStream();
DosEventSink.dosEventOutputSink(getEventSinkStream(middleStream));
TrafficServerIpMetricsSink.sideOutputMetricsSink(middleStream);

View File

@@ -16,6 +16,10 @@ class TrafficServerIpMetricsSink {
DataStream<DosMetricsLog> sideOutput = outputStream.getSideOutput(outputTag);
sideOutput.map(JsonMapper::toJsonString).addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_METRIC_TOPIC_NAME))
.setParallelism(CommonConfig.KAFKA_OUTPUT_METRIC_PARALLELISM);
}
}

View File

@@ -1,12 +1,15 @@
package com.zdjizhi.source;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
@@ -20,10 +23,24 @@ public class DosSketchSource {
Properties properties = new Properties();
properties.setProperty("bootstrap.servers", CommonConfig.KAFKA_INPUT_BOOTSTRAP_SERVERS);
properties.setProperty("group.id", CommonConfig.KAFKA_GROUP_ID);
if (CommonConfig.SASL_JAAS_CONFIG_FLAG == 1){
properties.put("security.protocol", "SASL_PLAINTEXT");
properties.put("sasl.mechanism", "PLAIN");
properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""+CommonConfig.SASL_JAAS_CONFIG_USER+"\" password=\""+CommonConfig.SASL_JAAS_CONFIG_PASSWORD+"\";");
}
return streamExeEnv.addSource(new FlinkKafkaConsumer<String>(
CommonConfig.KAFKA_INPUT_TOPIC_NAME,
new SimpleStringSchema(), properties))
.setParallelism(CommonConfig.KAFKA_INPUT_PARALLELISM);
}
public static DataStreamSource<Map<String, byte[]>> broadcastSource(Properties nacosProperties, String STORE_PATH){
return streamExeEnv.addSource(new HttpSource(nacosProperties, CommonConfig.NACOS_DATA_ID, CommonConfig.NACOS_GROUP, CommonConfig.NACOS_READ_TIMEOUT,STORE_PATH));
}
public static DataStreamSource<Map<String, byte[]>> singleBroadcastSource(Properties nacosProperties){
return streamExeEnv.addSource(new SingleHttpSource(nacosProperties, CommonConfig.NACOS_DATA_ID, CommonConfig.NACOS_GROUP, CommonConfig.NACOS_READ_TIMEOUT));
}
}

View File

@@ -0,0 +1,182 @@
package com.zdjizhi.source;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.json.JSONObject;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.base.Joiner;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.KnowledgeLog;
import com.zdjizhi.utils.*;
import org.apache.commons.io.IOUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executor;
public class HttpSource extends RichHttpSourceFunction<Map<String, byte[]>> {
private static final Logger logger = LoggerFactory.getLogger(HttpSource.class);
private static final String EXPR = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
//nacos connection properties
private Properties nacosProperties;
//nacos data id
private String NACOS_DATA_ID;
//nacos group
private String NACOS_GROUP;
//nacos read timeout
private long NACOS_READ_TIMEOUT;
//HDFS upload path
private String STORE_PATH;
private ConfigService configService;
// private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
// private static JavaType listType = jsonMapperInstance.createCollectionType(List.class, KnowledgeLog.class);
private static Map<String, String> updateMap = new HashMap<>();
private static HashMap<String, byte[]> knowledgeFileCache;
private boolean isRunning = true;
public HttpSource(Properties nacosProperties, String NACOS_DATA_ID, String NACOS_GROUP, long NACOS_READ_TIMEOUT, String storePath) {
this.nacosProperties = nacosProperties;
this.NACOS_DATA_ID = NACOS_DATA_ID;
this.NACOS_GROUP = NACOS_GROUP;
this.NACOS_READ_TIMEOUT = NACOS_READ_TIMEOUT;
this.STORE_PATH = storePath;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
//initialize the metadata cache
updateMap = new HashMap<>(16);
//initialize the location-library (knowledge file) cache
knowledgeFileCache = new HashMap<>(16);
logger.info("Connecting to nacos: " + nacosProperties.getProperty(PropertyKeyConst.SERVER_ADDR));
configService = NacosFactory.createConfigService(nacosProperties);
}
@Override
public void run(SourceContext ctx) throws Exception {
// ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String format = formatter.format(new Date());
logger.info(format + "receive config from nacos:" + config);
System.out.println(format + "receive config from nacos:" + config);
if (StringUtil.isNotBlank(config)) {
ArrayList<Object> metaList = JsonPath.parse(config).read(EXPR);
loadKnowledge(metaList);
}
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
logger.info("receive update config:" + configMsg);
if (StringUtil.isNotBlank(configMsg)) {
ArrayList<Object> metaList = JsonPath.parse(configMsg).read(EXPR);
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
if (!sha256.equals(updateMap.get(fileName))) {
updateMap.put(fileName, sha256);
updateKnowledge(fileName, filePath);
}
}
ctx.collect(knowledgeFileCache);
}
}
} catch (Exception e) {
logger.error("监听nacos配置失败", e);
}
System.out.println(configMsg);
}
});
while (isRunning) {
Thread.sleep(10000);
}
}
private void loadKnowledge(ArrayList<Object> metaList) {
InputStream inputStream = null;
try {
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
updateMap.put(fileName, sha256);
knowledgeFileCache.put(fileName, IOUtils.toByteArray(inputStream));
}
}
} catch (IOException ioException) {
logger.error("Failed to load knowledge files", ioException);
} finally {
IOUtils.closeQuietly(inputStream);
}
}
private void updateKnowledge(String fileName, String filePath) {
InputStream inputStream = null;
try {
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
byte[] bytes = IOUtils.toByteArray(inputStream);
HdfsUtils.uploadFileByBytes(CommonConfig.HDFS_PATH + fileName, bytes);
knowledgeFileCache.put(fileName, bytes);
} catch (IOException ioException) {
logger.error("Failed to update knowledge file {}", fileName, ioException);
} finally {
IOUtils.closeQuietly(inputStream);
}
}
@Override
public void cancel() {
this.isRunning = false;
}
}
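The EXPR filter above is plain Jayway JsonPath. A small, self-contained illustration (payload values are made up; field names follow the expression) of what it selects:
import com.jayway.jsonpath.JsonPath;
import java.util.List;
import java.util.Map;
public class ExprSketch {
public static void main(String[] args) {
String config = "[{\"name\":\"ip_v4_built_in\",\"version\":\"latest\",\"sha256\":\"aa11\",\"format\":\"mmdb\",\"path\":\"/hos/ip_v4_built_in\"},"
+ "{\"name\":\"ip_v4_built_in\",\"version\":\"v1\",\"sha256\":\"old\",\"format\":\"mmdb\",\"path\":\"/hos/old\"}]";
String expr = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
List<Map<String, Object>> metaList = JsonPath.parse(config).read(expr);
// Prints one entry: {name=ip_v4_built_in, sha256=aa11, format=mmdb, path=/hos/ip_v4_built_in}
System.out.println(metaList);
}
}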

View File

@@ -0,0 +1,6 @@
package com.zdjizhi.source;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
public interface HttpSourceFunction<OUT> extends SourceFunction<OUT> {
}

View File

@@ -0,0 +1,10 @@
package com.zdjizhi.source;
import org.apache.flink.api.common.functions.AbstractRichFunction;
public abstract class RichHttpSourceFunction<OUT> extends AbstractRichFunction implements HttpSourceFunction<OUT> {
private static final long serialVersionUID = 1L;
public RichHttpSourceFunction() {
}
}

View File

@@ -0,0 +1,217 @@
package com.zdjizhi.source;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.json.JSONObject;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.base.Joiner;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.KnowledgeLog;
import com.zdjizhi.utils.*;
import org.apache.commons.io.IOUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.*;
import java.util.concurrent.Executor;
public class SingleHttpSource extends RichHttpSourceFunction<Map<String, byte[]>> {
private static final Logger logger = LoggerFactory.getLogger(SingleHttpSource.class);
private static HashMap<String, byte[]> knowledgeFileCache;
private Properties nacosProperties;
private String NACOS_DATA_ID;
private String NACOS_GROUP;
private long NACOS_READ_TIMEOUT;
private static String STORE_PATH;
private ConfigService configService;
// private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
// private static JavaType listType = jsonMapperInstance.createCollectionType(List.class, KnowledgeLog.class);
private static final String EXPR = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
private static Map<String, String> updateMap = new HashMap<>();
private boolean isRunning = true;
public SingleHttpSource(Properties nacosProperties, String NACOS_DATA_ID, String NACOS_GROUP, long NACOS_READ_TIMEOUT) {
this.nacosProperties = nacosProperties;
this.NACOS_DATA_ID = NACOS_DATA_ID;
this.NACOS_GROUP = NACOS_GROUP;
this.NACOS_READ_TIMEOUT = NACOS_READ_TIMEOUT;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
logger.info("连接nacos" + nacosProperties.getProperty(PropertyKeyConst.SERVER_ADDR));
configService = NacosFactory.createConfigService(nacosProperties);
//initialize the metadata cache
updateMap = new HashMap<>(16);
//initialize the location-library (knowledge file) cache
knowledgeFileCache = new HashMap<>(16);
}
@Override
public void run(SourceContext ctx) throws Exception {
// ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
// List<CustomFile> customFiles = new ArrayList<>();
if (StringUtil.isNotBlank(config)) {
ArrayList<Object> metaList = JsonPath.parse(config).read(EXPR);
loadKnowledge(metaList);
}
// if (StringUtil.isNotBlank(config)) {
// List<KnowledgeLog> knowledgeLogListList = jsonMapperInstance.fromJson(config, listType);
// if (knowledgeLogListList.size()>=1){
// for (KnowledgeLog knowledgeLog : knowledgeLogListList) {
// String name = knowledgeLog.getName().concat(".").concat(knowledgeLog.getFormat());
// String sha256 = knowledgeLog.getSha256();
// updateMap.put(name,sha256);
// }
// }
// }
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
logger.info("receive update config:" + configMsg);
if (StringUtil.isNotBlank(configMsg)) {
ArrayList<Object> metaList = JsonPath.parse(configMsg).read(EXPR);
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
if (!sha256.equals(updateMap.get(fileName))) {
updateMap.put(fileName, sha256);
updateKnowledge(fileName, filePath);
}
}
ctx.collect(knowledgeFileCache);
}
}
} catch (Exception e) {
logger.error("监听nacos配置失败", e);
}
System.out.println(configMsg);
}
});
while (isRunning) {
Thread.sleep(10000);
}
}
// private CustomFile loadKnowledge(String fileName, String filePath) {
// InputStream inputStream = null;
// FileOutputStream outputStream = null;
// CustomFile customFile = new CustomFile();
// try {
// customFile.setFileName(fileName);
// Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
// HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
// inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
// FileUtil.mkdir(CommonConfig.DOWNLOAD_PATH);
// File file = new File(CommonConfig.DOWNLOAD_PATH.concat(File.separator).concat(fileName));
// outputStream = new FileOutputStream(file);
// byte[] bytes = IOUtils.toByteArray(inputStream);
// customFile.setContent(bytes);
// inputStream = new ByteArrayInputStream(customFile.getContent());
// IoUtil.copy(inputStream, outputStream);
//
// } catch (IOException ioException) {
// ioException.printStackTrace();
// } finally {
// IOUtils.closeQuietly(inputStream);
// IOUtils.closeQuietly(outputStream);
// }
// return customFile;
// }
private void loadKnowledge(ArrayList<Object> metaList) {
InputStream inputStream = null;
try {
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
updateMap.put(fileName, sha256);
knowledgeFileCache.put(fileName, IOUtils.toByteArray(inputStream));
}
}
} catch (IOException ioException) {
logger.error("Failed to load knowledge files", ioException);
} finally {
IOUtils.closeQuietly(inputStream);
}
}
private void updateKnowledge(String fileName, String filePath) {
InputStream inputStream = null;
FileOutputStream outputStream = null;
try {
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
FileUtil.mkdir(CommonConfig.DOWNLOAD_PATH);
File file = new File(CommonConfig.DOWNLOAD_PATH.concat(File.separator).concat(fileName));
outputStream = new FileOutputStream(file);
byte[] bytes = IOUtils.toByteArray(inputStream);
knowledgeFileCache.put(fileName, bytes);
inputStream=new ByteArrayInputStream(bytes);
IoUtil.copy(inputStream, outputStream);
} catch (IOException ioException) {
logger.error("Failed to update knowledge file {}", fileName, ioException);
} finally {
IOUtils.closeQuietly(inputStream);
IOUtils.closeQuietly(outputStream);
}
}
@Override
public void cancel() {
this.isRunning = false;
}
}

View File

@@ -0,0 +1,24 @@
package com.zdjizhi.utils;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
public class FileByteUtils {
public static byte[] getFileBytes(String filePath) throws IOException {
// try-with-resources so the streams are closed even if read() throws
try (FileInputStream fis = new FileInputStream(new File(filePath));
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024)) {
byte[] b = new byte[1024];
int n;
while ((n = fis.read(b)) != -1) {
bos.write(b, 0, n);
}
return bos.toByteArray();
}
}
}

View File

@@ -1,6 +1,8 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
@@ -12,7 +14,33 @@ public class FlinkEnvironmentUtils {
static {
streamExeEnv.setParallelism(CommonConfig.STREAM_EXECUTION_ENVIRONMENT_PARALLELISM);
/*
// start a checkpoint every 1000 ms
streamExeEnv.enableCheckpointing(CommonConfig.FLINK_WINDOW_MAX_TIME * 1000);
// set exactly-once mode (the default)
streamExeEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
// require at least 500 ms between checkpoints
streamExeEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
// a checkpoint must finish within one minute or it is discarded
streamExeEnv.getCheckpointConfig().setCheckpointTimeout(60000);
// tolerate two consecutive checkpoint failures
streamExeEnv.getCheckpointConfig().setTolerableCheckpointFailureNumber(2);
// allow only one checkpoint at a time
streamExeEnv.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
// use externalized checkpoints so they are retained after the job is cancelled
streamExeEnv.getCheckpointConfig().enableExternalizedCheckpoints(
CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
// enable experimental unaligned checkpoints
streamExeEnv.getCheckpointConfig().enableUnalignedCheckpoints();
*/
}
}

View File

@@ -1,17 +1,10 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
@@ -22,75 +15,8 @@ import java.util.*;
* @author wlh
*/
public class HbaseUtils {
private static final Logger logger = LoggerFactory.getLogger(HbaseUtils.class);
private static Table table = null;
private static Scan scan = null;
private static ArrayList<String> floodTypeList = new ArrayList<>();
static {
floodTypeList.add("TCP SYN Flood");
floodTypeList.add("UDP Flood");
floodTypeList.add("ICMP Flood");
floodTypeList.add("DNS Amplification");
}
private static void prepareHbaseEnv() throws IOException {
org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
config.set("hbase.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
config.set("hbase.client.retries.number", "3");
config.set("hbase.bulkload.retries.number", "3");
config.set("zookeeper.recovery.retry", "3");
config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, CommonConfig.HBASE_CLIENT_OPERATION_TIMEOUT);
config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CommonConfig.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
TableName tableName = TableName.valueOf(CommonConfig.HBASE_BASELINE_TABLE_NAME);
Connection conn = ConnectionFactory.createConnection(config);
table = conn.getTable(tableName);
scan = new Scan().setAllowPartialResults(true).setLimit(CommonConfig.HBASE_BASELINE_TOTAL_NUM);
logger.info("连接hbase成功正在读取baseline数据");
}
public static void main(String[] args) {
Map<String, Map<String, Tuple2<ArrayList<Integer>, Integer>>> baselineMap = readFromHbase();
Set<String> keySet = baselineMap.keySet();
for (String key : keySet) {
Map<String, Tuple2<ArrayList<Integer>, Integer>> stringTuple2Map = baselineMap.get(key);
Set<String> strings = stringTuple2Map.keySet();
for (String s:strings){
Tuple2<ArrayList<Integer>, Integer> arrayListIntegerTuple2 = stringTuple2Map.get(s);
System.out.println(key+"---"+s+"---"+arrayListIntegerTuple2.f0+"---"+arrayListIntegerTuple2.f1);
}
}
System.out.println(baselineMap.size());
}
public static Map<String, Map<String, Tuple2<ArrayList<Integer>, Integer>>> readFromHbase() {
Map<String, Map<String, Tuple2<ArrayList<Integer>, Integer>>> baselineMap = new HashMap<>();
try {
prepareHbaseEnv();
logger.info("开始读取baseline数据");
ResultScanner rs = table.getScanner(scan);
for (Result result : rs) {
Map<String, Tuple2<ArrayList<Integer>, Integer>> floodTypeMap = new HashMap<>();
String rowkey = Bytes.toString(result.getRow());
for (String type:floodTypeList){
ArrayList<Integer> sessionRate = getArraylist(result, type, "session_rate");
if (sessionRate != null && !sessionRate.isEmpty()){
Integer defaultValue = getDefaultValue(result, type, "session_rate_default_value");
floodTypeMap.put(type,Tuple2.of(sessionRate, defaultValue));
}
}
baselineMap.put(rowkey, floodTypeMap);
}
logger.info("格式化baseline数据成功读取IP共{}", baselineMap.size());
} catch (Exception e) {
logger.error("读取hbase数据失败", e);
}
return baselineMap;
}
private static Integer getDefaultValue(Result result, String family, String qualifier) {
public static Integer getIntegerValue(Result result, String family, String qualifier) {
byte[] value = result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier));
if (value != null){
return Bytes.toInt(value);
@@ -98,7 +24,7 @@ public class HbaseUtils {
return 1;
}
private static ArrayList<Integer> getArraylist(Result result, String family, String qualifier) throws IOException {
public static ArrayList<Integer> getArraylist(Result result, String family, String qualifier) throws IOException {
if (containsColumn(result, family, qualifier)) {
ArrayWritable w = new ArrayWritable(IntWritable.class);
w.readFields(new DataInputStream(new ByteArrayInputStream(result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier)))));

View File

@@ -0,0 +1,75 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
public class HdfsUtils {
private static final Log logger = LogFactory.get();
private static FileSystem fileSystem;
static {
Configuration configuration = new Configuration();
try {
//HDFS HA connection settings
configuration.set("fs.defaultFS","hdfs://ns1");
configuration.set("hadoop.proxyuser.root.hosts","*");
configuration.set("hadoop.proxyuser.root.groups","*");
configuration.set("ha.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
configuration.set("dfs.nameservices","ns1");
configuration.set("dfs.ha.namenodes.ns1","nn1,nn2");
configuration.set("dfs.namenode.rpc-address.ns1.nn1",CommonConfig.HDFS_URI_NS1);
configuration.set("dfs.namenode.rpc-address.ns1.nn2",CommonConfig.HDFS_URI_NS2);
configuration.set("dfs.client.failover.proxy.provider.ns1","org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
//set the HDFS user
System.setProperty("HADOOP_USER_NAME", CommonConfig.HDFS_USER);
//create the FileSystem used to connect to HDFS
fileSystem = FileSystem.get(configuration);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static boolean isExists(String filePath) throws IOException {
return fileSystem.exists(new Path(filePath));
}
public static byte[] getFileBytes(String filePath) throws IOException {
try (FSDataInputStream open = fileSystem.open(new Path(filePath))) {
byte[] bytes = new byte[open.available()];
open.readFully(0, bytes);
return bytes;
} catch (IOException e) {
logger.error("An I/O exception when files are download from HDFS. Message is :" + e.getMessage());
}
return null;
}
public static void uploadFileByBytes(String filePath,byte[] bytes) throws IOException {
try (FSDataOutputStream fsDataOutputStream = fileSystem.create(new Path(filePath), true)) {
fsDataOutputStream.write(bytes);
fsDataOutputStream.flush();
} catch (RuntimeException e) {
logger.error("Uploading files to the HDFS is abnormal. Message is :" + e.getMessage());
} catch (IOException e) {
logger.error("An I/O exception when files are uploaded to HDFS. Message is :" + e.getMessage());
}
}
public static void rename(String src, String dst) throws IOException {
fileSystem.rename(new Path(src),new Path(dst));
}
}
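A hypothetical round-trip (the path is illustrative; the snippet assumes a reachable HA cluster and an enclosing method that declares IOException):
byte[] payload = "hello hdfs".getBytes(java.nio.charset.StandardCharsets.UTF_8);
HdfsUtils.uploadFileByBytes("/dos/knowledge/demo.mmdb", payload);
if (HdfsUtils.isExists("/dos/knowledge/demo.mmdb")) {
byte[] back = HdfsUtils.getFileBytes("/dos/knowledge/demo.mmdb");
System.out.println(back != null ? back.length : -1); // -> 10
}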

View File

@@ -0,0 +1,269 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.http.*;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLHandshakeException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
/**
* HTTP client utility class
* @author wlh
*/
public class HttpClientUtils {
/** Global connection pool */
private static final PoolingHttpClientConnectionManager CONN_MANAGER = new PoolingHttpClientConnectionManager();
private static Logger logger = LoggerFactory.getLogger(HttpClientUtils.class);
public static final String ERROR_MESSAGE = "-1";
/*
* Static block: configure the connection pool
*/
static {
// maximum total connections
CONN_MANAGER.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// maximum connections per route
CONN_MANAGER.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
}
/**
* Get an HTTP client connection object
* @return HTTP client connection object
*/
private static CloseableHttpClient getHttpClient() {
// request configuration
RequestConfig requestConfig = RequestConfig.custom()
// timeout for obtaining a connection from the pool
.setConnectionRequestTimeout(CommonConfig.HTTP_POOL_REQUEST_TIMEOUT)
// connect timeout
.setConnectTimeout(CommonConfig.HTTP_POOL_CONNECT_TIMEOUT)
// socket (response) timeout
.setSocketTimeout(CommonConfig.HTTP_POOL_RESPONSE_TIMEOUT)
.build();
/*
* Retry handler, set in case the timeouts alone do not take effect.
* Returning false means no retry;
* here we decide case by case.
*/
HttpRequestRetryHandler retry = (exception, executionCount, context) -> {
if (executionCount >= 3) {// give up after 3 retries
return false;
}
if (exception instanceof NoHttpResponseException) {// retry if the server dropped the connection
return true;
}
if (exception instanceof SSLHandshakeException) {// do not retry SSL handshake exceptions
return false;
}
if (exception instanceof UnknownHostException) {// target host unreachable
return false;
}
if (exception instanceof ConnectTimeoutException) {// connection timed out
return false;
}
if (exception instanceof HttpHostConnectException) {// connection refused
return false;
}
if (exception instanceof SSLException) {// SSL handshake error
return false;
}
if (exception instanceof InterruptedIOException) {// timeout
return true;
}
HttpClientContext clientContext = HttpClientContext.adapt(context);
HttpRequest request = clientContext.getRequest();
// retry if the request is idempotent
return !(request instanceof HttpEntityEnclosingRequest);
};
ConnectionKeepAliveStrategy myStrategy = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && "timeout".equalsIgnoreCase(param)) {
return Long.parseLong(value) * 1000;
}
}
return 60 * 1000;// default keep-alive of 60 s when not negotiated
};
// build the httpClient
return HttpClients.custom()
// apply the request timeouts
.setDefaultRequestConfig(requestConfig)
// apply the retry handler
.setRetryHandler(retry)
.setKeepAliveStrategy(myStrategy)
// use the pooled connection manager
.setConnectionManager(CONN_MANAGER)
.build();
}
/**
* GET request
*
* @param uri request URI
* @return message
*/
public static String httpGet(URI uri, Header... headers) {
String msg = ERROR_MESSAGE;
// obtain a client from the pool
CloseableHttpClient httpClient = getHttpClient();
CloseableHttpResponse response = null;
try {
logger.info("http get uri {}", uri);
// build the GET request
HttpGet httpGet = new HttpGet(uri);
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpGet.addHeader(h);
logger.info("request header : {}", h);
}
}
// execute the request
response = httpClient.execute(httpGet);
int statusCode = response.getStatusLine().getStatusCode();
// response entity
HttpEntity entity = response.getEntity();
// response body
msg = EntityUtils.toString(entity, "UTF-8");
if (statusCode != HttpStatus.SC_OK) {
logger.error("Http get content is :{}", msg);
}
} catch (ClientProtocolException e) {
logger.error("Protocol error: {}", e.getMessage());
} catch (ParseException e) {
logger.error("Parse error: {}", e.getMessage());
} catch (IOException e) {
logger.error("I/O error: {}", e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consume(response.getEntity());
response.close();
} catch (IOException e) {
logger.error("Error releasing connection: {}", e.getMessage());
}
}
}
return msg;
}
/**
* POST request
* @param uri request URI
* @param requestBody request body
* @return response body of the POST request
*/
public static String httpPost(URI uri, String requestBody, Header... headers) {
String msg = ERROR_MESSAGE;
// obtain a client from the pool
CloseableHttpClient httpClient = getHttpClient();
// build the POST request
CloseableHttpResponse response = null;
try {
logger.info("http post uri:{} http post body:{}", uri, requestBody);
HttpPost httpPost = new HttpPost(uri);
httpPost.setHeader("Content-Type", "application/x-www-form-urlencoded");
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpPost.addHeader(h);
logger.info("request header : {}", h);
}
}
if (StringUtil.isNotBlank(requestBody)) {
byte[] bytes = requestBody.getBytes(StandardCharsets.UTF_8);
httpPost.setEntity(new ByteArrayEntity(bytes));
}
response = httpClient.execute(httpPost);
int statusCode = response.getStatusLine().getStatusCode();
// response entity
HttpEntity entity = response.getEntity();
// response body
msg = EntityUtils.toString(entity, "UTF-8");
if (statusCode != HttpStatus.SC_OK) {
logger.error("Http post content is :{}", msg);
}
} catch (ClientProtocolException e) {
logger.error("Protocol error: {}", e.getMessage());
} catch (ParseException e) {
logger.error("Parse error: {}", e.getMessage());
} catch (IOException e) {
logger.error("I/O error: {}", e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consumeQuietly(response.getEntity());
response.close();
} catch (IOException e) {
logger.error("Error releasing connection: {}", e.getMessage());
}
}
}
return msg;
}
/**
* Assemble the URL
* from a path and a parameter map
*/
public static void setUrlWithParams(URIBuilder uriBuilder,String path, Map<String, Object> params) {
try {
uriBuilder.setPath(path);
if (params != null && !params.isEmpty()){
for (Map.Entry<String, Object> kv : params.entrySet()) {
uriBuilder.setParameter(kv.getKey(),kv.getValue().toString());
}
}
} catch (Exception e) {
logger.error("拼接url出错,uri : {}, path : {},参数: {}",uriBuilder.toString(),path,params);
}
}
}
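For reference, this is how setUrlWithParams and httpGet compose in the bifang calls earlier in this change; a hypothetical fragment (the token value is assumed, the host and path come from the properties below, and the enclosing method declares URISyntaxException):
URIBuilder builder = new URIBuilder("http://192.168.44.72:80");
HashMap<String, Object> params = new HashMap<>();
params.put("pageSize", -1);
params.put("orderBy", "vsysId desc");
HttpClientUtils.setUrlWithParams(builder, "/v1/system/vsys/", params);
String body = HttpClientUtils.httpGet(builder.build(), new BasicHeader("Authorization", "<token>"));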

View File

@@ -0,0 +1,234 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.commons.io.IOUtils;
import org.apache.http.*;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.X509Certificate;
import java.util.Map;
/**
* HTTP client utility class (SSL-tolerant variant)
*/
public class HttpClientUtils2 {
/** Global connection pool */
private static final PoolingHttpClientConnectionManager CONN_MANAGER = new PoolingHttpClientConnectionManager();
private static Logger logger = LoggerFactory.getLogger(HttpClientUtils2.class);
public static final String ERROR_MESSAGE = "-1";
/*
* Static block: configure the connection pool
*/
static {
// maximum total connections
CONN_MANAGER.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// maximum connections per route
CONN_MANAGER.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
}
/**
* Override certificate validation so SSL checks are skipped before SSL calls;
* create a ConnectionManager with connection settings
*
* @return connection manager supporting HTTPS
*/
private PoolingHttpClientConnectionManager getSslClientManager() {
try {
// trust-all manager: disables certificate validation before SSL calls
X509TrustManager trustManager = new X509TrustManager() {
@Override
public X509Certificate[] getAcceptedIssuers() {
return new X509Certificate[0];
}
@Override
public void checkClientTrusted(X509Certificate[] xcs, String str) {
}
@Override
public void checkServerTrusted(X509Certificate[] xcs, String str) {
}
};
SSLContext ctx = SSLContext.getInstance(SSLConnectionSocketFactory.TLS);
ctx.init(null, new TrustManager[]{trustManager}, null);
SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(ctx, NoopHostnameVerifier.INSTANCE);
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.INSTANCE)
.register("https", socketFactory).build();
// create a ConnectionManager with connection settings
PoolingHttpClientConnectionManager connManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
// maximum total connections
connManager.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// maximum connections per route
connManager.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
return connManager;
} catch (KeyManagementException | NoSuchAlgorithmException e) {
throw new RuntimeException(e.getMessage());
}
}
/**
* Get an HTTP client connection object
* @return HTTP client connection object
*/
private CloseableHttpClient getHttpClient() {
// request configuration
RequestConfig requestConfig = RequestConfig.custom()
// timeout for obtaining a connection from the pool
.setConnectionRequestTimeout(CommonConfig.HTTP_POOL_REQUEST_TIMEOUT)
// connect timeout
.setConnectTimeout(CommonConfig.HTTP_POOL_CONNECT_TIMEOUT)
// socket (response) timeout
.setSocketTimeout(CommonConfig.HTTP_POOL_RESPONSE_TIMEOUT)
.build();
/*
* Retry handler, set in case the timeouts alone do not take effect.
* Returning false means no retry;
* here we decide case by case.
*/
HttpRequestRetryHandler retry = (exception, executionCount, context) -> {
if (executionCount >= 3) {// give up after 3 retries
return false;
}
if (exception instanceof NoHttpResponseException) {// retry if the server dropped the connection
return true;
}
if (exception instanceof SSLHandshakeException) {// do not retry SSL handshake exceptions
return false;
}
if (exception instanceof UnknownHostException) {// target host unreachable
return false;
}
if (exception instanceof ConnectTimeoutException) {// connection timed out
return false;
}
if (exception instanceof HttpHostConnectException) {// connection refused
return false;
}
if (exception instanceof SSLException) {// SSL handshake error
return false;
}
if (exception instanceof InterruptedIOException) {// timeout
return true;
}
HttpClientContext clientContext = HttpClientContext.adapt(context);
HttpRequest request = clientContext.getRequest();
// retry if the request is idempotent
return !(request instanceof HttpEntityEnclosingRequest);
};
ConnectionKeepAliveStrategy myStrategy = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && "timeout".equalsIgnoreCase(param)) {
return Long.parseLong(value) * 1000;
}
}
return 60 * 1000;// default keep-alive of 60 s when not negotiated
};
// build the httpClient
return HttpClients.custom()
// apply the request timeouts
.setDefaultRequestConfig(requestConfig)
// apply the retry handler
.setRetryHandler(retry)
.setKeepAliveStrategy(myStrategy)
// use the pooled, SSL-tolerant connection manager
.setConnectionManager(getSslClientManager())
.build();
}
// TODO: 2022/10/19 load the knowledge library
public InputStream httpGetInputStream(String url, int socketTimeout, Header... headers) {
InputStream result = null;
// obtain a client from the pool
CloseableHttpClient httpClient = getHttpClient();// TODO: 2022/10/19 socketTimeout parameter no longer applied
// build the GET request
HttpGet httpGet = new HttpGet(url);
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpGet.addHeader(h);
}
}
CloseableHttpResponse response = null;
try {
// execute the request
response = httpClient.execute(httpGet);
// buffer the full response entity in memory
result = IOUtils.toBufferedInputStream(response.getEntity().getContent());
// make sure the entity is fully consumed
EntityUtils.consume(response.getEntity());
} catch (ClientProtocolException e) {
logger.error("current file: {}, protocol error: {}", url, e.getMessage());
} catch (ParseException e) {
logger.error("current file: {}, parse error: {}", url, e.getMessage());
} catch (IOException e) {
logger.error("current file: {}, I/O error: {}", url, e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consume(response.getEntity());
response.close();
} catch (IOException e) {
logger.error("Error releasing connection: {}", e.getMessage());
}
}
}
// return outside finally so exceptions from the try block are not swallowed
return result;
}
}

View File

@@ -0,0 +1,21 @@
package com.zdjizhi.utils;
import lombok.Data;
/**
* @author fy
* @version 1.0
* @date 2022/10/19 18:27
*/
@Data
public class IpLocationConfiguration {
private String ipV4UserDefined;
private String ipV4BuiltIn;
private String ipV6UserDefined;
private String ipV6BuiltIn;
}

View File

@@ -1,26 +1,112 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
public class IpUtils {
public static IpLookupV2 ipLookup ;
private static Logger LOG = LoggerFactory.getLogger(IpUtils.class);
/**
* IP location-library utility class
*/
public static IpLookup ipLookup = new IpLookup.Builder(false)
.loadDataFileV4(CommonConfig.IP_MMDB_PATH + "ip_v4.mmdb")
.loadDataFileV6(CommonConfig.IP_MMDB_PATH + "ip_v6.mmdb")
.loadDataFilePrivateV4(CommonConfig.IP_MMDB_PATH + "ip_private_v4.mmdb")
.loadDataFilePrivateV6(CommonConfig.IP_MMDB_PATH + "ip_private_v6.mmdb")
.build();
// public static IpLookupV2 ipLookup = new IpLookupV2.Builder(false)
// .loadDataFileV4(CommonConfig.IP_MMDB_PATH + "ip_v4_built_in.mmdb")
// .loadDataFileV6(CommonConfig.IP_MMDB_PATH + "ip_v6_built_in.mmdb")
// .loadDataFilePrivateV4(CommonConfig.IP_MMDB_PATH + "ip_v4_user_defined.mmdb")
// .loadDataFilePrivateV6(CommonConfig.IP_MMDB_PATH + "ip_v6_user_defined.mmdb")
// .build();
public static void loadIpLook(){
try {
IpLookupV2.Builder builder = new IpLookupV2.Builder(false);
if ("CLUSTER".equals(CommonConfig.CLUSTER_OR_SINGLE)) {
byte[] ipv4BuiltBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v4_built_in.mmdb");
if (ipv4BuiltBytes!=null){
InputStream ipv4BuiltInputStream = new ByteArrayInputStream(ipv4BuiltBytes);
builder.loadDataFileV4(ipv4BuiltInputStream);
}
byte[] ipv6BuiltBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v6_built_in.mmdb");
if (ipv6BuiltBytes!=null){
InputStream ipv6BuiltInputStream = new ByteArrayInputStream(ipv6BuiltBytes);
builder.loadDataFileV6(ipv6BuiltInputStream);
}
byte[] ipv4UserBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v4_user_defined.mmdb");
if (ipv4UserBytes!=null){
InputStream ipv4UserInputStream = new ByteArrayInputStream(ipv4UserBytes);
builder.loadDataFilePrivateV4(ipv4UserInputStream);
}
byte[] ipv6UserBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v6_user_defined.mmdb");
if (ipv6UserBytes!=null){
InputStream ipv6UserInputStream = new ByteArrayInputStream(ipv6UserBytes);
builder.loadDataFilePrivateV6(ipv6UserInputStream);
}
}else if ("SINGLE".equals(CommonConfig.CLUSTER_OR_SINGLE)){
byte[] ipv4BuiltBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v4_built_in.mmdb");
if (ipv4BuiltBytes!=null){
InputStream ipv4BuiltInputStream = new ByteArrayInputStream(ipv4BuiltBytes);
builder.loadDataFileV4(ipv4BuiltInputStream);
}
byte[] ipv6BuiltBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v6_built_in.mmdb");
if (ipv6BuiltBytes!=null){
InputStream ipv6BuiltInputStream = new ByteArrayInputStream(ipv6BuiltBytes);
builder.loadDataFileV6(ipv6BuiltInputStream);
}
byte[] ipv4UserBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v4_user_defined.mmdb");
if (ipv4UserBytes!=null){
InputStream ipv4UserInputStream = new ByteArrayInputStream(ipv4UserBytes);
builder.loadDataFilePrivateV4(ipv4UserInputStream);
}
byte[] ipv6UserBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v6_user_defined.mmdb");
if (ipv6UserBytes!=null){
InputStream ipv6UserInputStream = new ByteArrayInputStream(ipv6UserBytes);
builder.loadDataFilePrivateV6(ipv6UserInputStream);
}
}
ipLookup = builder.build();
}catch (Exception e){
LOG.error("加载失败",e);
}
}
public static void updateIpLook(Map<String, byte[]> knowledgeFileCache){
try{
IpLookupV2.Builder builder = new IpLookupV2.Builder(false);
ipLookup= builder.loadDataFileV4(new ByteArrayInputStream(knowledgeFileCache.get("ip_v4_built_in.mmdb")))
.loadDataFileV6(new ByteArrayInputStream(knowledgeFileCache.get("ip_v6_built_in.mmdb")))
.loadDataFilePrivateV4(new ByteArrayInputStream(knowledgeFileCache.get("ip_v4_user_defined.mmdb")))
.loadDataFilePrivateV6(new ByteArrayInputStream(knowledgeFileCache.get("ip_v6_user_defined.mmdb")))
.build();
}catch (Exception e){
LOG.error("加载失败",e);
}
}
public static void main(String[] args) {
System.out.println(ipLookup.countryLookup("49.7.115.37"));
// String ips = "192.168.50.23,192.168.50.45,192.168.56.9,192.168.56.8,192.168.50.58,192.168.56.7,192.168.56.6,192.168.50.40,192.168.50.19,192.168.50.6,192.168.50.4,192.168.56.17,192.168.50.27,192.168.50.26,192.168.50.18,192.168.56.3,192.168.56.10";
// for (String ip:ips.split(",")){
// System.out.println(ip+"--"+ipLookup.countryLookup(ip));
// }
String ips = "182.168.50.23,182.168.50.45,182.168.56.9,182.168.56.8,92.168.50.58,19.168.56.7,12.168.56.6,2.168.50.40,1.168.50.19,9.168.50.6,2.168.50.4,192.168.56.17,192.168.50.27,192.168.50.26,192.168.50.18,192.168.56.3,192.168.56.10";
for (String ip:ips.split(",")){
System.out.println(ip+"--"+ipLookup.countryLookup(ip));
}
}

View File

@@ -4,26 +4,32 @@ import com.zdjizhi.common.CommonConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import java.util.Optional;
import java.util.Properties;
public class KafkaUtils {
private static Properties getKafkaSinkProperty(){
Properties propertiesproducer = new Properties();
propertiesproducer.setProperty("bootstrap.servers", CommonConfig.KAFKA_OUTPUT_BOOTSTRAP_SERVERS);
// propertiesproducer.setProperty("security.protocol", "SASL_PLAINTEXT");
// propertiesproducer.setProperty("sasl.mechanism", "PLAIN");
// propertiesproducer.setProperty("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"admin\" password=\"galaxy2019\";");
Properties properties = new Properties();
properties.setProperty("bootstrap.servers", CommonConfig.KAFKA_OUTPUT_BOOTSTRAP_SERVERS);
if (CommonConfig.SASL_JAAS_CONFIG_FLAG == 1){
properties.put("security.protocol", "SASL_PLAINTEXT");
properties.put("sasl.mechanism", "PLAIN");
properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""+CommonConfig.SASL_JAAS_CONFIG_USER+"\" password=\""+CommonConfig.SASL_JAAS_CONFIG_PASSWORD+"\";");
}
return propertiesproducer;
return properties;
}
public static FlinkKafkaProducer<String> getKafkaSink(String topic){
return new FlinkKafkaProducer<String>(
FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>(
topic,
new SimpleStringSchema(),
getKafkaSinkProperty()
getKafkaSinkProperty(),
Optional.empty()
);
kafkaProducer.setLogFailuresOnly(true);
return kafkaProducer;
}
}

View File

@@ -0,0 +1,91 @@
package com.zdjizhi.utils;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;
import java.util.concurrent.Executor;
public class NacosUtils {
private static final Logger logger = LoggerFactory.getLogger(NacosUtils.class);
private static Properties nacosProperties = new Properties();
private static Properties commonProperties = new Properties();
private static final String NACOS_SERVER_ADDR = CommonConfigurations.getStringProperty("nacos.server.addr");
private static final String NACOS_STATIC_NAMESPACE = CommonConfigurations.getStringProperty("nacos.static.namespace");
private static final String NACOS_USERNAME = CommonConfigurations.getStringProperty("nacos.username");
private static final String NACOS_PASSWORD = CommonConfigurations.getStringProperty("nacos.password");
private static final String NACOS_STATIC_DATA_ID = CommonConfigurations.getStringProperty("nacos.static.data.id");
private static final String NACOS_STATIC_GROUP = CommonConfigurations.getStringProperty("nacos.static.group");
private static final long NACOS_READ_TIMEOUT = CommonConfigurations.getLongProperty("nacos.read.timeout");
static {
createConfigService();
}
private static void getProperties() {
nacosProperties.setProperty(PropertyKeyConst.SERVER_ADDR, NACOS_SERVER_ADDR);
nacosProperties.setProperty(PropertyKeyConst.NAMESPACE, NACOS_STATIC_NAMESPACE);
nacosProperties.setProperty(PropertyKeyConst.USERNAME, NACOS_USERNAME);
nacosProperties.setProperty(PropertyKeyConst.PASSWORD, NACOS_PASSWORD);
}
private static void createConfigService() {
try {
getProperties();
ConfigService configService = NacosFactory.createConfigService(nacosProperties);
String config = configService.getConfig(NACOS_STATIC_DATA_ID, NACOS_STATIC_GROUP, NACOS_READ_TIMEOUT);
commonProperties.load(new StringReader(config));
configService.addListener(NACOS_STATIC_DATA_ID, NACOS_STATIC_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
commonProperties.clear();
commonProperties.load(new StringReader(configMsg));
} catch (IOException e) {
logger.error("监听nacos配置失败", e);
}
System.out.println(configMsg);
}
});
} catch (Exception e) {
logger.error("Failed to fetch nacos config", e);
}
}
public static String getStringProperty(String key) {
return commonProperties.getProperty(key);
}
public static Integer getIntProperty(String key) {
return Integer.parseInt(commonProperties.getProperty(key));
}
public static Double getDoubleProperty(String key) {
return Double.parseDouble(commonProperties.getProperty(key));
}
public static Long getLongProperty(String key) {
return Long.parseLong(commonProperties.getProperty(key));
}
public static Boolean getBooleanProperty(String key) {
return "true".equals(commonProperties.getProperty(key).toLowerCase().trim());
}
}
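Reads stay straightforward because the listener above keeps commonProperties current; both keys in this small illustration appear elsewhere in this change:
String token = NacosUtils.getStringProperty("bifang.server.token");
Integer period = NacosUtils.getIntProperty("static.threshold.schedule.minutes");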

View File

@@ -8,42 +8,38 @@ stream.execution.job.name=DOS-DETECTION-APPLICATION
kafka.input.parallelism=1
#input kafka topic name
kafka.input.topic.name=DOS-SKETCH-LOG
kafka.input.topic.name=DOS-SKETCH-RECORD
#input kafka bootstrap servers
#kafka.input.bootstrap.servers=192.168.44.12:9092
kafka.input.bootstrap.servers=192.168.44.11:9092,192.168.44.14:9092,192.168.44.15:9092
#kafka.input.bootstrap.servers=192.168.44.12:9094
kafka.input.bootstrap.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
# Kafka consumer group id
kafka.input.group.id=2109061121
kafka.input.group.id=2112080949
#kafka.input.group.id=dos-detection-job-210813-1
# Parallelism for writing metrics to Kafka
kafka.output.metric.parallelism=1
# Output Kafka topic for metrics
#kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS-LOG
#kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS
kafka.output.metric.topic.name=test
# Parallelism for writing events to Kafka
kafka.output.event.parallelism=1
# Output Kafka topic for events
#kafka.output.event.topic.name=DOS-EVENT-LOG
kafka.output.event.topic.name=test
# Topic name and parallelism for sketch logs
#kafka.output.sketch.topic.name=FLATTEN-DOS-SKETCH-LOG
kafka.output.sketch.topic.name=test
kafka.output.sketch.parallelism=1
#kafka.output.event.topic.name=DOS-EVENT
kafka.output.event.topic.name=abcd
# Output Kafka bootstrap servers
kafka.output.bootstrap.servers=192.168.44.33:9092
kafka.output.bootstrap.servers=192.168.44.12:9094
#kafka.output.bootstrap.servers=192.168.44.11:9092,192.168.44.14:9092,192.168.44.15:9092
# ZooKeeper quorum
#hbase.zookeeper.quorum=192.168.44.12:2181
hbase.zookeeper.quorum=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
hbase.zookeeper.quorum=192.168.44.12:2181
#hbase.zookeeper.quorum=192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181
#hbase.zookeeper.quorum=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
# HBase client operation timeout
hbase.client.operation.timeout=30000
@@ -55,6 +51,9 @@ hbase.baseline.table.name=dos:ddos_traffic_baselines
# Limit on the number of baselines read
hbase.baseline.total.num=1000000
# Baseline TTL
hbase.baseline.ttl=30
# First-stage aggregation parallelism (2 keys)
flink.first.agg.parallelism=1
@@ -62,7 +61,7 @@ flink.first.agg.parallelism=1
flink.detection.map.parallelism=1
# Watermark max out-of-orderness
flink.watermark.max.orderness=1
flink.watermark.max.orderness=10
# Computation window size (default 600s)
flink.window.max.time=10
@@ -76,18 +75,123 @@ destination.ip.partition.num=10000
data.center.id.num=15
# Path to the IP MMDB database
ip.mmdb.path=D:\\data\\dat\\
#ip.mmdb.path=/home/ceiec/topology/dat/
ip.mmdb.path=D:\\data\\dat\\bak\\
#ip.mmdb.path=/home/bigdata/topology/dat/
#ip.mmdb.path=/home/bigdata/wlh/topology/dos-detection/dat/
# Sensitivity threshold; rates below this value do not raise alerts
sensitivity.threshold=100
# bifang service address
bifang.server.uri=http://192.168.44.72:80
#bifang.server.uri=http://192.168.44.3:80
# Severity bounds for baseline-based DoS detection
baseline.sessions.minor.threshold=0.1
baseline.sessions.warning.threshold=0.5
baseline.sessions.major.threshold=1
baseline.sessions.severe.threshold=3
baseline.sessions.critical.threshold=8
# Read-only token for accessing bifang (built into bifang, no need to modify)
bifang.server.token=ed04b942-7df4-4e3d-b9a9-a881ca98a867
# Path for password encryption
bifang.server.encryptpwd.path=/v1/user/encryptpwd
# Path for logging in to the bifang service
bifang.server.login.path=/v1/user/login
# Path for fetching the vsysId
bifang.server.policy.vaysid.path=/v1/system/vsys/
# Path for fetching static thresholds
bifang.server.policy.threshold.path=/v1/policy/profile/DoS/detection/threshold
# HTTP client parameters
# Max total connections
http.pool.max.connection=400
# Max connections per route
http.pool.max.per.route=80
# Request timeout to the server (ms)
http.pool.request.timeout=60000
# Connect timeout to the server (ms)
http.pool.connect.timeout=60000
# Response timeout from the server (ms)
http.pool.response.timeout=60000
# Static-threshold fetch period (default 10 minutes)
static.threshold.schedule.minutes=10
# Baseline fetch period (default 7 days)
baseline.threshold.schedule.days=7
baseline.threshold.schedule.days=1
# Kafka SASL authentication settings
sasl.jaas.config.user=admin
#sasl.jaas.config.password=galaxy2019
#sasl.jaas.config.password=ENC(6MleDyA3Z73HSaXiKsDJ2k7Ys8YWLhEJ)
sasl.jaas.config.password=6MleDyA3Z73HSaXiKsDJ2k7Ys8YWLhEJ
# Whether Kafka SASL authentication is enabled (1/0)
sasl.jaas.config.flag=1
# nacos settings
#nacos.server.addr=192.168.44.12:8848
#nacos.namespace=public
#nacos.username=nacos
#nacos.password=nacos
#nacos.data.id=knowledge_base.json
#nacos.group=DEFAULT_GROUP
#nacos.read.timeout=5000
############################## Nacos configuration ######################################
nacos.server.addr=192.168.44.12:8848
nacos.username=nacos
nacos.password=nacos
nacos.read.timeout=5000
############################## Nacos --- knowledge base settings ######################################
nacos.namespace=public
nacos.data.id=knowledge_base.json
nacos.group=DEFAULT_GROUP
############################## Nacos --- static threshold settings ######################################
nacos.static.namespace=test
nacos.static.data.id=dos_detection.properties
nacos.static.group=Galaxy
############################## HTTP configuration ######################################
# HTTP client parameters
# Max total connections
#http.pool.max.connection=400
#
## Max connections per route
#http.pool.max.per.route=80
#
## Request timeout to the server (ms)
#http.pool.request.timeout=60000
#
## Connect timeout to the server (ms)
#http.pool.connect.timeout=60000
#
## Response timeout from the server (ms)
#http.pool.response.timeout=60000
#server.uri=http://192.168.44.12:9098
#server.path=/hos/knowledge_base_hos_bucket
############################## hos token configuration ######################################
hos.token=c21f969b5f03d33d43e04f8f136e7682
############################# Cluster vs. standalone mode ######################################
cluster.or.single=CLUSTER
#cluster.or.single=SINGLE
############################## Cluster-mode config file path ######################################
hdfs.path=/test/TEST/
hdfs.uri.nn1=hdfs://192.168.40.151:9000
hdfs.uri.nn2=hdfs://192.168.40.152:9000
hdfs.user=dos
############################## Standalone-mode config download path ######################################
download.path=D:\\ttt\\
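Taken together, cluster.or.single, the hdfs.* entries, and download.path imply the config-file loading flow: CLUSTER mode pulls the file from the HA HDFS path, SINGLE mode reads it from a local download directory. A sketch of that branch under those assumptions; the class and both loader stubs are hypothetical and not part of this diff:

import java.util.Properties;

// Hedged sketch of the CLUSTER/SINGLE branch implied by the properties above.
// The two loaders are stubs; the real implementations are not shown in this diff.
public class ConfigLoaderSketch {
    static String load(Properties props) {
        String mode = props.getProperty("cluster.or.single", "SINGLE");
        if ("CLUSTER".equalsIgnoreCase(mode)) {
            // Both namenode URIs are passed so the client can fail over between nn1 and nn2
            return readFromHdfs(props.getProperty("hdfs.path"),
                    props.getProperty("hdfs.uri.nn1"),
                    props.getProperty("hdfs.uri.nn2"),
                    props.getProperty("hdfs.user"));
        }
        return readFromLocal(props.getProperty("download.path"));
    }

    static String readFromHdfs(String path, String nn1, String nn2, String user) {
        throw new UnsupportedOperationException("stub");
    }

    static String readFromLocal(String path) {
        throw new UnsupportedOperationException("stub");
    }
}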

View File

@@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/home/tsg/olap/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131702</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.logfile.size</name>
<value>10000000</value>
<description>The max size of each log file</description>
</property>
<property>
<name>hadoop.logfile.count</name>
<value>1</value>
<description>The max number of log files</description>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181</value>
</property>
<property>
<name>ipc.client.connect.timeout</name>
<value>90000</value>
</property>
</configuration>

View File

@@ -0,0 +1,142 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/tsg/olap/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/tsg/olap/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- nn1的RPC通信地址nn1所在地址 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>192.168.40.151:9000</value>
</property>
<!-- nn1的http通信地址外部访问地址 -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>192.168.40.151:50070</value>
</property>
<!-- nn2的RPC通信地址nn2所在地址 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>192.168.40.152:9000</value>
</property>
<!-- nn2的http通信地址外部访问地址 -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>192.168.40.152:50070</value>
</property>
<!-- 指定NameNode的元数据在JournalNode日志上的存放位置(一般和zookeeper部署在一起) -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://192.168.40.151:8485;192.168.40.152:8485;192.168.40.203:8485/ns1</value>
</property>
<!-- 指定JournalNode在本地磁盘存放数据的位置 -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/tsg/olap/hadoop/journal</value>
</property>
<!--客户端通过代理访问namenode访问文件系统HDFS 客户端与Active 节点通信的Java 类使用其确定Active 节点是否活跃 -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!--这是配置自动切换的方法,有多种使用方法,具体可以看官网,在文末会给地址,这里是远程登录杀死的方法 -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
<value>shell(true)</value>
</property>
<!-- 这个是使用sshfence隔离机制时才需要配置ssh免登陆 -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- 配置sshfence隔离机制超时时间这个属性同上如果你是用脚本的方法切换这个应该是可以不配置的 -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- 这个是开启自动故障转移,如果你没有自动故障转移,这个可以先不配 -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>8192</value>
</property>
<!-- namenode处理RPC请求线程数增大该值资源占用不大 -->
<property>
<name>dfs.namenode.handler.count</name>
<value>30</value>
</property>
<!-- datanode处理RPC请求线程数增大该值会占用更多内存 -->
<property>
<name>dfs.datanode.handler.count</name>
<value>40</value>
</property>
<!-- balance时可占用的带宽 -->
<property>
<name>dfs.balance.bandwidthPerSec</name>
<value>104857600</value>
</property>
<!-- 磁盘预留空间该空间不会被hdfs占用单位字节-->
<property>
<name>dfs.datanode.du.reserved</name>
<value>5368709120</value>
</property>
<!-- datanode与namenode连接超时时间单位毫秒 2 * heartbeat.recheck.interval + 30000 -->
<property>
<name>heartbeat.recheck.interval</name>
<value>100000</value>
</property>
</configuration>

View File

@@ -0,0 +1,196 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!--声明两台resourcemanager的地址-->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>rmcluster</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rsm1,rsm2</value>
</property>
<!-- 配置rm1-->
<property>
<name>yarn.resourcemanager.hostname.rsm1</name>
<value>192.168.40.152</value>
</property>
<property>
<!--<name>yarn.resourcemanager.hostname.rm1</name>-->
<name>yarn.resourcemanager.address.rsm1</name>
<value>192.168.40.152:9916</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rsm1</name>
<value>192.168.40.152:9917</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rsm1</name>
<value>192.168.40.152:9918</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rsm1</name>
<value>192.168.40.152:9919</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
<value>192.168.40.152:9920</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rsm1</name>
<value>192.168.40.152:23142</value>
</property>
<!-- 配置rm2-->
<property>
<name>yarn.resourcemanager.hostname.rsm2</name>
<value>192.168.40.203</value>
</property>
<property>
<!--<name>yarn.resourcemanager.hostname.rm1</name>-->
<name>yarn.resourcemanager.address.rsm2</name>
<value>192.168.40.203:9916</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rsm2</name>
<value>192.168.40.203:9917</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rsm2</name>
<value>192.168.40.203:9918</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rsm2</name>
<value>192.168.40.203:9919</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
<value>192.168.40.203:9920</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rsm2</name>
<value>192.168.40.203:23142</value>
</property>
<!--指定zookeeper集群的地址-->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181</value>
</property>
<!--启用自动恢复当任务进行一半rm坏掉就要启动自动恢复默认是false-->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!--启动nm自动恢复当集群重启container开启自动恢复保障任务的可靠性默认为false启动该配置需开启下面配置 -->
<property>
<name>yarn.nodemanager.recovery.enabled</name>
<value>true</value>
</property>
<!--开启nm故障恢复后nm元数据存储路径 -->
<property>
<name>yarn.nodemanager.recovery.dir</name>
<value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
</property>
<!--启用nm恢复时监控功能开启后不会去尝试清理container默认false -->
<property>
<name>yarn.nodemanager.recovery.supervised</name>
<value>true</value>
</property>
<!--配置nm可用的RPC地址默认${yarn.nodemanager.hostname}:0为临时端口。集群重启后nm与rm连接的端口会变化这里指定端口保障nm restart功能 -->
<property>
<name>yarn.nodemanager.address</name>
<value>${yarn.nodemanager.hostname}:9923</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>30720</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>1024</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>30720</value>
</property>
<!--开启日志聚合 -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.nodemanager.heartbeat-interval-ms</name>
<value>3000</value>
</property>
<!--日志保留7天 -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
<property>
<name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
<value>3600</value>
</property>
<property>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>/tmp/logs</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>14</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-vcores</name>
<value>1</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-vcores</name>
<value>14</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.disk-health-checker.enable</name>
<value>false</value>
</property>
<!--ApplicationMaster重启次数配置HA后默认为2-->
<property>
<name>yarn.resourcemanager.am.max-attempts</name>
<value>10000</value>
</property>
<property>
<name>yarn.log.server.url</name>
<value>http://bigdata-151:19888/jobhistory/logs</value>
</property>
</configuration>

View File

@@ -0,0 +1,7 @@
package com.zdjizhi.common;
public class HttpTest {
public static void main(String[] args) throws Exception {
// placeholder: no HTTP checks implemented yet
}
}
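HttpTest is an empty stub; if it is meant to exercise the bifang HTTP access, the http.pool.* values from the properties file map directly onto a pooled Apache HttpClient 4.x setup. A sketch under that assumption (HttpClient 4.x on the classpath is itself an assumption; the URL combines bifang.server.uri with the vsys path configured above):

import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;

public class PooledHttpSketch {
    public static void main(String[] args) throws Exception {
        // Pool sizes and timeouts mirror the http.pool.* properties above
        PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
        cm.setMaxTotal(400);          // http.pool.max.connection
        cm.setDefaultMaxPerRoute(80); // http.pool.max.per.route
        RequestConfig rc = RequestConfig.custom()
                .setConnectionRequestTimeout(60000) // http.pool.request.timeout
                .setConnectTimeout(60000)           // http.pool.connect.timeout
                .setSocketTimeout(60000)            // http.pool.response.timeout
                .build();
        try (CloseableHttpClient client = HttpClients.custom()
                .setConnectionManager(cm)
                .setDefaultRequestConfig(rc)
                .build();
             CloseableHttpResponse resp = client.execute(
                     new HttpGet("http://192.168.44.72:80/v1/system/vsys/"))) {
            System.out.println(EntityUtils.toString(resp.getEntity()));
        }
    }
}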

View File

@@ -0,0 +1,106 @@
package com.zdjizhi.common;
import inet.ipaddr.Address;
import inet.ipaddr.AddressStringException;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import inet.ipaddr.format.util.AddressTrieMap;
import inet.ipaddr.format.util.AssociativeAddressTrie;
import inet.ipaddr.ipv4.IPv4Address;
import inet.ipaddr.ipv4.IPv4AddressAssociativeTrie;
import org.apache.flink.shaded.guava18.com.google.common.collect.Range;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class IpTest {
// Scratch test comparing two structures for IP/prefix lookup:
// a Guava TreeRangeMap keyed by address ranges and the ipaddress library's associative trie.
public static void main(String[] args) throws Exception {
IPv4AddressAssociativeTrie<Integer> trie = new IPv4AddressAssociativeTrie<>();
IPAddress str1 = new IPAddressString("1.2.3.4").getAddress();
IPAddress str2 = new IPAddressString("10.0.0.0/15").getAddress();
IPAddress str3 = new IPAddressString("25.4.2.0/23").getAddress();
IPAddress str4 = new IPAddressString("192.168.8.0/21").getAddress();
IPAddress str5 = new IPAddressString("240.0.0.0/4").getAddress();
IPAddress str6 = new IPAddressString("fc00::0/64").getAddress();
IPAddress str7 = new IPAddressString("fc00::10:1").getAddress();
TreeRangeMap<IPAddress, Object> rangeMap = TreeRangeMap.create();
rangeMap.put(Range.closed(str1.getLower(),str1.getUpper()),1);
rangeMap.put(Range.closed(str2.getLower(),str2.getUpper()),2);
rangeMap.put(Range.closed(str3.getLower(),str3.getUpper()),3);
rangeMap.put(Range.closed(str4.getLower(),str4.getUpper()),4);
rangeMap.put(Range.closed(str5.getLower(),str5.getUpper()),5);
rangeMap.put(Range.closed(str6.getLower(),str6.getUpper()),6);
rangeMap.put(Range.closed(str7.getLower(),str7.getUpper()),7);
IPAddress pv4 = new IPAddressString("255.255.14.255").getAddress();
IPAddress pv42 = new IPAddressString("1.2.3.4").getAddress();
IPAddress pv43 = new IPAddressString("fc00::").getAddress();
IPAddress pv44 = new IPAddressString("fc00::10:1").getAddress();
IPAddress pv45 = new IPAddressString("192.168.42.1").getAddress();
IPAddress pv46 = new IPAddressString("192.168.42.1/32").getAddress();
IPAddress pv47 = new IPAddressString("12.56.4.0").getAddress();
IPAddress mask = pv45.getNetwork().getNetworkMask(24, false);
System.out.println(pv45.isMultiple());
System.out.println(pv46.isMultiple());
System.out.println(pv46.isPrefixed());
System.out.println(pv47.isPrefixed());
System.out.println(pv45+"---"+pv45.toMaxHost().withoutPrefixLength()+"---"+pv45.adjustPrefixLength(pv45.getBitCount()));
System.out.println(pv45+"---mask:"+pv45.mask(mask).toString());
System.out.println(pv45.adjustPrefixLength(pv45.getBitCount())+"---"+pv45.toMaxHost().withoutPrefixLength());
/*
System.out.println(str5.getUpper()+"---"+str5.getLower());
System.out.println(rangeMap.span().contains(pv4));
System.out.println(rangeMap.get(pv4));
System.out.println(rangeMap.get(pv42));
System.out.println(rangeMap.get(pv43));
System.out.println(rangeMap.get(pv44));
*/
/*
System.out.println(str5.toSequentialRange());
// System.out.println(str2.contains(new IPAddressString("10.0.0.2")));
// System.out.println(str5.toAddress().toIPv4().toSequentialRange());
trie.put(str1,1);
trie.put(str2,2);
trie.put(str3,3);
trie.put(str4,4);
trie.put(str5,5);
AddressTrieMap<IPv4Address, Integer> trieMap = new AddressTrieMap<>(trie);
trieMap.forEach((k,v) -> {
System.out.println(k.toString() + "--" + v);
});
System.out.println("-----------------");
trie.forEach((k) -> System.out.println(k.toString()));
System.out.println(str5.contains(pv4));
System.out.println(trie.contains(pv4));
System.out.println(trieMap.get(pv4));
System.out.println(trieMap.containsKey(pv4));
// System.out.println(trieMap.getRange());
// IPAddress str3 = new IPAddressString("fc00::10:1").getAddress();
// IPAddress str4 = new IPAddressString("fc00::10:2/64").getAddress();
// System.out.println(Arrays.toString(str1.mergeToPrefixBlocks(str2,str3,str4)));
*/
}
}

View File

@@ -0,0 +1,101 @@
package com.zdjizhi.common;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.alibaba.nacos.api.exception.NacosException;
import org.junit.Test;
import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;
import java.util.concurrent.Executor;
/**
* @author qidaijie
* @Package com.zdjizhi
* @Description:
* @date 2022/3/10 16:58
*/
public class NacosTest {
/**
* <dependency>
* <groupId>com.alibaba.nacos</groupId>
* <artifactId>nacos-client</artifactId>
* <version>1.2.0</version>
* </dependency>
*/
private static Properties properties = new Properties();
/**
* config data id = config name
*/
private static final String DATA_ID = "dos_baseline.properties";
/**
* config group
*/
private static final String GROUP = "Galaxy";
private void getProperties() {
properties.setProperty(PropertyKeyConst.SERVER_ADDR, "192.168.44.12:8848");
properties.setProperty(PropertyKeyConst.NAMESPACE, "test");
properties.setProperty(PropertyKeyConst.USERNAME, "nacos");
properties.setProperty(PropertyKeyConst.PASSWORD, "nacos");
}
@Test
public void GetConfigurationTest() {
try {
getProperties();
ConfigService configService = NacosFactory.createConfigService(properties);
String content = configService.getConfig(DATA_ID, GROUP, 5000);
Properties nacosConfigMap = new Properties();
nacosConfigMap.load(new StringReader(content));
System.out.println(nacosConfigMap.getProperty("static.sensitivity.threshold"));
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void ListenerConfigurationTest() {
getProperties();
try {
//first get config
ConfigService configService = NacosFactory.createConfigService(properties);
String config = configService.getConfig(DATA_ID, GROUP, 5000);
// System.out.println(config);
//start listener
configService.addListener(DATA_ID, GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
System.out.println(configMsg);
}
});
} catch (Exception e) {
e.printStackTrace();
}
//keep running: change the nacos config and the new content is printed
/*
while (true) {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
*/
}
}