16 Commits

Author SHA1 Message Date
unknown
11747d9964 GAL-296 Resolve dependency conflicts when running in Yarn mode 2023-03-07 18:58:25 +08:00
wanglihui
ce15a27a1b TSG-13094 Fix vsys id in DoS Event logs 2022-12-21 17:11:14 +08:00
wanglihui
01bbe562c9 Merge branch 'tsg-22.11' of git.mesalab.cn:bigdata/tsg/flink-dos-detection into tsg-22.11 2022-12-19 10:18:44 +08:00
wanglihui
f07651cf14 Rename the vsys id field to common_t_vsys_id 2022-12-19 10:18:17 +08:00
unknown
7c201a8a3f Add Nacos Namespace configuration; remove the Flush operation when updating to HDFS 2022-12-16 16:52:33 +08:00
wanglihui
78435d54ea Merge branch 'knowledge' of https://git.mesalab.cn/bigdata/tsg/flink-dos-detection into tsg-22.11 2022-12-06 19:11:30 +08:00
wanglihui
76c9247bb9 Merge branch 'knowledge' of https://git.mesalab.cn/bigdata/tsg/flink-dos-detection into tsg-22.11 2022-12-06 19:10:28 +08:00
wanglihui
488b7c6644 Adjust some log output 2022-12-06 17:13:09 +08:00
unknown
0662d265dd GAL-224 Disable SSL checks; add HDFS high-availability settings 2022-11-28 16:42:56 +08:00
unknown
87fe11dc93 Optimize how data is persisted in standalone mode 2022-11-28 15:38:36 +08:00
unknown
9a2a5b3957 GAL-224 DoS detection supports dynamic knowledge-base loading 2022-11-23 15:30:24 +08:00
fy
c58acdcfc9 Initial preparation for the Flink knowledge-base integration design 2022-11-14 14:34:00 +08:00
wanglihui
b409150532 Fix dependency conflict bug when submitting to the yarn cluster 2022-10-17 18:15:11 +08:00
wanglihui
7e6d5fcfc5 Fix inability to get vsys id 2022-10-17 11:10:35 +08:00
wanglihui
859cd379e5 DoS detection supports vsys id 2022-09-23 18:37:33 +08:00
wanglihui
47ddef9bca Default VSYS ID for DoS detection event logs is 1 2022-08-19 10:17:52 +08:00
37 changed files with 1883 additions and 320 deletions

pom.xml

@@ -12,6 +12,8 @@
<flink.version>1.13.1</flink.version>
<hive.version>2.1.1</hive.version>
<hadoop.version>2.7.1</hadoop.version>
<scala.binary.version>2.11</scala.binary.version>
<jsonpath.version>2.4.0</jsonpath.version>
</properties>
<repositories>
@@ -96,8 +98,27 @@
</execution>
</executions>
</plugin>
</plugins>
<resources>
<resource>
<directory>properties</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
</includes>
<filtering>false</filtering>
</resource>
<resource>
<directory>src/main</directory>
<includes>
<include>log4j.properties</include>
</includes>
<filtering>false</filtering>
</resource>
</resources>
</build>
<dependencies>
@@ -109,16 +130,22 @@
<version>1.9.3</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.21</version>
</dependency>
<!--<dependency>-->
<!--<groupId>org.slf4j</groupId>-->
<!--<artifactId>slf4j-api</artifactId>-->
<!--<version>1.7.21</version>-->
<!--</dependency>-->
<!--<dependency>-->
<!--<groupId>org.slf4j</groupId>-->
<!--<artifactId>slf4j-log4j12</artifactId>-->
<!--<version>1.7.21</version>-->
<!--</dependency>-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.21</version>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>${jsonpath.version}</version>
</dependency>
<dependency>
@@ -139,6 +166,7 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.7.1</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<artifactId>zookeeper</artifactId>
@@ -152,9 +180,20 @@
<artifactId>guava</artifactId>
<groupId>com.google.guava</groupId>
</exclusion>
<exclusion>
<artifactId>slf4j-api</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
<groupId>org.apache.hbase</groupId>
@@ -170,6 +209,14 @@
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>slf4j-api</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>hadoop-common</artifactId>
<groupId>org.apache.hadoop</groupId>
</exclusion>
</exclusions>
</dependency>
@@ -210,7 +257,7 @@
<dependency>
<groupId>com.zdjizhi</groupId>
<artifactId>galaxy</artifactId>
<version>1.0.8</version>
<version>1.1.1</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
@@ -244,6 +291,10 @@
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<artifactId>slf4j-api</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-codec/commons-codec -->
@@ -271,6 +322,27 @@
<artifactId>guava</artifactId>
<version>22.0</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.2</version>
</dependency>
<dependency>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>2.4.0</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
</dependency>
</dependencies>
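
The json-path artifact added above backs the JSONPath filter that the new HttpSource/SingleHttpSource classes (further down in this diff) use to pick knowledge-file metadata out of the Nacos config. A minimal standalone sketch of that usage; the sample payload is hypothetical and only the filter shape is taken from the sources:

import com.jayway.jsonpath.JsonPath;
import java.util.List;
import java.util.Map;

public class JsonPathSketch {
    // Same filter shape as the EXPR constant in HttpSource/SingleHttpSource
    private static final String EXPR =
            "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in'])].['name','sha256','format','path']";

    public static void main(String[] args) {
        // Hypothetical sample payload; the real Nacos config is assumed to be a JSON array of file metadata
        String config = "[{\"name\":\"ip_v4_built_in\",\"version\":\"latest\",\"sha256\":\"abc123\","
                + "\"format\":\"mmdb\",\"path\":\"/knowledge/ip_v4_built_in.mmdb\"}]";
        // The filter keeps only the latest matching libraries and projects four fields per match
        List<Map<String, Object>> metaList = JsonPath.parse(config).read(EXPR);
        metaList.forEach(System.out::println);
    }
}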

com/zdjizhi/common/CommonConfig.java

@@ -1,7 +1,6 @@
package com.zdjizhi.common;
import com.zdjizhi.utils.CommonConfigurations;
import com.zdjizhi.utils.NacosUtils;
import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
/**
@@ -10,6 +9,11 @@ import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
*/
public class CommonConfig {
/**
* Default separator for location-library file names
*/
public static final String LOCATION_SEPARATOR = ".";
private static StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
static {
@@ -49,17 +53,7 @@ public class CommonConfig {
public static final String IP_MMDB_PATH = CommonConfigurations.getStringProperty("ip.mmdb.path");
// public static final int STATIC_SENSITIVITY_THRESHOLD = NacosUtils.getIntProperty("static.sensitivity.threshold");
// public static final double BASELINE_SENSITIVITY_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sensitivity.threshold");
//
// public static final double BASELINE_SESSIONS_MINOR_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.minor.threshold");
// public static final double BASELINE_SESSIONS_WARNING_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.warning.threshold");
// public static final double BASELINE_SESSIONS_MAJOR_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.major.threshold");
// public static final double BASELINE_SESSIONS_SEVERE_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.severe.threshold");
// public static final double BASELINE_SESSIONS_CRITICAL_THRESHOLD = NacosUtils.getDoubleProperty("baseline.sessions.critical.threshold");
public static final String BIFANG_SERVER_URI = CommonConfigurations.getStringProperty("bifang.server.uri");
public static final String BIFANG_SERVER_TOKEN = CommonConfigurations.getStringProperty("bifang.server.token");
public static final String BIFANG_SERVER_ENCRYPTPWD_PATH = CommonConfigurations.getStringProperty("bifang.server.encryptpwd.path");
public static final String BIFANG_SERVER_LOGIN_PATH = CommonConfigurations.getStringProperty("bifang.server.login.path");
public static final String BIFANG_SERVER_POLICY_THRESHOLD_PATH = CommonConfigurations.getStringProperty("bifang.server.policy.threshold.path");
@@ -80,6 +74,26 @@ public class CommonConfig {
public static final int SASL_JAAS_CONFIG_FLAG = CommonConfigurations.getIntProperty("sasl.jaas.config.flag");
public static final String NACOS_SERVER_ADDR = CommonConfigurations.getStringProperty("nacos.server.addr");
public static final String NACOS_USERNAME = CommonConfigurations.getStringProperty("nacos.username");
public static final String NACOS_PASSWORD = CommonConfigurations.getStringProperty("nacos.password");
public static final String NACOS_NAMESPACE = CommonConfigurations.getStringProperty("nacos.namespace");
public static final String NACOS_DATA_ID = CommonConfigurations.getStringProperty("nacos.data.id");
public static final String NACOS_GROUP = CommonConfigurations.getStringProperty("nacos.group");
public static final int NACOS_READ_TIMEOUT = CommonConfigurations.getIntProperty("nacos.read.timeout");
public static final String HOS_TOKEN = CommonConfigurations.getStringProperty("hos.token");
public static final String CLUSTER_OR_SINGLE = CommonConfigurations.getStringProperty("cluster.or.single");
public static final String HDFS_URI_NS1 = CommonConfigurations.getStringProperty("hdfs.uri.nn1");
public static final String HDFS_URI_NS2 = CommonConfigurations.getStringProperty("hdfs.uri.nn2");
public static final String HDFS_PATH = CommonConfigurations.getStringProperty("hdfs.path");
public static final String HDFS_USER = CommonConfigurations.getStringProperty("hdfs.user");
public static final String DOWNLOAD_PATH = CommonConfigurations.getStringProperty("download.path");
public static void main(String[] args) {
StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
// Configure the password/salt used for encryption and decryption
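
For reference, the HDFS_URI_NS1/HDFS_URI_NS2 pair above feeds the HDFS high-availability support mentioned in commit GAL-224. The project's HdfsUtils is not part of this diff; the following is only a minimal sketch of how a two-namenode HA client Configuration is typically assembled, assuming a nameservice named "ns" and host:port values in the two config keys:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HdfsHaSketch {
    // Sketch only: the repo's actual HdfsUtils may differ.
    public static FileSystem haFileSystem() throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://ns");
        conf.set("dfs.nameservices", "ns");
        conf.set("dfs.ha.namenodes.ns", "nn1,nn2");
        // Assumes hdfs.uri.nn1 / hdfs.uri.nn2 hold host:port pairs
        conf.set("dfs.namenode.rpc-address.ns.nn1", CommonConfig.HDFS_URI_NS1);
        conf.set("dfs.namenode.rpc-address.ns.nn2", CommonConfig.HDFS_URI_NS2);
        conf.set("dfs.client.failover.proxy.provider.ns",
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        return FileSystem.get(new URI("hdfs://ns"), conf, CommonConfig.HDFS_USER);
    }
}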

com/zdjizhi/common/CustomFile.java

@@ -0,0 +1,26 @@
package com.zdjizhi.common;
import java.io.Serializable;
public class CustomFile implements Serializable {
String fileName;
byte[] content;
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
public byte[] getContent() {
return content;
}
public void setContent(byte[] content) {
this.content = content;
}
}
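
Since the build now pulls in Lombok 1.18.2 (see pom.xml above), a holder like CustomFile could also be written more compactly; an equivalent sketch, assuming Lombok annotation processing is enabled in the build:

import java.io.Serializable;
import lombok.Data;

// Equivalent to the hand-written CustomFile above: @Data generates the
// getters and setters shown there, plus equals/hashCode/toString.
@Data
public class CustomFile implements Serializable {
    private String fileName;
    private byte[] content;
}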

com/zdjizhi/common/DosDetectionThreshold.java

@@ -2,6 +2,7 @@ package com.zdjizhi.common;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Objects;
/**
@@ -16,44 +17,8 @@ public class DosDetectionThreshold implements Serializable {
private long bitsPerSec;
private long sessionsPerSec;
private int isValid;
@Override
public String toString() {
return "DosDetectionThreshold{" +
"profileId='" + profileId + '\'' +
", attackType='" + attackType + '\'' +
", serverIpList=" + serverIpList +
", serverIpAddr='" + serverIpAddr + '\'' +
", packetsPerSec=" + packetsPerSec +
", bitsPerSec=" + bitsPerSec +
", sessionsPerSec=" + sessionsPerSec +
", isValid=" + isValid +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DosDetectionThreshold threshold = (DosDetectionThreshold) o;
return packetsPerSec == threshold.packetsPerSec &&
bitsPerSec == threshold.bitsPerSec &&
sessionsPerSec == threshold.sessionsPerSec &&
isValid == threshold.isValid &&
Objects.equals(profileId, threshold.profileId) &&
Objects.equals(attackType, threshold.attackType) &&
Objects.equals(serverIpList, threshold.serverIpList) &&
Objects.equals(serverIpAddr, threshold.serverIpAddr);
}
@Override
public int hashCode() {
return Objects.hash(profileId, attackType, serverIpList, serverIpAddr, packetsPerSec, bitsPerSec, sessionsPerSec, isValid);
}
private int vsysId;
private Integer[] superiorIds;
public String getProfileId() {
return profileId;
@@ -118,4 +83,36 @@ public class DosDetectionThreshold implements Serializable {
public void setIsValid(int isValid) {
this.isValid = isValid;
}
public int getVsysId() {
return vsysId;
}
public void setVsysId(int vsysId) {
this.vsysId = vsysId;
}
public Integer[] getSuperiorIds() {
return superiorIds;
}
public void setSuperiorIds(Integer[] superiorIds) {
this.superiorIds = superiorIds;
}
@Override
public String toString() {
return "DosDetectionThreshold{" +
"profileId='" + profileId + '\'' +
", attackType='" + attackType + '\'' +
", serverIpList=" + serverIpList +
", serverIpAddr='" + serverIpAddr + '\'' +
", packetsPerSec=" + packetsPerSec +
", bitsPerSec=" + bitsPerSec +
", sessionsPerSec=" + sessionsPerSec +
", isValid=" + isValid +
", vsysId=" + vsysId +
", superiorIds=" + Arrays.toString(superiorIds) +
'}';
}
}

com/zdjizhi/common/DosEventLog.java

@@ -1,11 +1,11 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.Objects;
public class DosEventLog implements Serializable {
public class DosEventLog implements Serializable,Cloneable {
private long log_id;
private int vsys_id;
private long start_time;
private long end_time;
private String attack_type;
@@ -27,6 +27,14 @@ public class DosEventLog implements Serializable {
this.log_id = log_id;
}
public int getVsys_id() {
return vsys_id;
}
public void setVsys_id(int vsys_id) {
this.vsys_id = vsys_id;
}
public long getStart_time() {
return start_time;
}
@@ -125,8 +133,9 @@ public class DosEventLog implements Serializable {
@Override
public String toString() {
return "dosEventLog{" +
return "DosEventLog{" +
"log_id=" + log_id +
", vsys_id=" + vsys_id +
", start_time=" + start_time +
", end_time=" + end_time +
", attack_type='" + attack_type + '\'' +
@@ -143,31 +152,7 @@ public class DosEventLog implements Serializable {
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosEventLog)) {
return false;
}
DosEventLog that = (DosEventLog) o;
return getLog_id() == that.getLog_id() &&
getStart_time() == that.getStart_time() &&
getEnd_time() == that.getEnd_time() &&
getSession_rate() == that.getSession_rate() &&
getPacket_rate() == that.getPacket_rate() &&
getBit_rate() == that.getBit_rate() &&
Objects.equals(getAttack_type(), that.getAttack_type()) &&
Objects.equals(getSeverity(), that.getSeverity()) &&
Objects.equals(getConditions(), that.getConditions()) &&
Objects.equals(getDestination_ip(), that.getDestination_ip()) &&
Objects.equals(getDestination_country(), that.getDestination_country()) &&
Objects.equals(getSource_ip_list(), that.getSource_ip_list()) &&
Objects.equals(getSource_country_list(), that.getSource_country_list());
}
@Override
public int hashCode() {
return Objects.hash(getLog_id(), getStart_time(), getEnd_time(), getAttack_type(), getSeverity(), getConditions(), getDestination_ip(), getDestination_country(), getSource_ip_list(), getSource_country_list(), getSession_rate(), getPacket_rate(), getBit_rate());
public Object clone() throws CloneNotSupportedException {
return super.clone();
}
}

com/zdjizhi/common/DosMetricsLog.java

@@ -12,6 +12,7 @@ public class DosMetricsLog implements Serializable {
private long packet_rate;
private long bit_rate;
private int partition_num;
private int vsys_id;
public int getPartition_num() {
return partition_num;
@@ -69,6 +70,14 @@ public class DosMetricsLog implements Serializable {
this.bit_rate = bit_rate;
}
public int getVsys_id() {
return vsys_id;
}
public void setVsys_id(int vsys_id) {
this.vsys_id = vsys_id;
}
@Override
public String toString() {
return "DosMetricsLog{" +
@@ -79,29 +88,7 @@ public class DosMetricsLog implements Serializable {
", packet_rate=" + packet_rate +
", bit_rate=" + bit_rate +
", partition_num=" + partition_num +
", vsys_id=" + vsys_id +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosMetricsLog)) {
return false;
}
DosMetricsLog that = (DosMetricsLog) o;
return getSketch_start_time() == that.getSketch_start_time() &&
getSession_rate() == that.getSession_rate() &&
getPacket_rate() == that.getPacket_rate() &&
getBit_rate() == that.getBit_rate() &&
getPartition_num() == that.getPartition_num() &&
Objects.equals(getAttack_type(), that.getAttack_type()) &&
Objects.equals(getDestination_ip(), that.getDestination_ip());
}
@Override
public int hashCode() {
return Objects.hash(getSketch_start_time(), getAttack_type(), getDestination_ip(), getSession_rate(), getPacket_rate(), getBit_rate(), getPartition_num());
}
}

com/zdjizhi/common/DosSketchLog.java

@@ -15,6 +15,7 @@ public class DosSketchLog implements Serializable {
private long sketch_sessions;
private long sketch_packets;
private long sketch_bytes;
private int vsys_id;
@Override
public String toString() {
@@ -29,35 +30,10 @@ public class DosSketchLog implements Serializable {
", sketch_sessions=" + sketch_sessions +
", sketch_packets=" + sketch_packets +
", sketch_bytes=" + sketch_bytes +
", vsys_id=" + vsys_id +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosSketchLog)) {
return false;
}
DosSketchLog sketchLog = (DosSketchLog) o;
return getSketch_start_time() == sketchLog.getSketch_start_time() &&
getSketch_duration() == sketchLog.getSketch_duration() &&
getSketch_sessions() == sketchLog.getSketch_sessions() &&
getSketch_packets() == sketchLog.getSketch_packets() &&
getSketch_bytes() == sketchLog.getSketch_bytes() &&
Objects.equals(getCommon_sled_ip(), sketchLog.getCommon_sled_ip()) &&
Objects.equals(getCommon_data_center(), sketchLog.getCommon_data_center()) &&
Objects.equals(getAttack_type(), sketchLog.getAttack_type()) &&
Objects.equals(getSource_ip(), sketchLog.getSource_ip()) &&
Objects.equals(getDestination_ip(), sketchLog.getDestination_ip());
}
@Override
public int hashCode() {
return Objects.hash(getCommon_sled_ip(), getCommon_data_center(), getSketch_start_time(), getSketch_duration(), getAttack_type(), getSource_ip(), getDestination_ip(), getSketch_sessions(), getSketch_packets(), getSketch_bytes());
}
public String getCommon_sled_ip() {
return common_sled_ip;
}
@@ -137,4 +113,12 @@ public class DosSketchLog implements Serializable {
public void setSketch_bytes(long sketch_bytes) {
this.sketch_bytes = sketch_bytes;
}
public int getVsys_id() {
return vsys_id;
}
public void setVsys_id(int vsys_id) {
this.vsys_id = vsys_id;
}
}

com/zdjizhi/common/DosVsysId.java

@@ -1,22 +1,32 @@
package com.zdjizhi.common;
import java.util.Objects;
import java.util.Arrays;
public class DosVsysId {
private int vsysId;
private Integer id;
private Integer[] superiorIds;
public int getVsysId() {
return vsysId;
public Integer getId() {
return id;
}
public void setVsysId(int vsysId) {
this.vsysId = vsysId;
public void setId(Integer id) {
this.id = id;
}
public Integer[] getSuperiorIds() {
return superiorIds;
}
public void setSuperiorIds(Integer[] superiorIds) {
this.superiorIds = superiorIds;
}
@Override
public String toString() {
return "DosVsysId{" +
"vsysId=" + vsysId +
"id=" + id +
", superiorIds=" + Arrays.toString(superiorIds) +
'}';
}
}

com/zdjizhi/common/KnowledgeLog.java

@@ -0,0 +1,91 @@
package com.zdjizhi.common;
public class KnowledgeLog {
public String id;
public String name;
public String path;
public Long size;
public String format;
public String sha256;
public String version;
public String updateTime;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
public Long getSize() {
return size;
}
public void setSize(Long size) {
this.size = size;
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public String getSha256() {
return sha256;
}
public void setSha256(String sha256) {
this.sha256 = sha256;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getUpdateTime() {
return updateTime;
}
public void setUpdateTime(String updateTime) {
this.updateTime = updateTime;
}
@Override
public String toString() {
return "KnowledgeLog{" +
"id='" + id + '\'' +
", name='" + name + '\'' +
", path='" + path + '\'' +
", size=" + size +
", format='" + format + '\'' +
", sha256='" + sha256 + '\'' +
", version='" + version + '\'' +
", updateTime='" + updateTime + '\'' +
'}';
}
}

com/zdjizhi/etl/DosDetection.java

@@ -1,17 +1,17 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.*;
import com.zdjizhi.utils.*;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.text.StrBuilder;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import java.math.BigDecimal;
import java.text.NumberFormat;
@@ -23,12 +23,12 @@ import java.util.concurrent.TimeUnit;
/**
* @author wlh
*/
public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
public class DosDetection extends BroadcastProcessFunction<DosSketchLog,Map<String, byte[]>, DosEventLog> {
private static final Logger logger = LoggerFactory.getLogger(DosDetection.class);
private static final Log logger = LogFactory.get();
private static Map<String, Map<String, DosBaselineThreshold>> baselineMap = new HashMap<>();
private final static NumberFormat PERCENT_INSTANCE = NumberFormat.getPercentInstance();
private HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> thresholdRangeMap;
private HashMap<Integer,HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> thresholdRangeMap;
private final static int BASELINE_SIZE = 144;
private final static int STATIC_CONDITION_TYPE = 1;
@@ -46,6 +46,12 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(2,
new BasicThreadFactory.Builder().namingPattern("Dos-Detection-%d").daemon(true).build());
try {
super.open(parameters);
logger.info("begin init");
IpUtils.loadIpLook();
logger.info("init over");
executorService.scheduleAtFixedRate(() -> thresholdRangeMap = ParseStaticThreshold.createStaticThreshold(), 0,
CommonConfig.STATIC_THRESHOLD_SCHEDULE_MINUTES, TimeUnit.MINUTES);
@@ -58,56 +64,62 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
}
@Override
public DosEventLog map(DosSketchLog value) {
public void processElement(DosSketchLog value, ReadOnlyContext ctx, Collector<DosEventLog> out) {
DosEventLog finalResult = null;
try {
String destinationIp = value.getDestination_ip();
int vsysId = value.getVsys_id();
String key = destinationIp + "-" + vsysId;
String attackType = value.getAttack_type();
IPAddress destinationIpAddress = new IPAddressString(destinationIp).getAddress();
DosDetectionThreshold threshold = thresholdRangeMap.getOrDefault(attackType, TreeRangeMap.create()).get(destinationIpAddress);
logger.debug("当前判断IP{}, 类型: {}", destinationIp, attackType);
if (threshold == null && baselineMap.containsKey(destinationIp)) {
finalResult = getDosEventLogByBaseline(value);
} else if (threshold == null && !baselineMap.containsKey(destinationIp)) {
DosDetectionThreshold threshold = null;
if (thresholdRangeMap.containsKey(vsysId)){
threshold = thresholdRangeMap.get(vsysId).getOrDefault(attackType, TreeRangeMap.create()).get(destinationIpAddress);
}
logger.debug("当前判断IP{}, 类型: {}", key, attackType);
if (threshold == null && baselineMap.containsKey(key)) {
finalResult = getDosEventLogByBaseline(value,key);
} else if (threshold == null && !baselineMap.containsKey(key)) {
finalResult = getDosEventLogBySensitivityThreshold(value);
} else if (threshold != null) {
finalResult = getDosEventLogByStaticThreshold(value, threshold);
} else {
logger.debug("未获取到当前server IP{} 类型 {} 静态阈值 和 baseline", destinationIp, attackType);
logger.debug("未获取到当前server IP{} 类型 {} 静态阈值 和 baseline", key, attackType);
}
return finalResult;
if (finalResult != null){
out.collect(finalResult);
}
}
@Override
public void processBroadcastElement(Map<String, byte[]> value, Context ctx, Collector<DosEventLog> out) throws Exception {
IpUtils.updateIpLook(value);
}
private DosEventLog getDosEventLogBySensitivityThreshold(DosSketchLog value) {
DosEventLog result = null;
long sketchSessions = value.getSketch_sessions();
if (sketchSessions > NacosUtils.getIntProperty("static.sensitivity.threshold")) {
long diff = sketchSessions - NacosUtils.getIntProperty("static.sensitivity.threshold");
result = getDosEventLog(value, NacosUtils.getIntProperty("static.sensitivity.threshold"), diff, SENSITIVITY_CONDITION_TYPE, SESSIONS_TAG);
result.setSeverity(Severity.MAJOR.severity);
}
return result;
Integer staticSensitivityThreshold = NacosUtils.getIntProperty("static.sensitivity.threshold");
long diff = sketchSessions - staticSensitivityThreshold;
return getDosEventLog(value, staticSensitivityThreshold, diff, SENSITIVITY_CONDITION_TYPE, SESSIONS_TAG);
}
private DosEventLog getDosEventLogByBaseline(DosSketchLog value) {
DosEventLog result = null;
String destinationIp = value.getDestination_ip();
private DosEventLog getDosEventLogByBaseline(DosSketchLog value,String key) {
String attackType = value.getAttack_type();
long sketchSessions = value.getSketch_sessions();
if (sketchSessions > NacosUtils.getIntProperty("static.sensitivity.threshold")) {
DosBaselineThreshold dosBaselineThreshold = baselineMap.get(destinationIp).get(attackType);
Integer base = getBaseValue(dosBaselineThreshold, value);
long diff = sketchSessions - base;
result = getDosEventLog(value, base, diff, BASELINE_CONDITION_TYPE, SESSIONS_TAG);
}
return result;
DosBaselineThreshold dosBaselineThreshold = baselineMap.get(key).get(attackType);
Integer base = getBaseValue(dosBaselineThreshold, value);
long diff = sketchSessions - base;
return getDosEventLog(value, base, diff, BASELINE_CONDITION_TYPE, SESSIONS_TAG);
}
private DosEventLog getDosEventLogByStaticThreshold(DosSketchLog value, DosDetectionThreshold threshold) {
private DosEventLog getDosEventLogByStaticThreshold(DosSketchLog value, DosDetectionThreshold threshold) throws CloneNotSupportedException {
long base = threshold.getSessionsPerSec();
long diff = value.getSketch_sessions() - base;
DosEventLog result = getDosEventLog(value, base, diff, STATIC_CONDITION_TYPE, SESSIONS_TAG);
@@ -121,6 +133,21 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
result = getDosEventLog(value, base, diff, STATIC_CONDITION_TYPE, BITS_TAG);
}
}
/*
ArrayList<DosEventLog> dosEventLogs = new ArrayList<>();
if (result != null){
dosEventLogs.add(result);
Integer[] superiorIds = threshold.getSuperiorIds();
if (superiorIds != null && superiorIds.length > 0){
for (Integer integer:superiorIds){
DosEventLog clone = (DosEventLog) result.clone();
clone.setVsys_id(integer);
clone.setLog_id(SnowflakeId.generateId());
dosEventLogs.add(clone);
}
}
}
*/
return result;
}
@@ -131,11 +158,17 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
if (diff > 0 && base != 0) {
double percent = getDiffPercent(diff, base);
Severity severity = judgeSeverity(percent);
Integer staticSensitivityThreshold = NacosUtils.getIntProperty("static.sensitivity.threshold");
if (severity != Severity.NORMAL) {
if (type == BASELINE_CONDITION_TYPE && percent < NacosUtils.getDoubleProperty("baseline.sensitivity.threshold")) {
logger.debug("当前server IP:{},类型:{},基线值{}百分比{}未超过基线敏感阈值,日志详情\n{}", destinationIp, attackType, base, percent, value);
} else {
}else if ((type == BASELINE_CONDITION_TYPE || type == SENSITIVITY_CONDITION_TYPE) && value.getSketch_sessions() < staticSensitivityThreshold){
logger.debug("当前server IP:{},类型:{},基线值{}百分比{}未超过静态敏感阈值,日志详情\n{}",destinationIp, attackType, base, percent, value);
}else {
result = getResult(value, base, severity, percent+1, type, tag);
if (type == SENSITIVITY_CONDITION_TYPE){
result.setSeverity(Severity.MAJOR.severity);
}
logger.info("检测到当前server IP {} 存在 {} 异常,超出基线{} {}倍,基于{}:{}检测,日志详情\n {}", destinationIp,attackType,base,percent,type,tag,result);
}
} else {
@@ -148,6 +181,7 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
private DosEventLog getResult(DosSketchLog value, long base, Severity severity, double percent, int type, String tag) {
DosEventLog dosEventLog = new DosEventLog();
dosEventLog.setLog_id(SnowflakeId.generateId());
dosEventLog.setVsys_id(value.getVsys_id());
dosEventLog.setStart_time(value.getSketch_start_time());
dosEventLog.setEnd_time(value.getSketch_start_time() + value.getSketch_duration());
dosEventLog.setAttack_type(value.getAttack_type());
@@ -192,22 +226,16 @@ public class DosDetection extends RichMapFunction<DosSketchLog, DosEventLog> {
private String getConditions(String percent, long base, long sessions, int type, String tag) {
switch (type) {
case STATIC_CONDITION_TYPE:
return new StrBuilder()
.append("Rate > ")
.append(base).append(" ")
.append(tag).append("/s")
.toString();
return "Rate > " +
base + " " +
tag + "/s";
case BASELINE_CONDITION_TYPE:
return new StrBuilder()
.append(tag).append(" > ")
.append(percent).append(" of baseline")
.toString();
return tag + " > " +
percent + " of baseline";
case SENSITIVITY_CONDITION_TYPE:
return new StrBuilder()
.append(sessions).append(" ")
.append(tag).append("/s Unusually high ")
.append(StringUtils.capitalize(tag))
.toString();
return String.valueOf(sessions) + " " +
tag + "/s Unusually high " +
StringUtils.capitalize(tag);
default:
throw new IllegalArgumentException("Illegal Argument type:" + type + ", known types = [1,2,3]");
}

com/zdjizhi/etl/EtlProcessFunction.java

@@ -1,15 +1,15 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosSketchLog;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple6;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashSet;
@@ -18,14 +18,15 @@ import static com.zdjizhi.sink.OutputStreamSink.outputTag;
/**
* @author 94976
*/
public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosSketchLog, Tuple2<String,String>, TimeWindow> {
public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosSketchLog, Tuple3<String,String,Integer>, TimeWindow> {
private static final Logger logger = LoggerFactory.getLogger(EtlProcessFunction.class);
// private static final Logger logger = LoggerFactory.getLogger(EtlProcessFunction.class);
private static final Log logger = LogFactory.get();
private static final String EMPTY_SOURCE_IP_IPV4 = "0.0.0.0";
private static final String EMPTY_SOURCE_IP_IPV6 = "::";
@Override
public void process(Tuple2<String, String> keys,
public void process(Tuple3<String,String,Integer> keys,
Context context, Iterable<DosSketchLog> elements,
Collector<DosSketchLog> out) {
DosSketchLog middleResult = getMiddleResult(keys, elements);
@@ -40,7 +41,7 @@ public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosS
}
}
private DosSketchLog getMiddleResult(Tuple2<String, String> keys,Iterable<DosSketchLog> elements){
private DosSketchLog getMiddleResult(Tuple3<String,String,Integer> keys,Iterable<DosSketchLog> elements){
DosSketchLog midResuleLog = new DosSketchLog();
Tuple6<Long, Long, Long,String,Long,Long> values = sketchAggregate(elements);
@@ -48,6 +49,7 @@ public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosS
if (values != null){
midResuleLog.setAttack_type(keys.f0);
midResuleLog.setDestination_ip(keys.f1);
midResuleLog.setVsys_id(keys.f2);
midResuleLog.setSketch_start_time(values.f4);
midResuleLog.setSketch_duration(values.f5);
midResuleLog.setSource_ip(values.f3);

com/zdjizhi/etl/ParseBaselineThreshold.java

@@ -1,5 +1,7 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosBaselineThreshold;
import com.zdjizhi.utils.DateUtils;
@@ -9,15 +11,14 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
public class ParseBaselineThreshold {
private static final Logger logger = LoggerFactory.getLogger(ParseBaselineThreshold.class);
// private static final Logger logger = LoggerFactory.getLogger(ParseBaselineThreshold.class);
private static final Log logger = LogFactory.get();
private static ArrayList<String> floodTypeList = new ArrayList<>();
private static Table table = null;

com/zdjizhi/etl/ParseSketchLog.java

@@ -15,8 +15,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.*;
/**
* @author wlh
@@ -52,12 +51,14 @@ public class ParseSketchLog {
long sketchStartTime = Long.parseLong(sketchSource.get("sketch_start_time").toString());
long sketchDuration = Long.parseLong(sketchSource.get("sketch_duration").toString());
String attackType = sketchSource.get("attack_type").toString();
int vsysId = Integer.parseInt(sketchSource.getOrDefault("common_vsys_id", 1).toString());
ArrayList<HashMap<String, Object>> reportIpList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(sketchSource.get("report_ip_list")), listType);
for (HashMap<String, Object> obj : reportIpList) {
DosSketchLog dosSketchLog = new DosSketchLog();
dosSketchLog.setSketch_start_time(sketchStartTime);
dosSketchLog.setSketch_duration(sketchDuration);
dosSketchLog.setAttack_type(attackType);
dosSketchLog.setVsys_id(vsysId);
String sourceIp = obj.get("source_ip").toString();
String destinationIp = obj.get("destination_ip").toString();
long sketchSessions = Long.parseLong(obj.get("sketch_sessions").toString());

com/zdjizhi/etl/ParseStaticThreshold.java

@@ -1,5 +1,7 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.fasterxml.jackson.databind.JavaType;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosDetectionThreshold;
@@ -13,8 +15,6 @@ import org.apache.flink.shaded.guava18.com.google.common.collect.Range;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URISyntaxException;
import java.util.ArrayList;
@@ -25,7 +25,8 @@ import java.util.Map;
* @author wlh
*/
public class ParseStaticThreshold {
private static Logger logger = LoggerFactory.getLogger(ParseStaticThreshold.class);
// private static Logger logger = LoggerFactory.getLogger(ParseStaticThreshold.class);
private static final Log logger = LogFactory.get();
private static String encryptpwd;
private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
@@ -112,7 +113,8 @@ public class ParseStaticThreshold {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("pageSize", -1);
parms.put("orderBy", "vsysId desc");
// parms.put("orderBy", "vsysId desc");
parms.put("type", 1);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_VSYSID_PATH, parms);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
@@ -128,7 +130,7 @@ public class ParseStaticThreshold {
Object list = data.get("list");
if (list != null) {
vsysIdList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), vsysIDType);
logger.info("获取到vsysId{}条", vsysIdList.size());
logger.info("获取到vsysId {}条", vsysIdList.size());
} else {
logger.warn("vsysIdList为空");
}
@@ -149,49 +151,54 @@ public class ParseStaticThreshold {
* @return thresholds
*/
private static ArrayList<DosDetectionThreshold> getDosDetectionThreshold() {
ArrayList<DosDetectionThreshold> thresholds = null;
// ArrayList<DosVsysId> vsysId = getVsysId();
ArrayList<DosDetectionThreshold> vsysThresholds = new ArrayList<>();
ArrayList<DosVsysId> vsysIds = getVsysId();
try {
// if (vsysId != null){
// for (DosVsysId dosVsysId : vsysId) {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("pageSize", -1);
parms.put("orderBy", "profileId asc");
parms.put("isValid", 1);
// parms.put("vsysId", dosVsysId.getVsysId());
parms.put("vsysId", 1);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_THRESHOLD_PATH, parms);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
BasicHeader authorization = new BasicHeader("Authorization", token);
BasicHeader authorization1 = new BasicHeader("Content-Type", "application/x-www-form-urlencoded");
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build(), authorization, authorization1);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
Object list = data.get("list");
if (list != null) {
thresholds = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), thresholdType);
logger.info("获取到静态阈值配置{}条", thresholds.size());
} else {
logger.warn("静态阈值配置为空");
if (vsysIds != null) {
for (DosVsysId dosVsysId : vsysIds) {
Integer vsysId = dosVsysId.getId() == null ? 1 : dosVsysId.getId();
Integer[] superiorIds = dosVsysId.getSuperiorIds();
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("pageSize", -1);
parms.put("orderBy", "profileId asc");
parms.put("isValid", 1);
parms.put("vsysId", vsysId);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_THRESHOLD_PATH, parms);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
BasicHeader authorization = new BasicHeader("Authorization", token);
BasicHeader authorization1 = new BasicHeader("Content-Type", "application/x-www-form-urlencoded");
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build(), authorization, authorization1);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
Object list = data.get("list");
if (list != null) {
ArrayList<DosDetectionThreshold> thresholds = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), thresholdType);
for (DosDetectionThreshold dosDetectionThreshold : thresholds) {
dosDetectionThreshold.setSuperiorIds(superiorIds);
vsysThresholds.add(dosDetectionThreshold);
}
logger.info("获取到vsys id是{}静态阈值配置{}条", vsysId, thresholds.size());
} else {
logger.warn("静态阈值配置为空");
}
} else {
logger.error(msg);
}
}
} else {
logger.error(msg);
}
}
}
// }
// }
} catch (Exception e) {
logger.error("获取静态阈值配置失败,请检查bifang服务或登录配置信息 ", e);
}
return thresholds;
return vsysThresholds;
}
/**
@@ -199,14 +206,17 @@ public class ParseStaticThreshold {
*
* @return threshold RangeMap
*/
static HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> createStaticThreshold() {
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> thresholdRangeMap = new HashMap<>(4);
static HashMap<Integer, HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> createStaticThreshold() {
HashMap<Integer, HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> thresholdRangeMap = new HashMap<>(4);
try {
ArrayList<DosDetectionThreshold> dosDetectionThreshold = getDosDetectionThreshold();
if (dosDetectionThreshold != null && !dosDetectionThreshold.isEmpty()) {
for (DosDetectionThreshold threshold : dosDetectionThreshold) {
String attackType = threshold.getAttackType();
TreeRangeMap<IPAddress, DosDetectionThreshold> treeRangeMap = thresholdRangeMap.getOrDefault(attackType, TreeRangeMap.create());
int vsysId = threshold.getVsysId();
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> rangeMap = thresholdRangeMap.getOrDefault(vsysId, new HashMap<>());
TreeRangeMap<IPAddress, DosDetectionThreshold> treeRangeMap = rangeMap.getOrDefault(attackType, TreeRangeMap.create());
ArrayList<String> serverIpList = threshold.getServerIpList();
for (String sip : serverIpList) {
IPAddressString ipAddressString = new IPAddressString(sip);
@@ -239,7 +249,8 @@ public class ParseStaticThreshold {
}
}
}
thresholdRangeMap.put(attackType, treeRangeMap);
rangeMap.put(attackType, treeRangeMap);
thresholdRangeMap.put(vsysId, rangeMap);
}
}
} catch (Exception e) {
@@ -249,22 +260,27 @@ public class ParseStaticThreshold {
}
public static void main(String[] args) {
/*
ArrayList<DosDetectionThreshold> dosDetectionThreshold = getDosDetectionThreshold();
dosDetectionThreshold.forEach(System.out::println);
// dosDetectionThreshold.forEach(System.out::println);
getVsysId().forEach(System.out::println);
System.out.println("------------------------");
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> staticThreshold = createStaticThreshold();
*/
HashMap<Integer, HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> staticThreshold = createStaticThreshold();
System.out.println("------------------------");
for (String type : staticThreshold.keySet()) {
Map<Range<IPAddress>, DosDetectionThreshold> asMapOfRanges = staticThreshold.get(type).asMapOfRanges();
for (Range<IPAddress> range : asMapOfRanges.keySet()) {
DosDetectionThreshold threshold = asMapOfRanges.get(range);
System.out.println(type + "---" + range + "---" + threshold);
for (Integer integer : staticThreshold.keySet()) {
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> stringTreeRangeMapHashMap = staticThreshold.get(integer);
for (String type : stringTreeRangeMapHashMap.keySet()) {
Map<Range<IPAddress>, DosDetectionThreshold> asMapOfRanges = stringTreeRangeMapHashMap.get(type).asMapOfRanges();
for (Range<IPAddress> range : asMapOfRanges.keySet()) {
DosDetectionThreshold threshold = asMapOfRanges.get(range);
System.out.println(integer + "---" + type + "---" + range + "---" + threshold);
}
System.out.println("------------------------");
}
System.out.println("------------------------");
}
// String s = loginBifangServer();
// System.out.println(s);

com/zdjizhi/etl/TrafficServerIpMetrics.java

@@ -1,15 +1,15 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosMetricsLog;
import com.zdjizhi.common.DosSketchLog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class TrafficServerIpMetrics {
private static final Logger logger = LoggerFactory.getLogger(TrafficServerIpMetrics.class);
// private static final Logger logger = LoggerFactory.getLogger(TrafficServerIpMetrics.class);
private static final Log logger = LogFactory.get();
static DosMetricsLog getOutputMetric(DosSketchLog midResuleLog) {
DosMetricsLog dosMetricsLog = new DosMetricsLog();
@@ -19,6 +19,7 @@ class TrafficServerIpMetrics {
dosMetricsLog.setSession_rate(midResuleLog.getSketch_sessions());
dosMetricsLog.setPacket_rate(midResuleLog.getSketch_packets());
dosMetricsLog.setBit_rate(midResuleLog.getSketch_bytes());
dosMetricsLog.setVsys_id(midResuleLog.getVsys_id());
dosMetricsLog.setPartition_num(getPartitionNumByIp(midResuleLog.getDestination_ip()));
logger.debug("metric 结果已加载:{}",dosMetricsLog.toString());
return dosMetricsLog;

com/zdjizhi/sink/OutputStreamSink.java

@@ -1,5 +1,8 @@
package com.zdjizhi.sink;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.common.DosMetricsLog;
@@ -7,21 +10,27 @@ import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.etl.DosDetection;
import com.zdjizhi.etl.EtlProcessFunction;
import com.zdjizhi.etl.ParseSketchLog;
import com.zdjizhi.source.DosSketchSource;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.Properties;
/**
* @author 94976
*/
public class OutputStreamSink {
private static final Logger logger = LoggerFactory.getLogger(OutputStreamSink.class);
// private static final Logger logger = LoggerFactory.getLogger(OutputStreamSink.class);
private static final Log logger = LogFactory.get();
public static OutputTag<DosMetricsLog> outputTag = new OutputTag<DosMetricsLog>("traffic server ip metrics"){};
@@ -37,7 +46,29 @@ public class OutputStreamSink {
}
private static SingleOutputStreamOperator<DosEventLog> getEventSinkStream(SingleOutputStreamOperator<DosSketchLog> middleStream){
return middleStream.map(new DosDetection()).setParallelism(CommonConfig.FLINK_DETECTION_MAP_PARALLELISM);
DataStreamSource<Map<String, byte[]>> broadcastSource=null;
Properties nacosProperties = new Properties();
nacosProperties.put(PropertyKeyConst.SERVER_ADDR,CommonConfig.NACOS_SERVER_ADDR);
nacosProperties.setProperty(PropertyKeyConst.USERNAME, CommonConfig.NACOS_USERNAME);
nacosProperties.setProperty(PropertyKeyConst.PASSWORD, CommonConfig.NACOS_PASSWORD);
nacosProperties.setProperty(PropertyKeyConst.NAMESPACE, CommonConfig.NACOS_NAMESPACE);
if ("CLUSTER".equals(CommonConfig.CLUSTER_OR_SINGLE)){
broadcastSource = DosSketchSource.broadcastSource(nacosProperties,CommonConfig.HDFS_PATH);
}else {
broadcastSource= DosSketchSource.singleBroadcastSource(nacosProperties);
}
MapStateDescriptor<String,Map> descriptor =
new MapStateDescriptor<>("descriptorTest", Types.STRING, TypeInformation.of(Map.class));
BroadcastStream<Map<String, byte[]>> broadcast = broadcastSource.broadcast(descriptor);
return middleStream
.connect(broadcast)
.process(new DosDetection()).setParallelism(CommonConfig.FLINK_DETECTION_MAP_PARALLELISM);
}
private static SingleOutputStreamOperator<DosSketchLog> getMiddleStream(){
@@ -48,12 +79,13 @@ public class OutputStreamSink {
.setParallelism(CommonConfig.FLINK_FIRST_AGG_PARALLELISM);
}
private static class KeysSelector implements KeySelector<DosSketchLog, Tuple2<String, String>>{
private static class KeysSelector implements KeySelector<DosSketchLog, Tuple3<String, String, Integer>>{
@Override
public Tuple2<String, String> getKey(DosSketchLog dosSketchLog){
return Tuple2.of(
public Tuple3<String, String, Integer> getKey(DosSketchLog dosSketchLog){
return Tuple3.of(
dosSketchLog.getAttack_type(),
dosSketchLog.getDestination_ip());
dosSketchLog.getDestination_ip(),
dosSketchLog.getVsys_id());
}
}

com/zdjizhi/sink/TrafficServerIpMetricsSink.java

@@ -16,6 +16,10 @@ class TrafficServerIpMetricsSink {
DataStream<DosMetricsLog> sideOutput = outputStream.getSideOutput(outputTag);
sideOutput.map(JsonMapper::toJsonString).addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_METRIC_TOPIC_NAME))
.setParallelism(CommonConfig.KAFKA_OUTPUT_METRIC_PARALLELISM);
}
}

com/zdjizhi/source/DosSketchSource.java

@@ -1,12 +1,15 @@
package com.zdjizhi.source;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
@@ -31,4 +34,13 @@ public class DosSketchSource {
new SimpleStringSchema(), properties))
.setParallelism(CommonConfig.KAFKA_INPUT_PARALLELISM);
}
public static DataStreamSource<Map<String, byte[]>> broadcastSource(Properties nacosProperties, String STORE_PATH){
return streamExeEnv.addSource(new HttpSource(nacosProperties, CommonConfig.NACOS_DATA_ID, CommonConfig.NACOS_GROUP, CommonConfig.NACOS_READ_TIMEOUT,STORE_PATH));
}
public static DataStreamSource<Map<String, byte[]>> singleBroadcastSource(Properties nacosProperties){
return streamExeEnv.addSource(new SingleHttpSource(nacosProperties, CommonConfig.NACOS_DATA_ID, CommonConfig.NACOS_GROUP, CommonConfig.NACOS_READ_TIMEOUT));
}
}

com/zdjizhi/source/HttpSource.java

@@ -0,0 +1,182 @@
package com.zdjizhi.source;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.json.JSONObject;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.base.Joiner;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.KnowledgeLog;
import com.zdjizhi.utils.*;
import org.apache.commons.io.IOUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executor;
public class HttpSource extends RichHttpSourceFunction<Map<String, byte[]>> {
private static final Logger logger = LoggerFactory.getLogger(HttpSource.class);
private static final String EXPR = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
// Configuration for connecting to nacos
private Properties nacosProperties;
// nacos data id
private String NACOS_DATA_ID;
// nacos group
private String NACOS_GROUP;
// nacos read timeout
private long NACOS_READ_TIMEOUT;
// Path for uploads to HDFS
private String STORE_PATH;
private ConfigService configService;
// private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
// private static JavaType listType = jsonMapperInstance.createCollectionType(List.class, KnowledgeLog.class);
private static Map<String, String> updateMap = new HashMap<>();
private static HashMap<String, byte[]> knowledgeFileCache;
private boolean isRunning = true;
public HttpSource(Properties nacosProperties, String NACOS_DATA_ID, String NACOS_GROUP, long NACOS_READ_TIMEOUT, String storePath) {
this.nacosProperties = nacosProperties;
this.NACOS_DATA_ID = NACOS_DATA_ID;
this.NACOS_GROUP = NACOS_GROUP;
this.NACOS_READ_TIMEOUT = NACOS_READ_TIMEOUT;
this.STORE_PATH = storePath;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
// Initialize the metadata cache
updateMap = new HashMap<>(16);
// Initialize the location-library cache
knowledgeFileCache = new HashMap<>(16);
logger.info("连接nacos" + nacosProperties.getProperty(PropertyKeyConst.SERVER_ADDR));
configService = NacosFactory.createConfigService(nacosProperties);
}
@Override
public void run(SourceContext ctx) throws Exception {
// ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String format = formatter.format(new Date());
logger.info(format + "receive config from nacos:" + config);
System.out.println(format + "receive config from nacos:" + config);
if (StringUtil.isNotBlank(config)) {
ArrayList<Object> metaList = JsonPath.parse(config).read(EXPR);
loadKnowledge(metaList);
}
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
logger.info("receive update config:" + configMsg);
if (StringUtil.isNotBlank(configMsg)) {
ArrayList<Object> metaList = JsonPath.parse(configMsg).read(EXPR);
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
if (!sha256.equals(updateMap.get(fileName))) {
updateMap.put(fileName, sha256);
updateKnowledge(fileName, filePath);
}
}
ctx.collect(knowledgeFileCache);
}
}
} catch (Exception e) {
logger.error("监听nacos配置失败", e);
}
System.out.println(configMsg);
}
});
while (isRunning) {
Thread.sleep(10000);
}
}
private void loadKnowledge(ArrayList<Object> metaList) {
InputStream inputStream = null;
try {
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
updateMap.put(fileName, sha256);
knowledgeFileCache.put(fileName, IOUtils.toByteArray(inputStream));
}
}
} catch (IOException ioException) {
ioException.printStackTrace();
} finally {
IOUtils.closeQuietly(inputStream);
}
}
private void updateKnowledge(String fileName, String filePath) {
InputStream inputStream = null;
FileOutputStream outputStream = null;
try {
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
byte[] bytes = IOUtils.toByteArray(inputStream);
HdfsUtils.uploadFileByBytes(CommonConfig.HDFS_PATH + fileName, bytes);
knowledgeFileCache.put(fileName, bytes);
} catch (IOException ioException) {
ioException.printStackTrace();
} finally {
IOUtils.closeQuietly(inputStream);
IOUtils.closeQuietly(outputStream);
}
}
@Override
public void cancel() {
this.isRunning = false;
}
}

com/zdjizhi/source/HttpSourceFunction.java

@@ -0,0 +1,6 @@
package com.zdjizhi.source;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
public interface HttpSourceFunction<OUT> extends SourceFunction<OUT> {
}

com/zdjizhi/source/RichHttpSourceFunction.java

@@ -0,0 +1,10 @@
package com.zdjizhi.source;
import org.apache.flink.api.common.functions.AbstractRichFunction;
public abstract class RichHttpSourceFunction<OUT> extends AbstractRichFunction implements HttpSourceFunction<OUT> {
private static final long serialVersionUID = 1L;
public RichHttpSourceFunction() {
}
}

com/zdjizhi/source/SingleHttpSource.java

@@ -0,0 +1,217 @@
package com.zdjizhi.source;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.json.JSONObject;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.base.Joiner;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.KnowledgeLog;
import com.zdjizhi.utils.*;
import org.apache.commons.io.IOUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.*;
import java.util.concurrent.Executor;
public class SingleHttpSource extends RichHttpSourceFunction<Map<String, byte[]>> {
private static final Logger logger = LoggerFactory.getLogger(SingleHttpSource.class);
private static HashMap<String, byte[]> knowledgeFileCache;
private Properties nacosProperties;
private String NACOS_DATA_ID;
private String NACOS_GROUP;
private long NACOS_READ_TIMEOUT;
private static String STORE_PATH;
private ConfigService configService;
// private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
// private static JavaType listType = jsonMapperInstance.createCollectionType(List.class, KnowledgeLog.class);
private static final String EXPR = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
private static Map<String, String> updateMap = new HashMap<>();
private boolean isRunning = true;
public SingleHttpSource(Properties nacosProperties, String NACOS_DATA_ID, String NACOS_GROUP, long NACOS_READ_TIMEOUT) {
this.nacosProperties = nacosProperties;
this.NACOS_DATA_ID = NACOS_DATA_ID;
this.NACOS_GROUP = NACOS_GROUP;
this.NACOS_READ_TIMEOUT = NACOS_READ_TIMEOUT;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
logger.info("连接nacos" + nacosProperties.getProperty(PropertyKeyConst.SERVER_ADDR));
configService = NacosFactory.createConfigService(nacosProperties);
//初始化元数据缓存
updateMap = new HashMap<>(16);
//初始化定位库缓存
knowledgeFileCache = new HashMap<>(16);
}
@Override
public void run(SourceContext<Map<String, byte[]>> ctx) throws Exception {
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
if (StringUtil.isNotBlank(config)) {
ArrayList<Object> metaList = JsonPath.parse(config).read(EXPR);
loadKnowledge(metaList);
}
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
logger.info("receive update config:" + configMsg);
if (StringUtil.isNotBlank(configMsg)) {
ArrayList<Object> metaList = JsonPath.parse(configMsg).read(EXPR);
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
if (!sha256.equals(updateMap.get(fileName))) {
updateMap.put(fileName, sha256);
updateKnowledge(fileName, filePath);
}
}
ctx.collect(knowledgeFileCache);
}
}
} catch (Exception e) {
logger.error("Failed to process the Nacos config update", e);
}
}
});
while (isRunning) {
Thread.sleep(10000);
}
}
private void loadKnowledge(ArrayList<Object> metaList) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
InputStream inputStream = null;
try {
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
updateMap.put(fileName, sha256);
knowledgeFileCache.put(fileName, IOUtils.toByteArray(inputStream));
} catch (IOException ioException) {
logger.error("Failed to load knowledge file " + fileName, ioException);
} finally {
// Close each stream inside the loop; a single reference closed once would leak all but the last download.
IOUtils.closeQuietly(inputStream);
}
}
}
private void updateKnowledge(String fileName, String filePath) {
InputStream inputStream = null;
FileOutputStream outputStream = null;
try {
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
byte[] bytes = IOUtils.toByteArray(inputStream);
// Refresh the in-memory cache, then persist a copy to the local download directory.
knowledgeFileCache.put(fileName, bytes);
FileUtil.mkdir(CommonConfig.DOWNLOAD_PATH);
File file = new File(CommonConfig.DOWNLOAD_PATH.concat(File.separator).concat(fileName));
outputStream = new FileOutputStream(file);
outputStream.write(bytes);
} catch (IOException ioException) {
logger.error("Failed to update knowledge file " + fileName, ioException);
} finally {
IOUtils.closeQuietly(inputStream);
IOUtils.closeQuietly(outputStream);
}
}
@Override
public void cancel() {
this.isRunning = false;
}
}
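For orientation, a minimal wiring sketch for this source. The job class, the Nacos values and the map-based refresh are illustrative assumptions, not code from this repository; the constructor arguments mirror the SingleHttpSource signature above.
import com.alibaba.nacos.api.PropertyKeyConst;
import com.zdjizhi.source.SingleHttpSource;
import com.zdjizhi.utils.IpUtils;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Map;
import java.util.Properties;
public class KnowledgeRefreshSketch {
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
Properties nacosProps = new Properties();
nacosProps.setProperty(PropertyKeyConst.SERVER_ADDR, "192.168.44.12:8848");
nacosProps.setProperty(PropertyKeyConst.NAMESPACE, "public");
// A single instance watches Nacos and emits the whole Map<fileName, fileBytes> cache.
env.addSource(new SingleHttpSource(nacosProps, "knowledge_base.json", "DEFAULT_GROUP", 5000L))
.setParallelism(1)
// Rebuild the shared IP lookup whenever a refreshed cache arrives.
.map((Map<String, byte[]> cache) -> {
IpUtils.updateIpLook(cache);
return "ip lookup refreshed";
})
.print();
env.execute("knowledge-refresh-sketch");
}
}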

View File

@@ -1,9 +1,9 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
@@ -17,7 +17,8 @@ import java.util.concurrent.locks.Lock;
public class DistributedLock implements Lock, Watcher {
private static final Logger logger = LoggerFactory.getLogger(DistributedLock.class);
// private static final Logger logger = LoggerFactory.getLogger(DistributedLock.class);
private static final Log logger = LogFactory.get();
private ZooKeeper zk = null;
/**

View File

@@ -0,0 +1,24 @@
package com.zdjizhi.utils;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
public class FileByteUtils {
public static byte[] getFileBytes(String filePath) throws IOException {
// try-with-resources closes both streams even if a read fails
try (FileInputStream fis = new FileInputStream(new File(filePath));
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024)) {
byte[] b = new byte[1024];
int n;
while ((n = fis.read(b)) != -1) {
bos.write(b, 0, n);
}
return bos.toByteArray();
}
}
}
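FileByteUtils reimplements what java.nio offers in one call; a sketch of the equivalent (an alternative, not what the repo uses):
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
public class FileByteUtilsNio {
public static byte[] getFileBytes(String filePath) throws IOException {
// One call reads the whole file into memory and closes the handle, like the loop above.
return Files.readAllBytes(Paths.get(filePath));
}
}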

View File

@@ -0,0 +1,75 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
public class HdfsUtils {
private static final Log logger = LogFactory.get();
private static FileSystem fileSystem;
static {
Configuration configuration = new Configuration();
try {
// HDFS connection settings
configuration.set("fs.defaultFS","hdfs://ns1");
configuration.set("hadoop.proxyuser.root.hosts","*");
configuration.set("hadoop.proxyuser.root.groups","*");
configuration.set("ha.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
configuration.set("dfs.nameservices","ns1");
configuration.set("dfs.ha.namenodes.ns1","nn1,nn2");
configuration.set("dfs.namenode.rpc-address.ns1.nn1",CommonConfig.HDFS_URI_NS1);
configuration.set("dfs.namenode.rpc-address.ns1.nn2",CommonConfig.HDFS_URI_NS2);
configuration.set("dfs.client.failover.proxy.provider.ns1","org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
// Set the HDFS user
System.setProperty("HADOOP_USER_NAME", CommonConfig.HDFS_USER);
// Create the FileSystem used to talk to HDFS
fileSystem = FileSystem.get(configuration);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static boolean isExists(String filePath) throws IOException {
return fileSystem.exists(new Path(filePath));
}
public static byte[] getFileBytes(String filePath) throws IOException {
Path path = new Path(filePath);
try (FSDataInputStream open = fileSystem.open(path)) {
// available() does not reliably report the full file length; size the buffer from the file status.
byte[] bytes = new byte[(int) fileSystem.getFileStatus(path).getLen()];
open.readFully(0, bytes);
return bytes;
} catch (IOException e) {
logger.error("An I/O exception when files are downloaded from HDFS. Message is :" + e.getMessage());
}
return null;
}
public static void uploadFileByBytes(String filePath,byte[] bytes) throws IOException {
try (FSDataOutputStream fsDataOutputStream = fileSystem.create(new Path(filePath), true)) {
fsDataOutputStream.write(bytes);
// fsDataOutputStream.flush();
} catch (RuntimeException e) {
logger.error("Runtime exception while uploading files to HDFS. Message is :" + e.getMessage());
} catch (IOException e) {
logger.error("An I/O exception when files are uploaded to HDFS. Message is :" + e.getMessage());
}
}
public static void rename(String src, String dst) throws IOException {
fileSystem.rename(new Path(src),new Path(dst));
}
}
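A minimal usage sketch for HdfsUtils; the paths are illustrative only. Writing to a temporary name and renaming keeps readers from seeing a half-written file, since rename is atomic in HDFS.
import java.nio.charset.StandardCharsets;
public class HdfsUtilsUsageSketch {
public static void main(String[] args) throws Exception {
byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
HdfsUtils.uploadFileByBytes("/test/TEST/demo.tmp", payload);
HdfsUtils.rename("/test/TEST/demo.tmp", "/test/TEST/demo.mmdb");
if (HdfsUtils.isExists("/test/TEST/demo.mmdb")) {
byte[] roundTrip = HdfsUtils.getFileBytes("/test/TEST/demo.mmdb");
System.out.println(roundTrip == null ? -1 : roundTrip.length);
}
}
}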

View File

@@ -1,5 +1,7 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.apache.http.*;
import org.apache.http.client.ClientProtocolException;
@@ -18,8 +20,6 @@ import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLHandshakeException;
@@ -38,7 +38,8 @@ public class HttpClientUtils {
/** 全局连接池对象 */
private static final PoolingHttpClientConnectionManager CONN_MANAGER = new PoolingHttpClientConnectionManager();
private static Logger logger = LoggerFactory.getLogger(HttpClientUtils.class);
// private static Logger logger = LoggerFactory.getLogger(HttpClientUtils.class);
private static final Log logger = LogFactory.get();
public static final String ERROR_MESSAGE = "-1";
/*

View File

@@ -0,0 +1,234 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.commons.io.IOUtils;
import org.apache.http.*;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.X509Certificate;
import java.util.Map;
/**
 * HTTP client utility class
 */
public class HttpClientUtils2 {
/** Global connection pool */
private static final PoolingHttpClientConnectionManager CONN_MANAGER = new PoolingHttpClientConnectionManager();
private static Logger logger = LoggerFactory.getLogger(HttpClientUtils2.class);
public static final String ERROR_MESSAGE = "-1";
/*
 * Static initializer: configure the connection pool
 */
static {
// maximum total connections
CONN_MANAGER.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// maximum connections per route
CONN_MANAGER.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
}
/**
 * Overrides certificate validation so SSL checking is skipped, then
 * creates a ConnectionManager with the connection settings.
 *
 * @return connection manager for an HttpClient that supports https
 */
private PoolingHttpClientConnectionManager getSslClientManager() {
try {
// Trust-all manager: certificate validation is intentionally disabled
X509TrustManager trustManager = new X509TrustManager() {
@Override
public X509Certificate[] getAcceptedIssuers() {
return null;
}
@Override
public void checkClientTrusted(X509Certificate[] xcs, String str) {
}
@Override
public void checkServerTrusted(X509Certificate[] xcs, String str) {
}
};
SSLContext ctx = SSLContext.getInstance(SSLConnectionSocketFactory.TLS);
ctx.init(null, new TrustManager[]{trustManager}, null);
SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(ctx, NoopHostnameVerifier.INSTANCE);
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.INSTANCE)
.register("https", socketFactory).build();
// Create the ConnectionManager with the connection settings
PoolingHttpClientConnectionManager connManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
// maximum total connections
connManager.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// maximum connections per route
connManager.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
return connManager;
} catch (KeyManagementException | NoSuchAlgorithmException e) {
throw new RuntimeException(e.getMessage());
}
}
/**
 * Build the HTTP client connection object.
 * @return the HTTP client
 */
private CloseableHttpClient getHttpClient() {
// Request configuration
RequestConfig requestConfig = RequestConfig.custom()
// timeout for leasing a connection from the pool
.setConnectionRequestTimeout(CommonConfig.HTTP_POOL_REQUEST_TIMEOUT)
// connect timeout
.setConnectTimeout(CommonConfig.HTTP_POOL_CONNECT_TIMEOUT)
// socket (response) timeout
.setSocketTimeout(CommonConfig.HTTP_POOL_RESPONSE_TIMEOUT)
.build();
/*
 * Retry handler, set so that a timed-out request does not silently fail.
 * Returning false disables the retry; otherwise the exception type decides.
 */
HttpRequestRetryHandler retry = (exception, executionCount, context) -> {
if (executionCount >= 3) {// give up after three attempts
return false;
}
if (exception instanceof NoHttpResponseException) {// server dropped the connection: retry
return true;
}
if (exception instanceof SSLHandshakeException) {// do not retry SSL handshake failures
return false;
}
if (exception instanceof UnknownHostException) {// target host unreachable
return false;
}
if (exception instanceof ConnectTimeoutException) {// connect timeout
return false;
}
if (exception instanceof HttpHostConnectException) {// connection refused
return false;
}
if (exception instanceof SSLException) {// other SSL errors
return false;
}
if (exception instanceof InterruptedIOException) {// interrupted I/O or timeout: retry
return true;
}
HttpClientContext clientContext = HttpClientContext.adapt(context);
HttpRequest request = clientContext.getRequest();
// retry only requests without a body, which are assumed idempotent
return !(request instanceof HttpEntityEnclosingRequest);
};
ConnectionKeepAliveStrategy myStrategy = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && "timeout".equalsIgnoreCase(param)) {
return Long.parseLong(value) * 1000;
}
}
return 60 * 1000;// default keep-alive of 60s when the server does not specify one
};
// Build the httpClient
return HttpClients.custom()
// apply the request timeouts
.setDefaultRequestConfig(requestConfig)
// apply the retry handler
.setRetryHandler(retry)
.setKeepAliveStrategy(myStrategy)
// use the pooled, SSL-tolerant connection manager
.setConnectionManager(getSslClientManager())
.build();
}
// TODO: 2022/10/19 load the knowledge base
public InputStream httpGetInputStream(String url, int socketTimeout, Header... headers) {
InputStream result = null;
// Obtain the client connection object
CloseableHttpClient httpClient = getHttpClient();// TODO: 2022/10/19 the socketTimeout parameter is no longer applied here
// Build the GET request
HttpGet httpGet = new HttpGet(url);
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpGet.addHeader(h);
}
}
CloseableHttpResponse response = null;
try {
// Execute the request
response = httpClient.execute(httpGet);
// Buffer the whole response entity into memory so the connection can be released early
result = IOUtils.toBufferedInputStream(response.getEntity().getContent());
} catch (ClientProtocolException e) {
logger.error("current file: {},Protocol error:{}", url, e.getMessage());
} catch (ParseException e) {
logger.error("current file: {}, Parser error:{}", url, e.getMessage());
} catch (IOException e) {
logger.error("current file: {},IO error:{}", url, e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consume(response.getEntity());
response.close();
} catch (IOException e) {
logger.error("Release Connection error:{}", e.getMessage());
}
}
}
// Return outside the finally block so it no longer masks exceptions thrown there
return result;
}
}
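A usage sketch for the download path; the URL is illustrative and the token value is the hos.token setting shown later in this change.
import org.apache.commons.io.IOUtils;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import java.io.InputStream;
public class HttpClientUtils2UsageSketch {
public static void main(String[] args) throws Exception {
Header token = new BasicHeader("token", "c21f969b5f03d33d43e04f8f136e7682");
HttpClientUtils2 client = new HttpClientUtils2();
// The returned stream is already fully buffered, so the HTTP connection is released before reading.
InputStream in = client.httpGetInputStream("http://192.168.44.72:80/files/ip_v4_built_in.mmdb", 3000, token);
if (in != null) {
System.out.println("downloaded " + IOUtils.toByteArray(in).length + " bytes");
}
}
}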

View File

@@ -0,0 +1,21 @@
package com.zdjizhi.utils;
import lombok.Data;
/**
* @author fy
* @version 1.0
* @date 2022/10/19 18:27
*/
@Data
public class IpLocationConfiguration {
private String ipV4UserDefined;
private String ipV4BuiltIn;
private String ipV6UserDefined;
private String ipV6BuiltIn;
}
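Lombok's @Data generates the accessors plus equals/hashCode/toString; a caller-side sketch (values are placeholders):
public class IpLocationConfigurationSketch {
public static void main(String[] args) {
IpLocationConfiguration cfg = new IpLocationConfiguration();
cfg.setIpV4BuiltIn("ip_v4_built_in.mmdb"); // setter generated by @Data
cfg.setIpV6BuiltIn("ip_v6_built_in.mmdb");
System.out.println(cfg); // generated toString() prints all four fields
}
}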

View File

@@ -1,18 +1,104 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
public class IpUtils {
public static IpLookupV2 ipLookup;
private static Logger LOG = LoggerFactory.getLogger(IpUtils.class);
/**
 * IP location database utility
 */
public static IpLookupV2 ipLookup = new IpLookupV2.Builder(false)
.loadDataFileV4(CommonConfig.IP_MMDB_PATH + "ip_v4_built_in.mmdb")
.loadDataFileV6(CommonConfig.IP_MMDB_PATH + "ip_v6_built_in.mmdb")
.loadDataFilePrivateV4(CommonConfig.IP_MMDB_PATH + "ip_v4_user_defined.mmdb")
.loadDataFilePrivateV6(CommonConfig.IP_MMDB_PATH + "ip_v6_user_defined.mmdb")
.build();
public static void loadIpLook(){
try {
IpLookupV2.Builder builder = new IpLookupV2.Builder(false);
if ("CLUSTER".equals(CommonConfig.CLUSTER_OR_SINGLE)) {
byte[] ipv4BuiltBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v4_built_in.mmdb");
if (ipv4BuiltBytes!=null){
InputStream ipv4BuiltInputStream = new ByteArrayInputStream(ipv4BuiltBytes);
builder.loadDataFileV4(ipv4BuiltInputStream);
}
byte[] ipv6BuiltBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v6_built_in.mmdb");
if (ipv6BuiltBytes!=null){
InputStream ipv6BuiltInputStream = new ByteArrayInputStream(ipv6BuiltBytes);
builder.loadDataFileV6(ipv6BuiltInputStream);
}
byte[] ipv4UserBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v4_user_defined.mmdb");
if (ipv4UserBytes!=null){
InputStream ipv4UserInputStream = new ByteArrayInputStream(ipv4UserBytes);
builder.loadDataFilePrivateV4(ipv4UserInputStream);
}
byte[] ipv6UserBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v6_user_defined.mmdb");
if (ipv6UserBytes!=null){
InputStream ipv6UserInputStream = new ByteArrayInputStream(ipv6UserBytes);
builder.loadDataFilePrivateV6(ipv6UserInputStream);
}
}else if ("SINGLE".equals(CommonConfig.CLUSTER_OR_SINGLE)){
byte[] ipv4BuiltBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v4_built_in.mmdb");
if (ipv4BuiltBytes!=null){
InputStream ipv4BuiltInputStream = new ByteArrayInputStream(ipv4BuiltBytes);
builder.loadDataFileV4(ipv4BuiltInputStream);
}
byte[] ipv6BuiltBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v6_built_in.mmdb");
if (ipv6BuiltBytes!=null){
InputStream ipv6BuiltInputStream = new ByteArrayInputStream(ipv6BuiltBytes);
builder.loadDataFileV6(ipv6BuiltInputStream);
}
byte[] ipv4UserBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v4_user_defined.mmdb");
if (ipv4UserBytes!=null){
InputStream ipv4UserInputStream = new ByteArrayInputStream(ipv4UserBytes);
builder.loadDataFilePrivateV4(ipv4UserInputStream);
}
byte[] ipv6UserBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v6_user_defined.mmdb");
if (ipv6UserBytes!=null){
InputStream ipv6UserInputStream = new ByteArrayInputStream(ipv6UserBytes);
builder.loadDataFilePrivateV6(ipv6UserInputStream);
}
}
ipLookup = builder.build();
}catch (Exception e){
LOG.error("加载失败",e);
}
}
public static void updateIpLook(Map<String, byte[]> knowledgeFileCache){
try{
IpLookupV2.Builder builder = new IpLookupV2.Builder(false);
ipLookup= builder.loadDataFileV4(new ByteArrayInputStream(knowledgeFileCache.get("ip_v4_built_in.mmdb")))
.loadDataFileV6(new ByteArrayInputStream(knowledgeFileCache.get("ip_v6_built_in.mmdb")))
.loadDataFilePrivateV4(new ByteArrayInputStream(knowledgeFileCache.get("ip_v4_user_defined.mmdb")))
.loadDataFilePrivateV6(new ByteArrayInputStream(knowledgeFileCache.get("ip_v6_user_defined.mmdb")))
.build();
}catch (Exception e){
LOG.error("加载失败",e);
}
}
public static void main(String[] args) {
System.out.println(ipLookup.countryLookup("49.7.115.37"));
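Taken together, the intended lifecycle appears to be a single loadIpLook() at startup (HDFS in CLUSTER mode, the local download directory in SINGLE mode) followed by updateIpLook(...) on every knowledge push; a sketch under that assumption:
public class IpLookupLifecycleSketch {
public static void main(String[] args) {
// Initial load, mode chosen by cluster.or.single in the configuration below.
IpUtils.loadIpLook();
System.out.println(IpUtils.ipLookup.countryLookup("49.7.115.37"));
// Each refreshed cache emitted by the Nacos-driven source then calls IpUtils.updateIpLook(cache).
}
}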

View File

@@ -1,11 +1,11 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.StringReader;
@@ -13,17 +13,18 @@ import java.util.Properties;
import java.util.concurrent.Executor;
public class NacosUtils {
private static final Logger logger = LoggerFactory.getLogger(NacosUtils.class);
// private static final Logger logger = LoggerFactory.getLogger(NacosUtils.class);
private static final Log logger = LogFactory.get();
private static Properties nacosProperties = new Properties();
private static Properties commonProperties = new Properties();
private static final String NACOS_SERVER_ADDR = CommonConfigurations.getStringProperty("nacos.server.addr");
private static final String NACOS_NAMESPACE = CommonConfigurations.getStringProperty("nacos.namespace");
private static final String NACOS_STATIC_NAMESPACE = CommonConfigurations.getStringProperty("nacos.static.namespace");
private static final String NACOS_USERNAME = CommonConfigurations.getStringProperty("nacos.username");
private static final String NACOS_PASSWORD = CommonConfigurations.getStringProperty("nacos.password");
private static final String NACOS_DATA_ID = CommonConfigurations.getStringProperty("nacos.data.id");
private static final String NACOS_GROUP = CommonConfigurations.getStringProperty("nacos.group");
private static final String NACOS_STATIC_DATA_ID = CommonConfigurations.getStringProperty("nacos.static.data.id");
private static final String NACOS_STATIC_GROUP = CommonConfigurations.getStringProperty("nacos.static.group");
private static final long NACOS_READ_TIMEOUT = CommonConfigurations.getLongProperty("nacos.read.timeout");
static {
@@ -32,7 +33,7 @@ public class NacosUtils {
private static void getProperties() {
nacosProperties.setProperty(PropertyKeyConst.SERVER_ADDR, NACOS_SERVER_ADDR);
nacosProperties.setProperty(PropertyKeyConst.NAMESPACE, NACOS_NAMESPACE);
nacosProperties.setProperty(PropertyKeyConst.NAMESPACE, NACOS_STATIC_NAMESPACE);
nacosProperties.setProperty(PropertyKeyConst.USERNAME, NACOS_USERNAME);
nacosProperties.setProperty(PropertyKeyConst.PASSWORD, NACOS_PASSWORD);
}
@@ -41,10 +42,11 @@ public class NacosUtils {
try {
getProperties();
ConfigService configService = NacosFactory.createConfigService(nacosProperties);
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
String config = configService.getConfig(NACOS_STATIC_DATA_ID, NACOS_STATIC_GROUP, NACOS_READ_TIMEOUT);
commonProperties.load(new StringReader(config));
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
configService.addListener(NACOS_STATIC_DATA_ID, NACOS_STATIC_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;

View File

@@ -1,11 +1,12 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SnowflakeId {
private static final Logger logger = LoggerFactory.getLogger(SnowflakeId.class);
// private static final Logger logger = LoggerFactory.getLogger(SnowflakeId.class);
private static final Log logger = LogFactory.get();
/**
* 64 bits in total; the first bit is the sign bit and defaults to 0

View File

@@ -1,11 +1,11 @@
package com.zdjizhi.utils;
import cn.hutool.core.util.StrUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
@@ -13,7 +13,8 @@ import java.util.concurrent.CountDownLatch;
public class ZookeeperUtils implements Watcher {
private static final Logger logger = LoggerFactory.getLogger(ZookeeperUtils.class);
// private static final Logger logger = LoggerFactory.getLogger(ZookeeperUtils.class);
private static final Log logger = LogFactory.get();
private ZooKeeper zookeeper;

View File

@@ -5,7 +5,7 @@ stream.execution.environment.parallelism=1
stream.execution.job.name=DOS-DETECTION-APPLICATION
#Input Kafka source parallelism
kafka.input.parallelism=1
kafka.input.parallelism=3
#Input Kafka topic name
kafka.input.topic.name=DOS-SKETCH-RECORD
@@ -15,22 +15,22 @@ kafka.input.topic.name=DOS-SKETCH-RECORD
kafka.input.bootstrap.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
#Kafka consumer group id
kafka.input.group.id=2112080949
kafka.input.group.id=dos-detection-job-221125-1
#kafka.input.group.id=dos-detection-job-210813-1
#Kafka metrics sink parallelism
kafka.output.metric.parallelism=1
kafka.output.metric.parallelism=3
#Kafka metrics output topic name
kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS
#kafka.output.metric.topic.name=test
#kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS
kafka.output.metric.topic.name=test
#Kafka event sink parallelism
kafka.output.event.parallelism=1
kafka.output.event.parallelism=3
#Kafka event output topic name
kafka.output.event.topic.name=DOS-EVENT
#kafka.output.event.topic.name=storm-dos-test
#kafka.output.event.topic.name=DOS-EVENT
kafka.output.event.topic.name=abcd
#Kafka output bootstrap servers
kafka.output.bootstrap.servers=192.168.44.12:9094
@@ -38,6 +38,7 @@ kafka.output.bootstrap.servers=192.168.44.12:9094
#ZooKeeper quorum address
hbase.zookeeper.quorum=192.168.44.12:2181
#hbase.zookeeper.quorum=192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181
#hbase.zookeeper.quorum=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#HBase client operation timeout
@@ -51,7 +52,7 @@ hbase.baseline.table.name=dos:ddos_traffic_baselines
hbase.baseline.total.num=1000000
#Baseline TTL unit
hbase.baseline.ttl=30
hbase.baseline.ttl=10
#Aggregation parallelism (2 keys)
flink.first.agg.parallelism=1
@@ -60,10 +61,10 @@ flink.first.agg.parallelism=1
flink.detection.map.parallelism=1
#Watermark delay
flink.watermark.max.orderness=10
flink.watermark.max.orderness=300
#Computation window size; default 600s
flink.window.max.time=10
flink.window.max.time=600
#Limit of distinct source IPs in a DoS event result
source.ip.list.limit=10000
@@ -74,7 +75,7 @@ destination.ip.partition.num=10000
data.center.id.num=15
#IP mmdb database path
ip.mmdb.path=D:\\data\\dat\\
ip.mmdb.path=D:\\data\\dat\\bak\\
#ip.mmdb.path=/home/bigdata/topology/dat/
#ip.mmdb.path=/home/bigdata/wlh/topology/dos-detection/dat/
@@ -82,9 +83,6 @@ ip.mmdb.path=D:\\data\\dat\\
bifang.server.uri=http://192.168.44.72:80
#bifang.server.uri=http://192.168.44.3:80
#Read-only bifang access token; built into bifang, no need to change
bifang.server.token=ed04b942-7df4-4e3d-b9a9-a881ca98a867
#Encrypted password endpoint path
bifang.server.encryptpwd.path=/v1/user/encryptpwd
@@ -122,17 +120,39 @@ baseline.threshold.schedule.days=1
#Kafka user authentication settings
sasl.jaas.config.user=admin
#sasl.jaas.config.password=galaxy2019
#sasl.jaas.config.password=ENC(6MleDyA3Z73HSaXiKsDJ2k7Ys8YWLhEJ)
sasl.jaas.config.password=6MleDyA3Z73HSaXiKsDJ2k7Ys8YWLhEJ
#Whether to enable Kafka user authentication (1 = on, 0 = off)
sasl.jaas.config.flag=1
#Nacos configuration
############################## Nacos configuration ######################################
nacos.server.addr=192.168.44.12:8848
nacos.namespace=test
nacos.username=nacos
nacos.password=nacos
nacos.data.id=dos_detection.properties
nacos.group=Galaxy
nacos.read.timeout=5000
nacos.read.timeout=5000
############################## Nacos --- knowledge base configuration ######################################
nacos.namespace=public
nacos.data.id=knowledge_base.json
nacos.group=DEFAULT_GROUP
############################## Nacos --- static threshold configuration ######################################
nacos.static.namespace=test
nacos.static.data.id=dos_detection.properties
nacos.static.group=Galaxy
############################## HOS token configuration ######################################
hos.token=c21f969b5f03d33d43e04f8f136e7682
############################# Cluster or single-node mode selection ######################################
cluster.or.single=CLUSTER
#cluster.or.single=SINGLE
############################## Cluster-mode knowledge file path ######################################
hdfs.path=/test/TEST/
hdfs.uri.nn1=192.168.40.151:9000
hdfs.uri.nn2=192.168.40.152:9000
hdfs.user=dos
############################## Single-node mode download path ######################################
download.path=D:\\ttt\\

View File

@@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/home/tsg/olap/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131702</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.logfile.size</name>
<value>10000000</value>
<description>The max size of each log file</description>
</property>
<property>
<name>hadoop.logfile.count</name>
<value>1</value>
<description>The max number of log files</description>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181</value>
</property>
<property>
<name>ipc.client.connect.timeout</name>
<value>90000</value>
</property>
</configuration>
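These site files duplicate the settings hard-coded in HdfsUtils; as an alternative sketch (assuming the two XMLs are on the classpath), the same Configuration could be built by loading them:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
public class HdfsFromSiteFilesSketch {
public static FileSystem connect() throws Exception {
Configuration conf = new Configuration();
// Picks up fs.defaultFS, the ns1 nameservice and the HA failover settings from the files above.
conf.addResource("core-site.xml");
conf.addResource("hdfs-site.xml");
return FileSystem.get(conf);
}
}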

View File

@@ -0,0 +1,142 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/tsg/olap/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/tsg/olap/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>192.168.40.151:9000</value>
</property>
<!-- HTTP address of nn1 (external access) -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>192.168.40.151:50070</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>192.168.40.152:9000</value>
</property>
<!-- HTTP address of nn2 (external access) -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>192.168.40.152:50070</value>
</property>
<!-- Where NameNode metadata edits are stored on the JournalNodes (usually co-located with ZooKeeper) -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://192.168.40.151:8485;192.168.40.152:8485;192.168.40.203:8485/ns1</value>
</property>
<!-- Local disk directory where JournalNodes store data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/tsg/olap/hadoop/journal</value>
</property>
<!-- Java class HDFS clients use to contact the NameNode through a proxy and determine which node is active -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing methods for automatic failover; sshfence logs in over SSH and kills the stale active NameNode, with shell(true) as fallback -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence
shell(true)</value>
</property>
<!-- SSH private key, needed only when the sshfence mechanism is used -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- Timeout for the sshfence mechanism; can be omitted when a script-based failover method is used -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- Enable automatic failover; can be left unset if automatic failover is not needed -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>8192</value>
</property>
<!-- NameNode RPC handler thread count; increasing it costs little -->
<property>
<name>dfs.namenode.handler.count</name>
<value>30</value>
</property>
<!-- DataNode RPC handler thread count; larger values use more memory -->
<property>
<name>dfs.datanode.handler.count</name>
<value>40</value>
</property>
<!-- Bandwidth available to the balancer -->
<property>
<name>dfs.balance.bandwidthPerSec</name>
<value>104857600</value>
</property>
<!-- Reserved disk space that HDFS will not use, in bytes -->
<property>
<name>dfs.datanode.du.reserved</name>
<value>5368709120</value>
</property>
<!-- DataNode/NameNode connection timeout in ms: 2 * heartbeat.recheck.interval + 30000 -->
<property>
<name>heartbeat.recheck.interval</name>
<value>100000</value>
</property>
</configuration>

View File

@@ -0,0 +1,23 @@
#Log4j
log4j.rootLogger=info,console,file
# Console appender settings
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=info
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# File appender settings
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=info
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
#Use a relative path so logs are written under the application directory; test accordingly
log4j.appender.file.file=${nis.root}/log/flink-dos-detection.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n
#MyBatis: com.nis.web.dao is the package holding the MyBatis mapper interfaces
log4j.logger.com.nis.web.dao=debug
#BoneCP datasource logging
log4j.category.com.jolbox=debug,console

View File

@@ -0,0 +1,196 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Declare the two ResourceManager addresses -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>rmcluster</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rsm1,rsm2</value>
</property>
<!-- Configure rm1 -->
<property>
<name>yarn.resourcemanager.hostname.rsm1</name>
<value>192.168.40.152</value>
</property>
<property>
<name>yarn.resourcemanager.address.rsm1</name>
<value>192.168.40.152:9916</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rsm1</name>
<value>192.168.40.152:9917</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rsm1</name>
<value>192.168.40.152:9918</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rsm1</name>
<value>192.168.40.152:9919</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
<value>192.168.40.152:9920</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rsm1</name>
<value>192.168.40.152:23142</value>
</property>
<!-- Configure rm2 -->
<property>
<name>yarn.resourcemanager.hostname.rsm2</name>
<value>192.168.40.203</value>
</property>
<property>
<name>yarn.resourcemanager.address.rsm2</name>
<value>192.168.40.203:9916</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rsm2</name>
<value>192.168.40.203:9917</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rsm2</name>
<value>192.168.40.203:9918</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rsm2</name>
<value>192.168.40.203:9919</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
<value>192.168.40.203:9920</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rsm2</name>
<value>192.168.40.203:23142</value>
</property>
<!-- ZooKeeper cluster address -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181</value>
</property>
<!-- Enable RM recovery so in-flight jobs resume after a ResourceManager failure; default false -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Enable NodeManager recovery so containers survive restarts; default false, requires the settings below -->
<property>
<name>yarn.nodemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Directory for NodeManager recovery metadata -->
<property>
<name>yarn.nodemanager.recovery.dir</name>
<value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
</property>
<!-- Supervised NM recovery: containers are not cleaned up on restart; default false -->
<property>
<name>yarn.nodemanager.recovery.supervised</name>
<value>true</value>
</property>
<!-- Fixed NodeManager RPC address; the default ${yarn.nodemanager.hostname}:0 uses an ephemeral port that changes across restarts, so pin it to keep NM recovery working -->
<property>
<name>yarn.nodemanager.address</name>
<value>${yarn.nodemanager.hostname}:9923</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>30720</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>1024</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>30720</value>
</property>
<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.nodemanager.heartbeat-interval-ms</name>
<value>3000</value>
</property>
<!-- Retain logs for 7 days -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
<property>
<name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
<value>3600</value>
</property>
<property>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>/tmp/logs</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>14</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-vcores</name>
<value>1</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-vcores</name>
<value>14</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.disk-health-checker.enable</name>
<value>false</value>
</property>
<!-- Maximum ApplicationMaster attempts; defaults to 2 once HA is configured -->
<property>
<name>yarn.resourcemanager.am.max-attempts</name>
<value>10000</value>
</property>
<property>
<name>yarn.log.server.url</name>
<value>http://bigdata-151:19888/jobhistory/logs</value>
</property>
</configuration>