58 Commits

Author SHA1 Message Date
unknown
d8b0a7637b Populate the Profile ID after a static threshold is hit 2023-04-03 17:35:36 +08:00
unknown
b56a2ec31e GAL-306 Fix DoS detection failing to read the IP geolocation database from HDFS; support the yarn per-job run mode 2023-03-27 17:15:27 +08:00
unknown
11747d9964 GAL-296 Resolve dependency conflicts when running in Yarn mode 2023-03-07 18:58:25 +08:00
wanglihui
ce15a27a1b TSG-13094 Fix MVsys id appearing in DoS Event logs 2022-12-21 17:11:14 +08:00
wanglihui
01bbe562c9 Merge branch 'tsg-22.11' of git.mesalab.cn:bigdata/tsg/flink-dos-detection into tsg-22.11 2022-12-19 10:18:44 +08:00
wanglihui
f07651cf14 Rename the vsys id field to common_t_vsys_id 2022-12-19 10:18:17 +08:00
unknown
7c201a8a3f Add Nacos Namespace configuration; remove the flush operation when updating to HDFS 2022-12-16 16:52:33 +08:00
wanglihui
78435d54ea Merge branch 'knowledge' of https://git.mesalab.cn/bigdata/tsg/flink-dos-detection into tsg-22.11 2022-12-06 19:11:30 +08:00
wanglihui
76c9247bb9 Merge branch 'knowledge' of https://git.mesalab.cn/bigdata/tsg/flink-dos-detection into tsg-22.11 2022-12-06 19:10:28 +08:00
wanglihui
488b7c6644 Adjust some log output 2022-12-06 17:13:09 +08:00
unknown
0662d265dd GAL-224 Remove SSL detection; add HDFS high-availability settings 2022-11-28 16:42:56 +08:00
unknown
87fe11dc93 Optimize how data is persisted in standalone mode 2022-11-28 15:38:36 +08:00
unknown
9a2a5b3957 GAL-224 Support dynamic loading of the knowledge base in DoS detection 2022-11-23 15:30:24 +08:00
fy
c58acdcfc9 Initial groundwork for the Flink knowledge-base integration 2022-11-14 14:34:00 +08:00
wanglihui
b409150532 Fix dependency-conflict bug when submitting to the yarn cluster 2022-10-17 18:15:11 +08:00
wanglihui
7e6d5fcfc5 Fix vsys id not being set 2022-10-17 11:10:35 +08:00
wanglihui
859cd379e5 DoS detection supports vsys id 2022-09-23 18:37:33 +08:00
wanglihui
47ddef9bca DoS detection event logs default the VSYS ID to 1 2022-08-19 10:17:52 +08:00
wanglihui
0a6f36393c DoS Detection Bifang Access Token supports dynamic configuration 2022-07-19 17:20:09 +08:00
unknown
84a1e6879a Fix dynamic retrieval of nacos configuration 2022-06-28 11:15:49 +08:00
unknown
ab8f6aba81 Fix dynamic retrieval of nacos configuration 2022-06-28 11:14:54 +08:00
unknown
94e8fb807a Integrate nacos; fix dependency conflicts and startup errors 2022-06-22 15:58:49 +08:00
wanglihui
cead1d4d99 Merge branch 'tsg-22.06' of git.mesalab.cn:bigdata/tsg/flink-dos-detection 2022-06-20 18:15:24 +08:00
徐鹏飞
2d98c3b6e6 Change the static-threshold retrieval logic to fetch by the default specified vsysID 2022-06-13 18:00:57 +08:00
徐鹏飞
3dc29a07be Add fetching static thresholds from bifang by vsysId 2022-05-31 18:00:36 +08:00
wanglihui
1fcdb79739 Correct rate-based detection attribute values in the Conditions field of DoS event logs 2022-05-17 13:54:43 +08:00
wanglihui
3d974217d9 Integrate nacos and move part of the configuration into nacos for management. 2022-04-08 10:16:29 +08:00
wanglihui
db17064f73 Upgrade the utility library to 1.0.8 to support the new MMDB geolocation database 2022-02-11 16:06:24 +08:00
wanglihui
065e5abb09 Add jasypt-based configuration encryption 2021-12-20 13:50:18 +08:00
wanglihui
75bbdd2962 Update the DoS detection program; add reading of the baseline TTL configuration.
Fix a logic issue in the DoS detection Conditions threshold description language.
2021-12-08 13:51:09 +08:00
wanglihui
c46a395d9b Adjust static-threshold and upper/lower-bound judgment configuration. 2021-11-26 09:46:53 +08:00
wanglihui
cc3f0cf620 Fix NullPointerException when the fetched configuration is empty. 2021-11-05 19:01:12 +08:00
wanglihui
0617b1e614 Change the baseline-value retrieval strategy: when the baseline type=3 and the baseline value is below the static sensitivity threshold, replace it. 2021-10-29 18:43:05 +08:00
wanglihui
0125b031dd Format code; override equals and hashCode in entity classes. 2021-10-22 18:38:29 +08:00
wanglihui
177e7461cc Optimize how the baseline is built 2021-10-21 18:27:48 +08:00
wanglihui
be916531fb Change the threshold RangeMap construction logic to key on attack type, avoiding IP conflicts. 2021-10-20 18:23:12 +08:00
wanglihui
c692112445 Add packets and bits as criteria for static condition judgment.
Fix IP conflicts in static configuration.
2021-10-19 18:39:13 +08:00
wanglihui
b03ab9642d Fix conflict between static conditions and baseline conditions 2021-09-28 16:11:52 +08:00
wanglihui
c44250bf73 Add an IP-conflict check when reading DoS Detection Profiles
Fix DoS event log end_time being later than the current time
2021-09-26 18:41:36 +08:00
wanglihui
77bc6a844e Fix IP conflicts when reading configuration 2021-09-23 18:36:27 +08:00
wanglihui
e930fa23ed Change the judgment logic; add the baseline sensitivity threshold as a judgment condition. 2021-09-16 18:47:00 +08:00
wanglihui
8cd4dea19e Use the default bifang token when fetching the static threshold list
Fix the method that computes the baseline index
2021-09-15 10:08:17 +08:00
wanglihui
62f3c65d66 Real-time detection based on the DoS Sketch single-tuple 2021-09-14 18:46:23 +08:00
wanglihui
8cfb442c44 Add the single-tuple as a data source for baseline generation 2021-09-13 14:14:58 +08:00
wanglihui
4f8807dfa1 Change the rate calculation to divide the total session count by the time window 2021-09-13 09:46:02 +08:00
wanglihui
81f6499458 Add a sensitivity threshold to filter alerts
Change the average calculation to aggregate first, then average
2021-09-09 10:46:50 +08:00
wanglihui
b4237bb4a9 Add kafka SASL authentication 2021-09-06 16:19:33 +08:00
wanglihui
c5943298bd Fix wrong severity level in log judgment results caused by double precision issues 2021-08-26 18:42:28 +08:00
wanglihui
b4f919647a Add logic to judge dos attacks by static thresholds
Add a timer to periodically fetch static thresholds and the baseline
2021-08-24 16:35:31 +08:00
wanglihui
55af33b508 Add static-threshold parsing 2021-08-20 18:34:40 +08:00
wanglihui
28e7275674 Add a rangeMap to store configuration for the corresponding IP ranges 2021-08-20 11:52:20 +08:00
wanglihui
f744677021 Add an interface to read the bifang static-threshold configuration
Bump the galaxy utility library version
2021-08-18 19:15:49 +08:00
wanglihui
c957f3ec1c Add handling for a baseline value of 0: replace 0 with the default value. 2021-08-17 18:56:53 +08:00
wanglihui
9bda526d48 Fix the logic bug in judging the percentage over the baseline 2021-08-17 11:09:45 +08:00
wanglihui
e89e1b08c9 Change the processing logic: drop the processing-node IP and data center from the judgment key. 2021-08-16 18:24:13 +08:00
wanglihui
e0de04886b Add comments to configuration files; delete obsolete configuration files 2021-08-11 18:38:53 +08:00
wanglihui
30a24683e3 Add a partition count based on the server IP hashcode to metrics statistics. 2021-08-09 18:28:52 +08:00
wanglihui
5190654a8f Change the processing logic to cache baseline data in memory 2021-08-05 18:42:34 +08:00
49 changed files with 3529 additions and 520 deletions

230
pom.xml
View File

@@ -12,6 +12,8 @@
<flink.version>1.13.1</flink.version>
<hive.version>2.1.1</hive.version>
<hadoop.version>2.7.1</hadoop.version>
<scala.binary.version>2.11</scala.binary.version>
<jsonpath.version>2.4.0</jsonpath.version>
</properties>
<repositories>
@@ -96,81 +98,59 @@
</execution>
</executions>
</plugin>
</plugins>
<resources>
<resource>
<directory>properties</directory>
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
</includes>
<filtering>false</filtering>
</resource>
<resource>
<directory>src\main</directory>
<includes>
<include>log4j.properties</include>
</includes>
<filtering>false</filtering>
</resource>
</resources>
</build>
<dependencies>
<!-- https://mvnrepository.com/artifact/org.jasypt/jasypt -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.21</version>
<groupId>org.jasypt</groupId>
<artifactId>jasypt</artifactId>
<version>1.9.3</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.21</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-sql-connector-kafka_2.11</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka_2.11</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
<!--&lt;!&ndash; https://mvnrepository.com/artifact/org.apache.flink/flink-table &ndash;&gt;-->
<!--<dependency>-->
<!--<groupId>org.apache.flink</groupId>-->
<!--<artifactId>flink-table</artifactId>-->
<!--<version>${flink.version}</version>-->
<!--&lt;!&ndash;<scope>provided</scope>&ndash;&gt;-->
<!--<groupId>org.slf4j</groupId>-->
<!--<artifactId>slf4j-api</artifactId>-->
<!--<version>1.7.21</version>-->
<!--</dependency>-->
<!-- https://mvnrepository.com/artifact/org.apache.flink/flink-json -->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-json</artifactId>
<version>${flink.version}</version>
<scope>provided</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>1.0.0</version>
</dependency>
<!--Flink modules-->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-api-java</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
<!--<dependency>-->
<!--<groupId>org.slf4j</groupId>-->
<!--<artifactId>slf4j-log4j12</artifactId>-->
<!--<version>1.7.21</version>-->
<!--</dependency>-->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-planner-blink_2.11</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>${jsonpath.version}</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-table-planner_2.11</artifactId>
<artifactId>flink-connector-kafka_2.12</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
@@ -178,7 +158,7 @@
<!-- CLI dependencies -->
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-clients_2.11</artifactId>
<artifactId>flink-clients_2.12</artifactId>
<version>${flink.version}</version>
<!--<scope>provided</scope>-->
</dependency>
@@ -186,6 +166,7 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.7.1</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<artifactId>zookeeper</artifactId>
@@ -195,6 +176,52 @@
<artifactId>jdk.tools</artifactId>
<groupId>jdk.tools</groupId>
</exclusion>
<exclusion>
<artifactId>guava</artifactId>
<groupId>com.google.guava</groupId>
</exclusion>
<exclusion>
<artifactId>slf4j-api</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- <dependency>-->
<!-- <groupId>org.apache.hadoop</groupId>-->
<!-- <artifactId>hadoop-hdfs</artifactId>-->
<!-- <version>${hadoop.version}</version>-->
<!-- <scope>provided</scope>-->
<!-- </dependency>-->
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>2.2.3</version>
<!--<scope>provided</scope>-->
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>log4j-over-slf4j</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>slf4j-api</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>hadoop-common</artifactId>
<groupId>org.apache.hadoop</groupId>
</exclusion>
</exclusions>
</dependency>
@@ -214,11 +241,10 @@
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-hbase-2.2_2.11</artifactId>
<version>${flink.version}</version>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.6</version>
</dependency>
<dependency>
@@ -227,10 +253,16 @@
<version>5.5.2</version>
</dependency>
<dependency>
<groupId>com.github.seancfoley</groupId>
<artifactId>ipaddress</artifactId>
<version>5.3.3</version>
</dependency>
<dependency>
<groupId>com.zdjizhi</groupId>
<artifactId>galaxy</artifactId>
<version>1.0.4</version>
<version>1.1.1</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
@@ -246,6 +278,76 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.alibaba.nacos</groupId>
<artifactId>nacos-client</artifactId>
<version>1.2.0</version>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<artifactId>slf4j-api</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-codec/commons-codec -->
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.11</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-databind -->
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>2.9.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.google.guava/guava -->
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>22.0</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.2</version>
</dependency>
<dependency>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>2.4.0</version>
</dependency>
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
<version>${flink.version}</version>
</dependency>
</dependencies>

View File

@@ -1,19 +1,31 @@
package com.zdjizhi.common;
import com.zdjizhi.utils.CommonConfigurations;
import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;
/**
* Created by wk on 2021/1/6.
* @author wlh
* @date 2021/1/6
*/
public class CommonConfig {
/**
* Default separator for the geolocation database
*/
public static final String LOCATION_SEPARATOR = ".";
private static StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
static {
encryptor.setPassword("galaxy");
}
public static final int STREAM_EXECUTION_ENVIRONMENT_PARALLELISM = CommonConfigurations.getIntProperty("stream.execution.environment.parallelism");
public static final String STREAM_EXECUTION_JOB_NAME = CommonConfigurations.getStringProperty("stream.execution.job.name");
public static final int KAFKA_INPUT_PARALLELISM = CommonConfigurations.getIntProperty("kafka.input.parallelism");
public static final String KAFKA_INPUT_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.input.topic.name");
public static final String KAFKA_INPUT_BOOTSTRAP_SERVERS = CommonConfigurations.getStringProperty("kafka.input.bootstrap.servers");
public static final String KAFKA_SCAN_STARTUP_MODE = CommonConfigurations.getStringProperty("kafka.input.scan.startup.mode");
public static final String KAFKA_GROUP_ID = CommonConfigurations.getStringProperty("kafka.input.group.id");
public static final int KAFKA_OUTPUT_METRIC_PARALLELISM = CommonConfigurations.getIntProperty("kafka.output.metric.parallelism");
@@ -22,28 +34,77 @@ public class CommonConfig {
public static final String KAFKA_OUTPUT_EVENT_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.output.event.topic.name");
public static final String KAFKA_OUTPUT_BOOTSTRAP_SERVERS = CommonConfigurations.getStringProperty("kafka.output.bootstrap.servers");
public static final int HBASE_INPUT_PARALLELISM = CommonConfigurations.getIntProperty("hbase.input.parallelism");
public static final String HBASE_ZOOKEEPER_QUORUM = CommonConfigurations.getStringProperty("hbase.zookeeper.quorum");
public static final int HBASE_CLIENT_OPERATION_TIMEOUT = CommonConfigurations.getIntProperty("hbase.client.operation.timeout");
public static final int HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD = CommonConfigurations.getIntProperty("hbase.client.scanner.timeout.period");
public static final String HBASE_BASELINE_TABLE_NAME = CommonConfigurations.getStringProperty("hbase.baseline.table.name");
public static final int HBASE_BASELINE_TOTAL_NUM = CommonConfigurations.getIntProperty("hbase.baseline.total.num");
public static final int HBASE_BASELINE_TTL = CommonConfigurations.getIntProperty("hbase.baseline.ttl");
public static final int FLINK_FIRST_AGG_PARALLELISM = CommonConfigurations.getIntProperty("flink.first.agg.parallelism");
public static final int FLINK_SECOND_AGG_PARALLELISM = CommonConfigurations.getIntProperty("flink.second.agg.parallelism");
public static final int FLINK_DETECTION_MAP_PARALLELISM = CommonConfigurations.getIntProperty("flink.detection.map.parallelism");
public static final int FLINK_WATERMARK_MAX_ORDERNESS = CommonConfigurations.getIntProperty("flink.watermark.max.orderness");
public static final int FLINK_WINDOW_MAX_TIME = CommonConfigurations.getIntProperty("flink.window.max.time");
public static final int SOURCE_IP_LIST_LIMIT = CommonConfigurations.getIntProperty("source.ip.list.limit");
public static final int DESTINATION_IP_PARTITION_NUM = CommonConfigurations.getIntProperty("destination.ip.partition.num");
public static final int DATA_CENTER_ID_NUM = CommonConfigurations.getIntProperty("data.center.id.num");
public static final String IP_MMDB_PATH = CommonConfigurations.getStringProperty("ip.mmdb.path");
public static final double BASELINE_SESSIONS_MINOR_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.minor.threshold");
public static final double BASELINE_SESSIONS_WARNING_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.warning.threshold");
public static final double BASELINE_SESSIONS_MAJOR_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.major.threshold");
public static final double BASELINE_SESSIONS_SEVERE_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.severe.threshold");
public static final double BASELINE_SESSIONS_CRITICAL_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.critical.threshold");
public static final String BIFANG_SERVER_URI = CommonConfigurations.getStringProperty("bifang.server.uri");
public static final String BIFANG_SERVER_ENCRYPTPWD_PATH = CommonConfigurations.getStringProperty("bifang.server.encryptpwd.path");
public static final String BIFANG_SERVER_LOGIN_PATH = CommonConfigurations.getStringProperty("bifang.server.login.path");
public static final String BIFANG_SERVER_POLICY_THRESHOLD_PATH = CommonConfigurations.getStringProperty("bifang.server.policy.threshold.path");
public static final String BIFANG_SERVER_POLICY_VSYSID_PATH = CommonConfigurations.getStringProperty("bifang.server.policy.vaysid.path");
public static final int HTTP_POOL_MAX_CONNECTION = CommonConfigurations.getIntProperty("http.pool.max.connection");
public static final int HTTP_POOL_MAX_PER_ROUTE = CommonConfigurations.getIntProperty("http.pool.max.per.route");
public static final int HTTP_POOL_REQUEST_TIMEOUT = CommonConfigurations.getIntProperty("http.pool.request.timeout");
public static final int HTTP_POOL_CONNECT_TIMEOUT = CommonConfigurations.getIntProperty("http.pool.connect.timeout");
public static final int HTTP_POOL_RESPONSE_TIMEOUT = CommonConfigurations.getIntProperty("http.pool.response.timeout");
public static final int STATIC_THRESHOLD_SCHEDULE_MINUTES = CommonConfigurations.getIntProperty("static.threshold.schedule.minutes");
public static final int BASELINE_THRESHOLD_SCHEDULE_DAYS = CommonConfigurations.getIntProperty("baseline.threshold.schedule.days");
public static final String SASL_JAAS_CONFIG_USER = CommonConfigurations.getStringProperty("sasl.jaas.config.user");
public static final String SASL_JAAS_CONFIG_PASSWORD = encryptor.decrypt(CommonConfigurations.getStringProperty("sasl.jaas.config.password"));
public static final int SASL_JAAS_CONFIG_FLAG = CommonConfigurations.getIntProperty("sasl.jaas.config.flag");
public static final String NACOS_SERVER_ADDR = CommonConfigurations.getStringProperty("nacos.server.addr");
public static final String NACOS_USERNAME = CommonConfigurations.getStringProperty("nacos.username");
public static final String NACOS_PASSWORD = CommonConfigurations.getStringProperty("nacos.password");
public static final String NACOS_NAMESPACE = CommonConfigurations.getStringProperty("nacos.namespace");
public static final String NACOS_DATA_ID = CommonConfigurations.getStringProperty("nacos.data.id");
public static final String NACOS_GROUP = CommonConfigurations.getStringProperty("nacos.group");
public static final int NACOS_READ_TIMEOUT = CommonConfigurations.getIntProperty("nacos.read.timeout");
public static final String HOS_TOKEN = CommonConfigurations.getStringProperty("hos.token");
public static final String CLUSTER_OR_SINGLE = CommonConfigurations.getStringProperty("cluster.or.single");
public static final String HDFS_URI_NS1 = CommonConfigurations.getStringProperty("hdfs.uri.nn1");
public static final String HDFS_URI_NS2 = CommonConfigurations.getStringProperty("hdfs.uri.nn2");
public static final String HDFS_PATH = CommonConfigurations.getStringProperty("hdfs.path");
public static final String HDFS_USER = CommonConfigurations.getStringProperty("hdfs.user");
public static final String DOWNLOAD_PATH = CommonConfigurations.getStringProperty("download.path");
public static void main(String[] args) {
StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
// Set the password/salt used for encryption and decryption
encryptor.setPassword("galaxy");
// 对"raw_password"进行加密S5kR+Y7CI8k7MaecZpde25yK8NKUnd6p
// String password = "galaxy2019";
String password = "nacos";
String encPwd = encryptor.encrypt(password);
System.out.println(encPwd);
// Decrypt back to the raw password
String rawPwd = encryptor.decrypt(encPwd);
System.out.println(rawPwd);
}
}
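
The main() above is a one-off helper for generating jasypt ciphertexts. As a hedged sketch of the round trip (assuming the same "galaxy" encryptor password set in the static block), the ciphertext produced offline is what ends up in configuration and is later decrypted at load time, e.g. for SASL_JAAS_CONFIG_PASSWORD:

import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;

public class JasyptRoundTrip {
    public static void main(String[] args) {
        StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
        // Must match the password set in CommonConfig's static initializer
        encryptor.setPassword("galaxy");
        // Encrypt once, offline, then store the ciphertext in the properties file,
        // e.g. sasl.jaas.config.password=<ciphertext>
        String ciphertext = encryptor.encrypt("my-plaintext-password");
        // At startup CommonConfig performs the reverse with encryptor.decrypt(...)
        String plaintext = encryptor.decrypt(ciphertext);
        System.out.println(ciphertext + " -> " + plaintext);
    }
}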

View File

@@ -0,0 +1,26 @@
package com.zdjizhi.common;
import java.io.Serializable;
public class CustomFile implements Serializable {
String fileName;
byte[] content;
public String getFileName() {
return fileName;
}
public void setFileName(String fileName) {
this.fileName = fileName;
}
public byte[] getContent() {
return content;
}
public void setContent(byte[] content) {
this.content = content;
}
}

View File

@@ -0,0 +1,63 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Objects;
public class DosBaselineThreshold implements Serializable {
private ArrayList<Integer> session_rate;
private Integer session_rate_baseline_type;
private Integer session_rate_default_value;
public ArrayList<Integer> getSession_rate() {
return session_rate;
}
public void setSession_rate(ArrayList<Integer> session_rate) {
this.session_rate = session_rate;
}
public Integer getSession_rate_baseline_type() {
return session_rate_baseline_type;
}
public void setSession_rate_baseline_type(Integer session_rate_baseline_type) {
this.session_rate_baseline_type = session_rate_baseline_type;
}
public Integer getSession_rate_default_value() {
return session_rate_default_value;
}
public void setSession_rate_default_value(Integer session_rate_default_value) {
this.session_rate_default_value = session_rate_default_value;
}
@Override
public String toString() {
return "DosBaselineThreshold{" +
"session_rate=" + session_rate +
", session_rate_baseline_type=" + session_rate_baseline_type +
", session_rate_default_value=" + session_rate_default_value +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof DosBaselineThreshold)) {
return false;
}
DosBaselineThreshold that = (DosBaselineThreshold) o;
return Objects.equals(getSession_rate(), that.getSession_rate()) &&
Objects.equals(getSession_rate_baseline_type(), that.getSession_rate_baseline_type()) &&
Objects.equals(getSession_rate_default_value(), that.getSession_rate_default_value());
}
@Override
public int hashCode() {
return Objects.hash(getSession_rate(), getSession_rate_baseline_type(), getSession_rate_default_value());
}
}

View File

@@ -0,0 +1,118 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Objects;
/**
* @author wlh
*/
public class DosDetectionThreshold implements Serializable {
private long profileId;
private String attackType;
private ArrayList<String> serverIpList;
private String serverIpAddr;
private long packetsPerSec;
private long bitsPerSec;
private long sessionsPerSec;
private int isValid;
private int vsysId;
private Integer[] superiorIds;
public long getProfileId() {
return profileId;
}
public void setProfileId(long profileId) {
this.profileId = profileId;
}
public String getAttackType() {
return attackType;
}
public void setAttackType(String attackType) {
this.attackType = attackType;
}
public ArrayList<String> getServerIpList() {
return serverIpList;
}
public void setServerIpList(ArrayList<String> serverIpList) {
this.serverIpList = serverIpList;
}
public String getServerIpAddr() {
return serverIpAddr;
}
public void setServerIpAddr(String serverIpAddr) {
this.serverIpAddr = serverIpAddr;
}
public long getPacketsPerSec() {
return packetsPerSec;
}
public void setPacketsPerSec(long packetsPerSec) {
this.packetsPerSec = packetsPerSec;
}
public long getBitsPerSec() {
return bitsPerSec;
}
public void setBitsPerSec(long bitsPerSec) {
this.bitsPerSec = bitsPerSec;
}
public long getSessionsPerSec() {
return sessionsPerSec;
}
public void setSessionsPerSec(long sessionsPerSec) {
this.sessionsPerSec = sessionsPerSec;
}
public int getIsValid() {
return isValid;
}
public void setIsValid(int isValid) {
this.isValid = isValid;
}
public int getVsysId() {
return vsysId;
}
public void setVsysId(int vsysId) {
this.vsysId = vsysId;
}
public Integer[] getSuperiorIds() {
return superiorIds;
}
public void setSuperiorIds(Integer[] superiorIds) {
this.superiorIds = superiorIds;
}
@Override
public String toString() {
return "DosDetectionThreshold{" +
"profileId='" + profileId + '\'' +
", attackType='" + attackType + '\'' +
", serverIpList=" + serverIpList +
", serverIpAddr='" + serverIpAddr + '\'' +
", packetsPerSec=" + packetsPerSec +
", bitsPerSec=" + bitsPerSec +
", sessionsPerSec=" + sessionsPerSec +
", isValid=" + isValid +
", vsysId=" + vsysId +
", superiorIds=" + Arrays.toString(superiorIds) +
'}';
}
}

View File

@@ -2,11 +2,13 @@ package com.zdjizhi.common;
import java.io.Serializable;
public class DosEventLog implements Serializable {
public class DosEventLog implements Serializable,Cloneable {
private long log_id;
private int vsys_id;
private long start_time;
private long end_time;
private long profile_id;
private String attack_type;
private String severity;
private String conditions;
@@ -26,6 +28,14 @@ public class DosEventLog implements Serializable {
this.log_id = log_id;
}
public int getVsys_id() {
return vsys_id;
}
public void setVsys_id(int vsys_id) {
this.vsys_id = vsys_id;
}
public long getStart_time() {
return start_time;
}
@@ -42,6 +52,14 @@ public class DosEventLog implements Serializable {
this.end_time = end_time;
}
public long getProfile_id() {
return profile_id;
}
public void setProfile_id(long profile_id) {
this.profile_id = profile_id;
}
public String getAttack_type() {
return attack_type;
}
@@ -124,10 +142,12 @@ public class DosEventLog implements Serializable {
@Override
public String toString() {
return "dosEventLog{" +
return "DosEventLog{" +
"log_id=" + log_id +
", vsys_id=" + vsys_id +
", start_time=" + start_time +
", end_time=" + end_time +
", profile_id=" + profile_id +
", attack_type='" + attack_type + '\'' +
", severity='" + severity + '\'' +
", conditions='" + conditions + '\'' +
@@ -140,4 +160,9 @@ public class DosEventLog implements Serializable {
", bit_rate=" + bit_rate +
'}';
}
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone();
}
}

View File

@@ -1,17 +1,26 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.Objects;
public class DosMetricsLog implements Serializable {
private long sketch_start_time;
private String common_sled_ip;
private String common_data_center;
private String attack_type;
private String destination_ip;
private long session_rate;
private long packet_rate;
private long bit_rate;
private int partition_num;
private int vsys_id;
public int getPartition_num() {
return partition_num;
}
public void setPartition_num(int partition_num) {
this.partition_num = partition_num;
}
public long getSketch_start_time() {
return sketch_start_time;
@@ -21,22 +30,6 @@ public class DosMetricsLog implements Serializable {
this.sketch_start_time = sketch_start_time;
}
public String getCommon_sled_ip() {
return common_sled_ip;
}
public void setCommon_sled_ip(String common_sled_ip) {
this.common_sled_ip = common_sled_ip;
}
public String getCommon_data_center() {
return common_data_center;
}
public void setCommon_data_center(String common_data_center) {
this.common_data_center = common_data_center;
}
public String getAttack_type() {
return attack_type;
}
@@ -77,17 +70,25 @@ public class DosMetricsLog implements Serializable {
this.bit_rate = bit_rate;
}
public int getVsys_id() {
return vsys_id;
}
public void setVsys_id(int vsys_id) {
this.vsys_id = vsys_id;
}
@Override
public String toString() {
return "DosMetricsLog{" +
"sketch_start_time=" + sketch_start_time +
", common_sled_ip='" + common_sled_ip + '\'' +
", common_data_center='" + common_data_center + '\'' +
", attack_type='" + attack_type + '\'' +
", destination_ip='" + destination_ip + '\'' +
", session_rate=" + session_rate +
", packet_rate=" + packet_rate +
", bit_rate=" + bit_rate +
", partition_num=" + partition_num +
", vsys_id=" + vsys_id +
'}';
}
}

View File

@@ -1,6 +1,7 @@
package com.zdjizhi.common;
import java.io.Serializable;
import java.util.Objects;
public class DosSketchLog implements Serializable {
@@ -14,6 +15,7 @@ public class DosSketchLog implements Serializable {
private long sketch_sessions;
private long sketch_packets;
private long sketch_bytes;
private int vsys_id;
@Override
public String toString() {
@@ -28,6 +30,7 @@ public class DosSketchLog implements Serializable {
", sketch_sessions=" + sketch_sessions +
", sketch_packets=" + sketch_packets +
", sketch_bytes=" + sketch_bytes +
", vsys_id=" + vsys_id +
'}';
}
@@ -110,4 +113,12 @@ public class DosSketchLog implements Serializable {
public void setSketch_bytes(long sketch_bytes) {
this.sketch_bytes = sketch_bytes;
}
public int getVsys_id() {
return vsys_id;
}
public void setVsys_id(int vsys_id) {
this.vsys_id = vsys_id;
}
}

View File

@@ -0,0 +1,32 @@
package com.zdjizhi.common;
import java.util.Arrays;
public class DosVsysId {
private Integer id;
private Integer[] superiorIds;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Integer[] getSuperiorIds() {
return superiorIds;
}
public void setSuperiorIds(Integer[] superiorIds) {
this.superiorIds = superiorIds;
}
@Override
public String toString() {
return "DosVsysId{" +
"id=" + id +
", superiorIds=" + Arrays.toString(superiorIds) +
'}';
}
}

View File

@@ -0,0 +1,91 @@
package com.zdjizhi.common;
public class KnowledgeLog {
public String id;
public String name;
public String path;
public Long size;
public String format;
public String sha256;
public String version;
public String updateTime;
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getPath() {
return path;
}
public void setPath(String path) {
this.path = path;
}
public Long getSize() {
return size;
}
public void setSize(Long size) {
this.size = size;
}
public String getFormat() {
return format;
}
public void setFormat(String format) {
this.format = format;
}
public String getSha256() {
return sha256;
}
public void setSha256(String sha256) {
this.sha256 = sha256;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
public String getUpdateTime() {
return updateTime;
}
public void setUpdateTime(String updateTime) {
this.updateTime = updateTime;
}
@Override
public String toString() {
return "KnowledgeLog{" +
"id='" + id + '\'' +
", name='" + name + '\'' +
", path='" + path + '\'' +
", size=" + size +
", format='" + format + '\'' +
", sha256='" + sha256 + '\'' +
", version='" + version + '\'' +
", updateTime='" + updateTime + '\'' +
'}';
}
}

View File

@@ -1,114 +1,196 @@
package com.zdjizhi.etl;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.sink.OutputStreamSink;
import com.zdjizhi.utils.IpUtils;
import com.zdjizhi.utils.SnowflakeId;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.typeutils.MapTypeInfo;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.*;
import com.zdjizhi.utils.*;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.text.NumberFormat;
import java.text.ParseException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* @author wlh
* DoS detection decision logic
*/
public class DosDetection extends BroadcastProcessFunction<DosSketchLog, Map<String, Map<String, List<Integer>>>, DosEventLog> {
public class DosDetection extends BroadcastProcessFunction<DosSketchLog,Map<String, byte[]>, DosEventLog> {
private static final Logger logger = LoggerFactory.getLogger(DosDetection.class);
private static final Log logger = LogFactory.get();
private static Map<String, Map<String, DosBaselineThreshold>> baselineMap = new HashMap<>();
private final static NumberFormat PERCENT_INSTANCE = NumberFormat.getPercentInstance();
private HashMap<Integer,HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> thresholdRangeMap;
private final static int BASELINE_SIZE = 144;
private final static int STATIC_CONDITION_TYPE = 1;
private final static int BASELINE_CONDITION_TYPE = 2;
private final static int SENSITIVITY_CONDITION_TYPE = 3;
private static MapStateDescriptor<String, Map<String, Map<String, List<Integer>>>> descriptor = new MapStateDescriptor<>("boradcast-state",
Types.STRING,
new MapTypeInfo<>(String.class, new MapTypeInfo<>(String.class, (Class<List<Integer>>) (Class<?>) List.class).getTypeClass()));
private final static String SESSIONS_TAG = "sessions";
private final static String PACKETS_TAG = "packets";
private final static String BITS_TAG = "bits";
private final static NumberFormat PERCENT_INSTANCE = NumberFormat.getPercentInstance();
private final static int OTHER_BASELINE_TYPE = 3;
@Override
public void open(Configuration parameters) {
ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(2,
new BasicThreadFactory.Builder().namingPattern("Dos-Detection-%d").daemon(true).build());
try {
super.open(parameters);
logger.info("begin init");
IpUtils.loadIpLook();
logger.info("init over");
executorService.scheduleAtFixedRate(() -> thresholdRangeMap = ParseStaticThreshold.createStaticThreshold(), 0,
CommonConfig.STATIC_THRESHOLD_SCHEDULE_MINUTES, TimeUnit.MINUTES);
executorService.scheduleAtFixedRate(() -> baselineMap = ParseBaselineThreshold.readFromHbase(), 0,
CommonConfig.BASELINE_THRESHOLD_SCHEDULE_DAYS, TimeUnit.DAYS);
} catch (Exception e) {
logger.error("定时器任务执行失败", e);
}
PERCENT_INSTANCE.setMinimumFractionDigits(2);
}
@Override
public void processElement(DosSketchLog value, ReadOnlyContext ctx, Collector<DosEventLog> out) throws Exception {
public void processElement(DosSketchLog value, ReadOnlyContext ctx, Collector<DosEventLog> out) {
DosEventLog finalResult = null;
try {
Map<String, Map<String, List<Integer>>> broadcast = ctx.getBroadcastState(descriptor).get("broadcast-state");
String destinationIp = value.getDestination_ip();
int vsysId = value.getVsys_id();
String key = destinationIp + "-" + vsysId;
String attackType = value.getAttack_type();
logger.info("当前判断IP{}, 类型: {}",destinationIp,attackType);
if (broadcast.containsKey(destinationIp)){
List<Integer> baseline = broadcast.get(destinationIp).get(attackType);
if (baseline != null && baseline.size() == BASELINE_SIZE){
int timeIndex = getCurrentTimeIndex(value.getSketch_start_time());
Integer base = baseline.get(timeIndex);
long sketchSessions = value.getSketch_sessions();
long diff = sketchSessions - base;
if (diff > 0){
String percent = getDiffPercent(diff, sketchSessions);
double diffPercentDouble = getDiffPercentDouble(percent);
Severity severity = judgeSeverity(diffPercentDouble);
if (severity != Severity.NORMAL){
DosEventLog result = getResult(value, severity, percent);
logger.info("检测到当前server IP {} 存在 {} 异常,日志详情\n {}",destinationIp,attackType,result.toString());
out.collect(result);
}else {
logger.info("当前server IP{} 未出现 {} 异常,日志详情 {}",destinationIp,attackType,value.toString());
}
}
}
}else {
logger.info("未获取到当前server IP{} 类型 {} baseline数据",destinationIp,attackType);
IPAddress destinationIpAddress = new IPAddressString(destinationIp).getAddress();
DosDetectionThreshold threshold = null;
if (thresholdRangeMap.containsKey(vsysId)){
threshold = thresholdRangeMap.get(vsysId).getOrDefault(attackType, TreeRangeMap.create()).get(destinationIpAddress);
}
}catch (Exception e){
logger.error("判定失败\n {} \n{}",value,e);
logger.debug("当前判断IP{}, 类型: {}", key, attackType);
if (threshold == null && baselineMap.containsKey(key)) {
finalResult = getDosEventLogByBaseline(value,key);
} else if (threshold == null && !baselineMap.containsKey(key)) {
finalResult = getDosEventLogBySensitivityThreshold(value);
} else if (threshold != null) {
finalResult = getDosEventLogByStaticThreshold(value, threshold);
} else {
logger.debug("未获取到当前server IP{} 类型 {} 静态阈值 和 baseline", key, attackType);
}
} catch (Exception e) {
logger.error("判定失败\n {} \n{}", value, e);
}
if (finalResult != null){
out.collect(finalResult);
}
}
@Override
public void processBroadcastElement(Map<String, Map<String, List<Integer>>> value, Context ctx, Collector<DosEventLog> out) {
try {
ctx.getBroadcastState(descriptor).put("broadcast-state", value);
}catch (Exception e){
logger.error("更新广播状态失败 {}",e);
public void processBroadcastElement(Map<String, byte[]> value, Context ctx, Collector<DosEventLog> out) throws Exception {
IpUtils.updateIpLook(value);
}
private DosEventLog getDosEventLogBySensitivityThreshold(DosSketchLog value) {
long sketchSessions = value.getSketch_sessions();
Integer staticSensitivityThreshold = NacosUtils.getIntProperty("static.sensitivity.threshold");
long diff = sketchSessions - staticSensitivityThreshold;
return getDosEventLog(value, staticSensitivityThreshold, diff,0, SENSITIVITY_CONDITION_TYPE, SESSIONS_TAG);
}
private DosEventLog getDosEventLogByBaseline(DosSketchLog value,String key) {
String attackType = value.getAttack_type();
long sketchSessions = value.getSketch_sessions();
DosBaselineThreshold dosBaselineThreshold = baselineMap.get(key).get(attackType);
Integer base = getBaseValue(dosBaselineThreshold, value);
long diff = sketchSessions - base;
return getDosEventLog(value, base, diff, 0,BASELINE_CONDITION_TYPE, SESSIONS_TAG);
}
private DosEventLog getDosEventLogByStaticThreshold(DosSketchLog value, DosDetectionThreshold threshold) throws CloneNotSupportedException {
long base = threshold.getSessionsPerSec();
long diff = value.getSketch_sessions() - base;
long profileId = threshold.getProfileId();
DosEventLog result = getDosEventLog(value, base, diff, profileId, STATIC_CONDITION_TYPE, SESSIONS_TAG);
if (result == null) {
base = threshold.getPacketsPerSec();
diff = value.getSketch_packets() - base;
profileId = threshold.getProfileId();
result = getDosEventLog(value, base, diff,profileId, STATIC_CONDITION_TYPE, PACKETS_TAG);
if (result == null) {
base = threshold.getBitsPerSec();
diff = value.getSketch_bytes() - base;
profileId=threshold.getProfileId();
result = getDosEventLog(value, base, diff, profileId, STATIC_CONDITION_TYPE, BITS_TAG);
}
}
/*
ArrayList<DosEventLog> dosEventLogs = new ArrayList<>();
if (result != null){
dosEventLogs.add(result);
Integer[] superiorIds = threshold.getSuperiorIds();
if (superiorIds != null && superiorIds.length > 0){
for (Integer integer:superiorIds){
DosEventLog clone = (DosEventLog) result.clone();
clone.setVsys_id(integer);
clone.setLog_id(SnowflakeId.generateId());
dosEventLogs.add(clone);
}
}
}
*/
return result;
}
public static void main(String[] args) {
DosDetection dosDetection = new DosDetection();
// HashSet<String> strings = new HashSet<>();
// strings.add("13.46.241.36");
// strings.add("25.46.241.45");
// strings.add("133.46.241.53");
// strings.add("219.46.242.74");
// strings.add("153.146.241.196");
// strings.add("132.46.241.21");
// String join = StringUtils.join(strings, ",");
// System.out.println(IpUtils.ipLookup.countryLookup("192.168.50.150"));
System.out.println(Severity.CRITICAL.severity);
private DosEventLog getDosEventLog(DosSketchLog value, long base, long diff, long profileId, int type, String tag) {
DosEventLog result = null;
String destinationIp = value.getDestination_ip();
String attackType = value.getAttack_type();
if (diff > 0 && base != 0) {
double percent = getDiffPercent(diff, base);
Severity severity = judgeSeverity(percent);
Integer staticSensitivityThreshold = NacosUtils.getIntProperty("static.sensitivity.threshold");
if (severity != Severity.NORMAL) {
if (type == BASELINE_CONDITION_TYPE && percent < NacosUtils.getDoubleProperty("baseline.sensitivity.threshold")) {
logger.debug("当前server IP:{},类型:{},基线值{}百分比{}未超过基线敏感阈值,日志详情\n{}", destinationIp, attackType, base, percent, value);
}else if ((type == BASELINE_CONDITION_TYPE || type == SENSITIVITY_CONDITION_TYPE) && value.getSketch_sessions() < staticSensitivityThreshold){
logger.debug("当前server IP:{},类型:{},基线值{}百分比{}未超过静态敏感阈值,日志详情\n{}",destinationIp, attackType, base, percent, value);
}else {
result = getResult(value, base, profileId, severity, percent+1, type, tag);
if (type == SENSITIVITY_CONDITION_TYPE){
result.setSeverity(Severity.MAJOR.severity);
}
logger.info("检测到当前server IP {} 存在 {} 异常,超出基线{} {}倍,基于{}:{}检测,日志详情\n {}", destinationIp,attackType,base,percent,type,tag,result);
}
} else {
logger.debug("当前server IP:{} 未出现 {} 异常,日志详情 {}", destinationIp, attackType, value);
}
}
return result;
}
private DosEventLog getResult(DosSketchLog value,Severity severity,String percent){
private DosEventLog getResult(DosSketchLog value, long base, long profileId, Severity severity, double percent, int type, String tag) {
DosEventLog dosEventLog = new DosEventLog();
dosEventLog.setLog_id(SnowflakeId.generateId());
dosEventLog.setVsys_id(value.getVsys_id());
dosEventLog.setStart_time(value.getSketch_start_time());
dosEventLog.setEnd_time(value.getSketch_start_time()+CommonConfig.FLINK_WINDOW_MAX_TIME);
dosEventLog.setEnd_time(value.getSketch_start_time() + value.getSketch_duration());
dosEventLog.setProfile_id(profileId);
dosEventLog.setAttack_type(value.getAttack_type());
dosEventLog.setSeverity(severity.toString());
dosEventLog.setConditions(getConditions(percent));
dosEventLog.setSeverity(severity.severity);
dosEventLog.setConditions(getConditions(PERCENT_INSTANCE.format(percent), base, value.getSketch_sessions(), type, tag));
dosEventLog.setDestination_ip(value.getDestination_ip());
dosEventLog.setDestination_country(IpUtils.ipLookup.countryLookup(value.getDestination_ip()));
String ipList = value.getSource_ip();
@@ -120,47 +202,108 @@ public class DosDetection extends BroadcastProcessFunction<DosSketchLog, Map<Str
return dosEventLog;
}
private String getConditions(String percent){
return "sessions > "+percent+" of baseline";
}
private String getSourceCountryList(String sourceIpList){
String[] ipArr = sourceIpList.split(",");
HashSet<String> countrySet = new HashSet<>();
for (String ip:ipArr){
countrySet.add(IpUtils.ipLookup.countryLookup(ip));
private Integer getBaseValue(DosBaselineThreshold dosBaselineThreshold, DosSketchLog value) {
Integer base = 0;
try {
if (dosBaselineThreshold != null) {
ArrayList<Integer> baselines = dosBaselineThreshold.getSession_rate();
Integer defaultVaule = dosBaselineThreshold.getSession_rate_default_value();
Integer sessionRateBaselineType = dosBaselineThreshold.getSession_rate_baseline_type();
if (baselines != null && baselines.size() == BASELINE_SIZE) {
int timeIndex = getCurrentTimeIndex(value.getSketch_start_time());
base = baselines.get(timeIndex);
if (base == 0) {
logger.debug("获取到当前IP: {},类型: {} baseline值为0,替换为P95观测值{}", value.getDestination_ip(), value.getAttack_type(), defaultVaule);
base = defaultVaule;
}
if (sessionRateBaselineType == OTHER_BASELINE_TYPE && base < NacosUtils.getIntProperty("static.sensitivity.threshold")){
base = NacosUtils.getIntProperty("static.sensitivity.threshold");
}
}
}
} catch (Exception e) {
logger.error("解析baseline数据失败,返回默认值0", e);
}
return StringUtils.join(countrySet,",");
return base;
}
private int getCurrentTimeIndex(long sketchStartTime){
long currentDayTime = sketchStartTime / (60 * 60 * 24) * 60 * 60 * 24;
long indexLong = (sketchStartTime - currentDayTime) / 600;
return Integer.parseInt(Long.toString(indexLong));
private String getConditions(String percent, long base, long sessions, int type, String tag) {
switch (type) {
case STATIC_CONDITION_TYPE:
return "Rate > " +
base + " " +
tag + "/s";
case BASELINE_CONDITION_TYPE:
return tag + " > " +
percent + " of baseline";
case SENSITIVITY_CONDITION_TYPE:
return String.valueOf(sessions) + " " +
tag + "/s Unusually high " +
StringUtils.capitalize(tag);
default:
throw new IllegalArgumentException("Illegal Argument type:" + type + ", known types = [1,2,3]");
}
}
private String getDiffPercent(long diff,long sketchSessions){
double diffDou = Double.parseDouble(Long.toString(diff));
double sessDou = Double.parseDouble(Long.toString(sketchSessions));
return PERCENT_INSTANCE.format(diffDou / sessDou);
private String getSourceCountryList(String sourceIpList) {
if (StringUtil.isNotBlank(sourceIpList)) {
String countryList;
try {
String[] ipArr = sourceIpList.split(",");
HashSet<String> countrySet = new HashSet<>();
for (String ip : ipArr) {
String country = IpUtils.ipLookup.countryLookup(ip);
if (StringUtil.isNotBlank(country)){
countrySet.add(country);
}
}
countryList = StringUtils.join(countrySet, ", ");
return countryList;
} catch (Exception e) {
logger.error("{} source IP lists 获取国家失败", sourceIpList, e);
return StringUtil.EMPTY;
}
} else {
throw new IllegalArgumentException("Illegal Argument sourceIpList = null");
}
}
private double getDiffPercentDouble(String diffPercent) throws ParseException {
return PERCENT_INSTANCE.parse(diffPercent).doubleValue();
private int getCurrentTimeIndex(long sketchStartTime) {
int index = 0;
try {
long currentDayTime = DateUtils.getTimeFloor(new Date(sketchStartTime * 1000L), "P1D").getTime() / 1000;
long indexLong = (sketchStartTime - currentDayTime) / (86400 / BASELINE_SIZE);
index = Integer.parseInt(Long.toString(indexLong));
} catch (Exception e) {
logger.error("获取time index失败", e);
}
return index;
}
private Severity judgeSeverity(double diffPercent){
if (diffPercent >= CommonConfig.BASELINE_SESSIONS_MINOR_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_WARNING_THRESHOLD){
public static void main(String[] args) {
// System.out.println(new DosDetection().getSourceCountryList("192.0.2.3,138.199.14.31,255.255.255.255,121.14.89.209," +
// "23.200.74.224,161.117.68.253"));
// DosDetection dosDetection = new DosDetection();
// System.out.println(dosDetection.judgeSeverity(dosDetection.getDiffPercent(499, 1000)));
}
private Double getDiffPercent(long diff, long base) {
return BigDecimal.valueOf((float) diff / base).setScale(4, BigDecimal.ROUND_HALF_UP).doubleValue();
}
private Severity judgeSeverity(double diffPercent) {
if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.minor.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.warning.threshold")) {
return Severity.MINOR;
}else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_WARNING_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_MAJOR_THRESHOLD){
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.warning.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.major.threshold")) {
return Severity.WARNING;
}else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_MAJOR_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_SEVERE_THRESHOLD){
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.major.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.severe.threshold")) {
return Severity.MAJOR;
}else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_SEVERE_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_CRITICAL_THRESHOLD){
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.severe.threshold") && diffPercent < NacosUtils.getDoubleProperty("baseline.sessions.critical.threshold")) {
return Severity.SEVERE;
}else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_CRITICAL_THRESHOLD){
} else if (diffPercent >= NacosUtils.getDoubleProperty("baseline.sessions.critical.threshold")) {
return Severity.CRITICAL;
}else {
} else {
return Severity.NORMAL;
}
}
@@ -188,4 +331,5 @@ public class DosDetection extends BroadcastProcessFunction<DosSketchLog, Map<Str
}
}
}
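
To make the new detection math concrete: getDiffPercent now divides the excess over the base by the base itself (the old code divided by the observed session count), judgeSeverity maps that ratio onto the baseline.sessions.*.threshold values read from nacos, and getCurrentTimeIndex picks one of BASELINE_SIZE = 144 slots per day, i.e. one 600-second bucket. A small worked sketch; the threshold constants below are illustrative stand-ins, since the actual nacos values are not part of this change:

public class SeverityExample {
    public static void main(String[] args) {
        // Illustrative stand-ins for baseline.sessions.{minor,warning,major,severe,critical}.threshold
        double minor = 0.25, warning = 0.5, major = 1.0, severe = 2.0, critical = 4.0;
        long base = 1000;       // baseline sessions/s for the matching time slot
        long observed = 1499;   // aggregated sketch_sessions for the current window
        double percent = (double) (observed - base) / base;   // 499 / 1000 = 0.499
        String severity =
                percent >= critical ? "CRITICAL" :
                percent >= severe   ? "SEVERE"   :
                percent >= major    ? "MAJOR"    :
                percent >= warning  ? "WARNING"  :
                percent >= minor    ? "MINOR"    : "NORMAL";
        // 144 slots per day -> 86400 / 144 = 600 s per slot (10-minute buckets)
        long sketchStartTime = 1_630_000_000L;            // epoch seconds
        long dayStart = sketchStartTime / 86400 * 86400;  // floor to the start of the UTC day
        int timeIndex = (int) ((sketchStartTime - dayStart) / 600);
        System.out.println(severity + ", baseline slot " + timeIndex);  // prints MINOR plus the slot index
    }
}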

View File

@@ -1,15 +1,15 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosSketchLog;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple6;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashSet;
@@ -18,18 +18,22 @@ import static com.zdjizhi.sink.OutputStreamSink.outputTag;
/**
* @author 94976
*/
public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosSketchLog, Tuple4<String,String,String,String>, TimeWindow> {
public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosSketchLog, Tuple3<String,String,Integer>, TimeWindow> {
// private static final Logger logger = LoggerFactory.getLogger(EtlProcessFunction.class);
private static final Log logger = LogFactory.get();
private static final String EMPTY_SOURCE_IP_IPV4 = "0.0.0.0";
private static final String EMPTY_SOURCE_IP_IPV6 = "::";
private static final Logger logger = LoggerFactory.getLogger(EtlProcessFunction.class);
@Override
public void process(Tuple4<String,String, String, String> keys,
public void process(Tuple3<String,String,Integer> keys,
Context context, Iterable<DosSketchLog> elements,
Collector<DosSketchLog> out) {
DosSketchLog middleResult = getMiddleResult(keys, elements);
try {
if (middleResult != null){
out.collect(middleResult);
logger.info("获取中间聚合结果:{}",middleResult.toString());
logger.debug("获取中间聚合结果:{}",middleResult.toString());
context.output(outputTag,TrafficServerIpMetrics.getOutputMetric(middleResult));
}
}catch (Exception e){
@@ -37,16 +41,15 @@ public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosS
}
}
private DosSketchLog getMiddleResult(Tuple4<String,String, String, String> keys,Iterable<DosSketchLog> elements){
private DosSketchLog getMiddleResult(Tuple3<String,String,Integer> keys,Iterable<DosSketchLog> elements){
DosSketchLog midResuleLog = new DosSketchLog();
Tuple6<Long, Long, Long,String,Long,Long> values = sketchAggregate(elements);
try {
if (values != null){
midResuleLog.setCommon_sled_ip(keys.f0);
midResuleLog.setCommon_data_center(keys.f1);
midResuleLog.setDestination_ip(keys.f3);
midResuleLog.setAttack_type(keys.f2);
midResuleLog.setAttack_type(keys.f0);
midResuleLog.setDestination_ip(keys.f1);
midResuleLog.setVsys_id(keys.f2);
midResuleLog.setSketch_start_time(values.f4);
midResuleLog.setSketch_duration(values.f5);
midResuleLog.setSource_ip(values.f3);
@@ -62,27 +65,32 @@ public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosS
}
private Tuple6<Long, Long, Long,String,Long,Long> sketchAggregate(Iterable<DosSketchLog> elements){
int cnt = 1;
long sessions = 0;
long packets = 0 ;
long bytes = 0;
long startTime = 0;
long startTime = System.currentTimeMillis()/1000;
long endTime = System.currentTimeMillis()/1000;
long duration = 0;
HashSet<String> sourceIpSet = new HashSet<>();
try {
for (DosSketchLog newSketchLog : elements){
sessions += newSketchLog.getSketch_sessions();
packets += newSketchLog.getSketch_packets();
bytes += newSketchLog.getSketch_bytes();
startTime = newSketchLog.getSketch_start_time();
duration = newSketchLog.getSketch_duration();
cnt += 1;
if (sourceIpSet.size() < CommonConfig.SOURCE_IP_LIST_LIMIT){
sourceIpSet.add(newSketchLog.getSource_ip());
String sourceIp = newSketchLog.getSource_ip();
if (StringUtils.equals(sourceIp,EMPTY_SOURCE_IP_IPV4) || StringUtils.equals(sourceIp,EMPTY_SOURCE_IP_IPV6)){
sessions += newSketchLog.getSketch_sessions();
packets += newSketchLog.getSketch_packets();
bytes += newSketchLog.getSketch_bytes();
startTime = newSketchLog.getSketch_start_time() > startTime ? startTime : newSketchLog.getSketch_start_time();
endTime = newSketchLog.getSketch_start_time() > endTime ? newSketchLog.getSketch_start_time() : endTime;
duration = endTime - startTime == 0 ? 5 : endTime - startTime;
}else {
if (sourceIpSet.size() < CommonConfig.SOURCE_IP_LIST_LIMIT){
sourceIpSet.add(sourceIp);
}
}
}
String sourceIpList = StringUtils.join(sourceIpSet, ",");
return Tuple6.of(sessions/cnt,packets/cnt,bytes/cnt,sourceIpList,startTime,duration);
return Tuple6.of(sessions/CommonConfig.FLINK_WINDOW_MAX_TIME,packets/CommonConfig.FLINK_WINDOW_MAX_TIME,
bytes*8/CommonConfig.FLINK_WINDOW_MAX_TIME,sourceIpList,startTime,duration);
}catch (Exception e){
logger.error("聚合中间结果集失败 {}",e);
}
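
The aggregation now divides the window totals by FLINK_WINDOW_MAX_TIME instead of the element count, and multiplies bytes by 8 so the third value is a bit rate; all three tuple fields are therefore per-second rates. A quick sanity check, assuming a 5-second window purely for illustration (the real value comes from flink.window.max.time):

public class RateMathCheck {
    public static void main(String[] args) {
        long windowSeconds = 5;                                      // assumed window length, illustrative
        long sessions = 2_500, packets = 40_000, bytes = 1_000_000;  // totals summed over the window
        System.out.println(sessions / windowSeconds);                // 500 sessions/s
        System.out.println(packets / windowSeconds);                 // 8000 packets/s
        System.out.println(bytes * 8 / windowSeconds);               // 1600000 bits/s
    }
}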

View File

@@ -0,0 +1,110 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosBaselineThreshold;
import com.zdjizhi.utils.DateUtils;
import com.zdjizhi.utils.HbaseUtils;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.*;
public class ParseBaselineThreshold {
// private static final Logger logger = LoggerFactory.getLogger(ParseBaselineThreshold.class);
private static final Log logger = LogFactory.get();
private static ArrayList<String> floodTypeList = new ArrayList<>();
private static Table table = null;
private static Scan scan = null;
static {
floodTypeList.add("TCP SYN Flood");
floodTypeList.add("UDP Flood");
floodTypeList.add("ICMP Flood");
floodTypeList.add("DNS Flood");
}
private static void prepareHbaseEnv() throws IOException {
org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
config.set("hbase.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
config.set("hbase.client.retries.number", "3");
config.set("hbase.bulkload.retries.number", "3");
config.set("zookeeper.recovery.retry", "3");
config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, CommonConfig.HBASE_CLIENT_OPERATION_TIMEOUT);
config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CommonConfig.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
TableName tableName = TableName.valueOf(CommonConfig.HBASE_BASELINE_TABLE_NAME);
Connection conn = ConnectionFactory.createConnection(config);
table = conn.getTable(tableName);
long currentTimeMillis = System.currentTimeMillis();
scan = new Scan()
.setAllowPartialResults(true)
.setTimeRange(DateUtils.getSomeDate(new Date(currentTimeMillis), Math.negateExact(CommonConfig.HBASE_BASELINE_TTL)).getTime(), currentTimeMillis)
.setLimit(CommonConfig.HBASE_BASELINE_TOTAL_NUM);
logger.info("连接hbase成功正在读取baseline数据");
}
static Map<String, Map<String, DosBaselineThreshold>> readFromHbase() {
Map<String, Map<String, DosBaselineThreshold>> baselineMap = new HashMap<>();
try {
prepareHbaseEnv();
logger.info("开始读取baseline数据");
ResultScanner rs = table.getScanner(scan);
for (Result result : rs) {
Map<String, DosBaselineThreshold> floodTypeMap = new HashMap<>();
String rowkey = Bytes.toString(result.getRow());
for (String type:floodTypeList){
DosBaselineThreshold baselineThreshold = new DosBaselineThreshold();
ArrayList<Integer> sessionRate = HbaseUtils.getArraylist(result, type, "session_rate");
if (sessionRate != null && !sessionRate.isEmpty()){
Integer defaultValue = HbaseUtils.getIntegerValue(result, type, "session_rate_default_value");
Integer rateBaselineType = HbaseUtils.getIntegerValue(result, type, "session_rate_baseline_type");
baselineThreshold.setSession_rate(sessionRate);
baselineThreshold.setSession_rate_default_value(defaultValue);
baselineThreshold.setSession_rate_baseline_type(rateBaselineType);
floodTypeMap.put(type,baselineThreshold);
}
}
baselineMap.put(rowkey, floodTypeMap);
}
logger.info("格式化baseline数据成功读取IP共{}", baselineMap.size());
} catch (Exception e) {
logger.error("读取hbase数据失败", e);
}
return baselineMap;
}
public static void main(String[] args) {
long currentTimeMillis = System.currentTimeMillis();
long p200D = DateUtils.getSomeDate(new Date(currentTimeMillis), Math.negateExact(CommonConfig.HBASE_BASELINE_TTL)).getTime();
System.out.println(p200D);
System.out.println(currentTimeMillis);
System.out.println(currentTimeMillis - p200D);
Map<String, Map<String, DosBaselineThreshold>> baselineMap = readFromHbase();
Set<String> keySet = baselineMap.keySet();
for (String key : keySet) {
Map<String, DosBaselineThreshold> stringTuple2Map = baselineMap.get(key);
Set<String> strings = stringTuple2Map.keySet();
for (String s:strings){
DosBaselineThreshold dosBaselineThreshold = stringTuple2Map.get(s);
System.out.println(key+"---"+s+"---"+dosBaselineThreshold);
}
}
System.out.println(baselineMap.size());
}
}
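
Note (illustrative): readFromHbase() returns a two-level map, keyed first by the HBase row key (the destination IP) and then by flood type. A minimal, hypothetical lookup sketch is shown below; the concrete IP, value, and simplified Integer baseline are assumptions for illustration, not part of the job.

import java.util.HashMap;
import java.util.Map;

// Illustrative only: the real map values are DosBaselineThreshold objects built in readFromHbase().
public class BaselineLookupSketch {
    public static void main(String[] args) {
        Map<String, Map<String, Integer>> baselineMap = new HashMap<>();
        Map<String, Integer> byFloodType = new HashMap<>();
        byFloodType.put("TCP SYN Flood", 2000);    // hypothetical session-rate baseline
        baselineMap.put("10.0.0.1", byFloodType);  // row key is the destination IP

        // Lookup pattern at detection time: destination IP first, then attack type.
        Integer baseline = baselineMap
                .getOrDefault("10.0.0.1", new HashMap<>())
                .get("TCP SYN Flood");
        System.out.println("baseline session rate = " + baseline);
    }
}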

View File

@@ -1,5 +1,6 @@
package com.zdjizhi.etl;
import com.fasterxml.jackson.databind.JavaType;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.source.DosSketchSource;
@@ -14,12 +15,18 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.*;
/**
* @author wlh
*/
public class ParseSketchLog {
private static Logger logger = LoggerFactory.getLogger(ParseSketchLog.class);
private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
private static JavaType hashmapJsonType = jsonMapperInstance.createCollectionType(HashMap.class, String.class, Object.class);
private static JavaType listType = jsonMapperInstance.createCollectionType(ArrayList.class, HashMap.class);
public static SingleOutputStreamOperator<DosSketchLog> getSketchSource(){
return flatSketchSource().assignTimestampsAndWatermarks(createWatermarkStrategy());
@@ -37,23 +44,21 @@ public class ParseSketchLog {
private static class FlatSketchLog implements FlatMapFunction<String, DosSketchLog> {
@Override
public void flatMap(String s, Collector<DosSketchLog> collector) throws Exception {
public void flatMap(String s, Collector<DosSketchLog> collector) {
try {
if (StringUtil.isNotBlank(s)){
HashMap<String, Object> sketchSource = (HashMap<String, Object>) JsonMapper.fromJsonString(s, Object.class);
String commonSledIp = sketchSource.get("common_sled_ip").toString();
String commonDataCenter = sketchSource.get("common_data_center").toString();
HashMap<String, Object> sketchSource = jsonMapperInstance.fromJson(s, hashmapJsonType);
long sketchStartTime = Long.parseLong(sketchSource.get("sketch_start_time").toString());
long sketchDuration = Long.parseLong(sketchSource.get("sketch_duration").toString());
String attackType = sketchSource.get("attack_type").toString();
ArrayList<HashMap<String, Object>> reportIpList = (ArrayList<HashMap<String, Object>>) sketchSource.get("report_ip_list");
int vsysId = Integer.parseInt(sketchSource.getOrDefault("common_vsys_id", 1).toString());
ArrayList<HashMap<String, Object>> reportIpList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(sketchSource.get("report_ip_list")), listType);
for (HashMap<String, Object> obj : reportIpList) {
DosSketchLog dosSketchLog = new DosSketchLog();
dosSketchLog.setCommon_sled_ip(commonSledIp);
dosSketchLog.setCommon_data_center(commonDataCenter);
dosSketchLog.setSketch_start_time(sketchStartTime);
dosSketchLog.setSketch_duration(sketchDuration);
dosSketchLog.setAttack_type(attackType);
dosSketchLog.setVsys_id(vsysId);
String sourceIp = obj.get("source_ip").toString();
String destinationIp = obj.get("destination_ip").toString();
long sketchSessions = Long.parseLong(obj.get("sketch_sessions").toString());
@@ -61,11 +66,11 @@ public class ParseSketchLog {
long sketchBytes = Long.parseLong(obj.get("sketch_bytes").toString());
dosSketchLog.setSource_ip(sourceIp);
dosSketchLog.setDestination_ip(destinationIp);
dosSketchLog.setSketch_sessions(sketchSessions/sketchDuration);
dosSketchLog.setSketch_packets(sketchPackets/sketchDuration);
dosSketchLog.setSketch_bytes(sketchBytes*8/sketchDuration);
dosSketchLog.setSketch_sessions(sketchSessions);
dosSketchLog.setSketch_packets(sketchPackets);
dosSketchLog.setSketch_bytes(sketchBytes);
collector.collect(dosSketchLog);
logger.info("数据解析成功:{}",dosSketchLog.toString());
logger.debug("数据解析成功:{}",dosSketchLog.toString());
}
}
} catch (Exception e) {

View File

@@ -0,0 +1,291 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.fasterxml.jackson.databind.JavaType;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosDetectionThreshold;
import com.zdjizhi.common.DosVsysId;
import com.zdjizhi.utils.HttpClientUtils;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.NacosUtils;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import org.apache.flink.shaded.guava18.com.google.common.collect.Range;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.message.BasicHeader;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
* @author wlh
*/
public class ParseStaticThreshold {
// private static Logger logger = LoggerFactory.getLogger(ParseStaticThreshold.class);
private static final Log logger = LogFactory.get();
private static String encryptpwd;
private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
private static JavaType hashmapJsonType = jsonMapperInstance.createCollectionType(HashMap.class, String.class, Object.class);
private static JavaType thresholdType = jsonMapperInstance.createCollectionType(ArrayList.class, DosDetectionThreshold.class);
private static JavaType vsysIDType = jsonMapperInstance.createCollectionType(ArrayList.class, DosVsysId.class);
static {
// load the encrypted login password
encryptpwd = getEncryptpwd();
}
/**
* Fetch the encrypted login password from the bifang server.
*/
private static String getEncryptpwd() {
String psw = HttpClientUtils.ERROR_MESSAGE;
try {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("password", "admin");
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_ENCRYPTPWD_PATH, parms);
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build());
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
psw = data.get("encryptpwd").toString();
} else {
logger.error(msg);
}
}
} catch (URISyntaxException e) {
logger.error("构造URI异常", e);
} catch (Exception e) {
logger.error("获取encryptpwd失败", e);
}
return psw;
}
/**
* Log in to the bifang server and obtain a token.
*
* @return token
*/
private static String loginBifangServer() {
String token = HttpClientUtils.ERROR_MESSAGE;
try {
if (!HttpClientUtils.ERROR_MESSAGE.equals(encryptpwd)) {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("username", "admin");
parms.put("password", encryptpwd);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_LOGIN_PATH, parms);
String resposeJsonStr = HttpClientUtils.httpPost(uriBuilder.build(), null);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
token = data.get("token").toString();
} else {
logger.error(msg);
}
}
}
} catch (Exception e) {
logger.error("登录失败,未获取到token ", e);
}
return token;
}
/**
* Fetch the vsysId configuration list.
*
* @return vsysIdList
*/
private static ArrayList<DosVsysId> getVsysId() {
ArrayList<DosVsysId> vsysIdList = null;
try {
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("pageSize", -1);
// parms.put("orderBy", "vsysId desc");
parms.put("type", 1);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_VSYSID_PATH, parms);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
BasicHeader authorization = new BasicHeader("Authorization", token);
BasicHeader authorization1 = new BasicHeader("Content-Type", "application/x-www-form-urlencoded");
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build(), authorization, authorization1);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
Object list = data.get("list");
if (list != null) {
vsysIdList = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), vsysIDType);
logger.info("获取到vsysId {}条", vsysIdList.size());
} else {
logger.warn("vsysIdList为空");
}
} else {
logger.error(msg);
}
}
}
} catch (Exception e) {
logger.error("获取vsysId失败,请检查bifang服务或登录配置信息 ", e);
}
return vsysIdList;
}
/**
* Fetch the static threshold configuration list for each vsysId.
*
* @return thresholds
*/
private static ArrayList<DosDetectionThreshold> getDosDetectionThreshold() {
ArrayList<DosDetectionThreshold> vsysThresholds = new ArrayList<>();
ArrayList<DosVsysId> vsysIds = getVsysId();
try {
if (vsysIds != null) {
for (DosVsysId dosVsysId : vsysIds) {
Integer vsysId = dosVsysId.getId() == null ? 1 : dosVsysId.getId();
Integer[] superiorIds = dosVsysId.getSuperiorIds();
URIBuilder uriBuilder = new URIBuilder(CommonConfig.BIFANG_SERVER_URI);
HashMap<String, Object> parms = new HashMap<>();
parms.put("pageSize", -1);
parms.put("orderBy", "profileId asc");
parms.put("isValid", 1);
parms.put("vsysId", vsysId);
HttpClientUtils.setUrlWithParams(uriBuilder, CommonConfig.BIFANG_SERVER_POLICY_THRESHOLD_PATH, parms);
String token = NacosUtils.getStringProperty("bifang.server.token");
if (!HttpClientUtils.ERROR_MESSAGE.equals(token)) {
BasicHeader authorization = new BasicHeader("Authorization", token);
BasicHeader authorization1 = new BasicHeader("Content-Type", "application/x-www-form-urlencoded");
String resposeJsonStr = HttpClientUtils.httpGet(uriBuilder.build(), authorization, authorization1);
if (!HttpClientUtils.ERROR_MESSAGE.equals(resposeJsonStr)) {
HashMap<String, Object> resposeMap = jsonMapperInstance.fromJson(resposeJsonStr, hashmapJsonType);
boolean success = (boolean) resposeMap.get("success");
String msg = resposeMap.get("msg").toString();
if (success) {
HashMap<String, Object> data = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(resposeMap.get("data")), hashmapJsonType);
Object list = data.get("list");
if (list != null) {
ArrayList<DosDetectionThreshold> thresholds = jsonMapperInstance.fromJson(jsonMapperInstance.toJson(list), thresholdType);
for (DosDetectionThreshold dosDetectionThreshold : thresholds) {
dosDetectionThreshold.setSuperiorIds(superiorIds);
vsysThresholds.add(dosDetectionThreshold);
}
logger.info("获取到vsys id是{}静态阈值配置{}条", vsysId, thresholds.size());
} else {
logger.warn("静态阈值配置为空");
}
} else {
logger.error(msg);
}
}
}
}
}
} catch (Exception e) {
logger.error("获取静态阈值配置失败,请检查bifang服务或登录配置信息 ", e);
}
return vsysThresholds;
}
/**
* Build the threshold RangeMap from the static threshold configuration; key: an IP range or a specific IP, value: the threshold configuration.
*
* @return threshold RangeMap
*/
static HashMap<Integer, HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> createStaticThreshold() {
HashMap<Integer, HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> thresholdRangeMap = new HashMap<>(4);
try {
ArrayList<DosDetectionThreshold> dosDetectionThreshold = getDosDetectionThreshold();
if (dosDetectionThreshold != null && !dosDetectionThreshold.isEmpty()) {
for (DosDetectionThreshold threshold : dosDetectionThreshold) {
String attackType = threshold.getAttackType();
int vsysId = threshold.getVsysId();
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> rangeMap = thresholdRangeMap.getOrDefault(vsysId, new HashMap<>());
TreeRangeMap<IPAddress, DosDetectionThreshold> treeRangeMap = rangeMap.getOrDefault(attackType, TreeRangeMap.create());
ArrayList<String> serverIpList = threshold.getServerIpList();
for (String sip : serverIpList) {
IPAddressString ipAddressString = new IPAddressString(sip);
if (ipAddressString.isIPAddress()) {
IPAddress address = ipAddressString.getAddress();
if (address.isPrefixed()) {
IPAddress lower = address.getLower();
IPAddress upper = address.getUpper();
if (!address.isMultiple()) {
lower = address.adjustPrefixLength(address.getBitCount());
upper = address.toMaxHost().withoutPrefixLength();
}
Map.Entry<Range<IPAddress>, DosDetectionThreshold> lowerEntry = treeRangeMap.getEntry(lower);
Map.Entry<Range<IPAddress>, DosDetectionThreshold> upperEntry = treeRangeMap.getEntry(upper);
if (lowerEntry != null && upperEntry == null) {
Range<IPAddress> lowerEntryKey = lowerEntry.getKey();
DosDetectionThreshold lowerEntryValue = lowerEntry.getValue();
treeRangeMap.put(Range.closedOpen(lowerEntryKey.lowerEndpoint(), lower), lowerEntryValue);
treeRangeMap.put(Range.closed(lower, upper), threshold);
} else if (lowerEntry == null && upperEntry != null) {
Range<IPAddress> upperEntryKey = upperEntry.getKey();
DosDetectionThreshold upperEntryValue = upperEntry.getValue();
treeRangeMap.put(Range.openClosed(upper, upperEntryKey.upperEndpoint()), upperEntryValue);
treeRangeMap.put(Range.closed(lower, upper), threshold);
} else {
treeRangeMap.put(Range.closed(lower, upper), threshold);
}
} else {
treeRangeMap.put(Range.closed(address, address), threshold);
}
}
}
rangeMap.put(attackType, treeRangeMap);
thresholdRangeMap.put(vsysId, rangeMap);
}
}
} catch (Exception e) {
logger.error("构建threshold RangeMap失败", e);
}
return thresholdRangeMap;
}
public static void main(String[] args) {
/*
ArrayList<DosDetectionThreshold> dosDetectionThreshold = getDosDetectionThreshold();
// dosDetectionThreshold.forEach(System.out::println);
getVsysId().forEach(System.out::println);
System.out.println("------------------------");
*/
HashMap<Integer, HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>>> staticThreshold = createStaticThreshold();
System.out.println("------------------------");
for (Integer integer : staticThreshold.keySet()) {
HashMap<String, TreeRangeMap<IPAddress, DosDetectionThreshold>> stringTreeRangeMapHashMap = staticThreshold.get(integer);
for (String type : stringTreeRangeMapHashMap.keySet()) {
Map<Range<IPAddress>, DosDetectionThreshold> asMapOfRanges = stringTreeRangeMapHashMap.get(type).asMapOfRanges();
for (Range<IPAddress> range : asMapOfRanges.keySet()) {
DosDetectionThreshold threshold = asMapOfRanges.get(range);
System.out.println(integer + "---" + type + "---" + range + "---" + threshold);
}
System.out.println("------------------------");
}
}
// String s = loginBifangServer();
// System.out.println(s);
}
}
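
The javadoc above describes a nested structure: vsysId → attack type → a RangeMap keyed by IP range. The overlap-splitting behaviour in createStaticThreshold() can be illustrated with a minimal sketch; it uses plain Guava and Integer endpoints instead of the shaded Guava and IPAddress types used by the job (an assumption made for brevity).

import com.google.common.collect.Range;
import com.google.common.collect.TreeRangeMap;

public class RangeMapSplitSketch {
    public static void main(String[] args) {
        TreeRangeMap<Integer, String> map = TreeRangeMap.create();
        map.put(Range.closed(10, 20), "profile-A");   // existing entry

        int lower = 15, upper = 30;                   // new range overlapping the tail of profile-A
        Range<Integer> existingRange = map.getEntry(lower).getKey();
        String existingValue = map.getEntry(lower).getValue();
        // Trim the overlapped neighbour, then insert the new closed range, as createStaticThreshold() does.
        map.put(Range.closedOpen(existingRange.lowerEndpoint(), lower), existingValue); // keep [10,15) -> profile-A
        map.put(Range.closed(lower, upper), "profile-B");                               // add  [15,30] -> profile-B

        System.out.println(map.get(12)); // profile-A
        System.out.println(map.get(18)); // profile-B
    }
}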

View File

@@ -1,27 +1,27 @@
package com.zdjizhi.etl;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosMetricsLog;
import com.zdjizhi.common.DosSketchLog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
class TrafficServerIpMetrics {
private static final Logger logger = LoggerFactory.getLogger(TrafficServerIpMetrics.class);
// private static final Logger logger = LoggerFactory.getLogger(TrafficServerIpMetrics.class);
private static final Log logger = LogFactory.get();
static DosMetricsLog getOutputMetric(DosSketchLog midResuleLog) {
DosMetricsLog dosMetricsLog = new DosMetricsLog();
dosMetricsLog.setSketch_start_time(timeFloor(System.currentTimeMillis()/1000));
dosMetricsLog.setCommon_sled_ip(midResuleLog.getCommon_sled_ip());
dosMetricsLog.setCommon_data_center(midResuleLog.getCommon_data_center());
dosMetricsLog.setDestination_ip(midResuleLog.getDestination_ip());
dosMetricsLog.setAttack_type(midResuleLog.getAttack_type());
dosMetricsLog.setSession_rate(midResuleLog.getSketch_sessions());
dosMetricsLog.setPacket_rate(midResuleLog.getSketch_packets());
dosMetricsLog.setBit_rate(midResuleLog.getSketch_bytes());
logger.info("metric 结果已加载:{}",dosMetricsLog.toString());
dosMetricsLog.setVsys_id(midResuleLog.getVsys_id());
dosMetricsLog.setPartition_num(getPartitionNumByIp(midResuleLog.getDestination_ip()));
logger.debug("metric 结果已加载:{}",dosMetricsLog.toString());
return dosMetricsLog;
}
@@ -29,4 +29,13 @@ class TrafficServerIpMetrics {
return sketchStartTime / CommonConfig.FLINK_WINDOW_MAX_TIME * CommonConfig.FLINK_WINDOW_MAX_TIME;
}
private static int getPartitionNumByIp(String destinationIp){
return Math.abs(destinationIp.hashCode()) % CommonConfig.DESTINATION_IP_PARTITION_NUM;
}
public static void main(String[] args) {
System.out.println(getPartitionNumByIp("146.177.223.43"));
System.out.println("146.177.223.43".hashCode());
}
}
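
getPartitionNumByIp() folds the destination IP's string hash into a fixed number of partitions. A standalone sketch of the same arithmetic, with an assumed partition count of 8 (the job reads the real value from CommonConfig.DESTINATION_IP_PARTITION_NUM):

public class PartitionSketch {
    // Assumed partition count for illustration only.
    private static final int PARTITIONS = 8;

    static int partitionFor(String destinationIp) {
        // Same arithmetic as getPartitionNumByIp: hash the IP string and fold it into [0, PARTITIONS).
        // Note: if hashCode() happens to be Integer.MIN_VALUE, Math.abs() stays negative,
        // so a defensive variant could use Math.floorMod(hash, PARTITIONS) instead.
        return Math.abs(destinationIp.hashCode()) % PARTITIONS;
    }

    public static void main(String[] args) {
        System.out.println(partitionFor("146.177.223.43"));
    }
}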

View File

@@ -2,6 +2,10 @@ package com.zdjizhi.main;
import com.zdjizhi.sink.OutputStreamSink;
/**
* @author wlh
* Main entry point of the application.
*/
public class DosDetectionApplication {
public static void main(String[] args) {

View File

@@ -6,10 +6,15 @@ import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.KafkaUtils;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import java.util.Objects;
class DosEventSink {
static void dosEventOutputSink(SingleOutputStreamOperator<DosEventLog> dosEventLogOutputStream){
dosEventLogOutputStream.map(JsonMapper::toJsonString).addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_EVENT_TOPIC_NAME))
dosEventLogOutputStream
.filter(Objects::nonNull)
.map(JsonMapper::toJsonString)
.addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_EVENT_TOPIC_NAME))
.setParallelism(CommonConfig.KAFKA_OUTPUT_EVENT_PARALLELISM);
}

View File

@@ -1,48 +1,43 @@
package com.zdjizhi.sink;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.common.DosMetricsLog;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.etl.EtlProcessFunction;
import com.zdjizhi.etl.DosDetection;
import com.zdjizhi.etl.EtlProcessFunction;
import com.zdjizhi.etl.ParseSketchLog;
import com.zdjizhi.source.BaselineSource;
import com.zdjizhi.source.DosSketchSource;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.api.java.typeutils.MapTypeInfo;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.Map;
import java.util.Properties;
/**
* @author 94976
*/
public class OutputStreamSink {
private static final Logger logger = LoggerFactory.getLogger(OutputStreamSink.class);
// private static final Logger logger = LoggerFactory.getLogger(OutputStreamSink.class);
private static final Log logger = LogFactory.get();
public static OutputTag<DosMetricsLog> outputTag = new OutputTag<DosMetricsLog>("traffic server ip metrics"){};
private static MapStateDescriptor<String, Map<String, Map<String, List<Integer>>>> descriptor = new MapStateDescriptor<>("boradcast-state",
Types.STRING,
new MapTypeInfo<>(String.class, new MapTypeInfo<>(String.class, (Class<List<Integer>>) (Class<?>) List.class).getTypeClass()));
public static void finalOutputSink(){
try {
SingleOutputStreamOperator<DosSketchLog> middleStream = getMiddleStream();
SingleOutputStreamOperator<DosEventLog> dosEventLogOutputStream = getOutputSinkStream(middleStream);
DosEventSink.dosEventOutputSink(dosEventLogOutputStream);
DosEventSink.dosEventOutputSink(getEventSinkStream(middleStream));
TrafficServerIpMetricsSink.sideOutputMetricsSink(middleStream);
FlinkEnvironmentUtils.streamExeEnv.execute(CommonConfig.STREAM_EXECUTION_JOB_NAME);
} catch (Exception e) {
@@ -50,88 +45,47 @@ public class OutputStreamSink {
}
}
public static void main(String[] args) throws Exception {
SingleOutputStreamOperator<DosSketchLog> middleStream = getMiddleStream();
SingleOutputStreamOperator<DosEventLog> dosEventLogOutputStream = getOutputSinkStream(middleStream);
DosEventSink.dosEventOutputSink(dosEventLogOutputStream);
TrafficServerIpMetricsSink.sideOutputMetricsSink(middleStream);
dosEventLogOutputStream.print();
FlinkEnvironmentUtils.streamExeEnv.execute();
}
private static SingleOutputStreamOperator<DosEventLog> getEventSinkStream(SingleOutputStreamOperator<DosSketchLog> middleStream){
DataStreamSource<Map<String, byte[]>> broadcastSource=null;
Properties nacosProperties = new Properties();
private static SingleOutputStreamOperator<DosEventLog> getOutputSinkStream(SingleOutputStreamOperator<DosSketchLog> middleStream){
nacosProperties.put(PropertyKeyConst.SERVER_ADDR,CommonConfig.NACOS_SERVER_ADDR);
nacosProperties.setProperty(PropertyKeyConst.USERNAME, CommonConfig.NACOS_USERNAME);
nacosProperties.setProperty(PropertyKeyConst.PASSWORD, CommonConfig.NACOS_PASSWORD);
nacosProperties.setProperty(PropertyKeyConst.NAMESPACE, CommonConfig.NACOS_NAMESPACE);
BroadcastStream<Map<String, Map<String,List<Integer>>>> broadcast = FlinkEnvironmentUtils.streamExeEnv
.addSource(new BaselineSource())
.setParallelism(CommonConfig.HBASE_INPUT_PARALLELISM)
.broadcast(descriptor);
logger.info("广播变量加载成功!!");
if ("CLUSTER".equals(CommonConfig.CLUSTER_OR_SINGLE)){
broadcastSource = DosSketchSource.broadcastSource(nacosProperties,CommonConfig.HDFS_PATH);
}else {
broadcastSource= DosSketchSource.singleBroadcastSource(nacosProperties);
}
return middleStream.keyBy(new SecondKeySelector())
// .window(TumblingEventTimeWindows.of(Time.seconds(CommonConfig.FLINK_WINDOW_MAX_TIME)))
.reduce(new SecondReduceFunc())
MapStateDescriptor<String,Map> descriptor =
new MapStateDescriptor<>("descriptorTest", Types.STRING, TypeInformation.of(Map.class));
BroadcastStream<Map<String, byte[]>> broadcast = broadcastSource.broadcast(descriptor);
return middleStream
.connect(broadcast)
.process(new DosDetection())
.setParallelism(CommonConfig.FLINK_SECOND_AGG_PARALLELISM);
.process(new DosDetection()).setParallelism(CommonConfig.FLINK_DETECTION_MAP_PARALLELISM);
}
private static SingleOutputStreamOperator<DosSketchLog> getMiddleStream(){
return ParseSketchLog.getSketchSource()
.keyBy(new FirstKeySelector())
.keyBy(new KeysSelector())
.window(TumblingEventTimeWindows.of(Time.seconds(CommonConfig.FLINK_WINDOW_MAX_TIME)))
.process(new EtlProcessFunction())
.setParallelism(CommonConfig.FLINK_FIRST_AGG_PARALLELISM);
}
private static String groupUniqSourceIp(String sourceIp1,String sourceIp2){
HashSet<String> sourceIpSet = new HashSet<>();
Collections.addAll(sourceIpSet, (sourceIp1 + "," + sourceIp2).split(","));
if (sourceIpSet.size() > CommonConfig.SOURCE_IP_LIST_LIMIT){
return StringUtils.join(takeUniqLimit(sourceIpSet,CommonConfig.SOURCE_IP_LIST_LIMIT),",");
}
return StringUtils.join(sourceIpSet,",");
}
private static<T> Collection<T> takeUniqLimit(Collection<T> collection, int limit){
int i =0;
Collection<T> newSet = new HashSet<>();
for (T t:collection){
if (i < limit){
newSet.add(t);
i += 1;
}
}
return newSet;
}
private static class FirstKeySelector implements KeySelector<DosSketchLog, Tuple4<String, String, String, String>>{
private static class KeysSelector implements KeySelector<DosSketchLog, Tuple3<String, String, Integer>>{
@Override
public Tuple4<String, String, String, String> getKey(DosSketchLog dosSketchLog) throws Exception {
return Tuple4.of(
dosSketchLog.getCommon_sled_ip(),
dosSketchLog.getCommon_data_center(),
public Tuple3<String, String, Integer> getKey(DosSketchLog dosSketchLog){
return Tuple3.of(
dosSketchLog.getAttack_type(),
dosSketchLog.getDestination_ip());
}
}
private static class SecondKeySelector implements KeySelector<DosSketchLog, Tuple2<String, String>> {
@Override
public Tuple2<String, String> getKey(DosSketchLog dosSketchLog) throws Exception {
return Tuple2.of(
dosSketchLog.getAttack_type(),
dosSketchLog.getDestination_ip());
}
}
private static class SecondReduceFunc implements ReduceFunction<DosSketchLog> {
@Override
public DosSketchLog reduce(DosSketchLog value1, DosSketchLog value2) throws Exception {
value1.setSketch_sessions((value1.getSketch_sessions()+value2.getSketch_sessions())/2);
value1.setSketch_bytes((value1.getSketch_bytes()+value2.getSketch_bytes())/2);
value1.setSketch_packets((value1.getSketch_packets()+value2.getSketch_packets())/2);
value1.setSource_ip(groupUniqSourceIp(value1.getSource_ip(),value2.getSource_ip()));
return value1;
dosSketchLog.getDestination_ip(),
dosSketchLog.getVsys_id());
}
}

View File

@@ -16,6 +16,10 @@ class TrafficServerIpMetricsSink {
DataStream<DosMetricsLog> sideOutput = outputStream.getSideOutput(outputTag);
sideOutput.map(JsonMapper::toJsonString).addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_METRIC_TOPIC_NAME))
.setParallelism(CommonConfig.KAFKA_OUTPUT_METRIC_PARALLELISM);
}
}

View File

@@ -1,128 +0,0 @@
package com.zdjizhi.source;
import com.zdjizhi.common.CommonConfig;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* @author wlh
*/
public class BaselineSource extends RichSourceFunction<Map<String, Map<String,List<Integer>>>> {
private static final Logger logger = LoggerFactory.getLogger(BaselineSource.class);
private Connection conn = null;
private Table table = null;
private Scan scan = null;
@Override
public void open(Configuration parameters) throws Exception {
org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();
config.set("hbase.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
config.set("hbase.client.retries.number", "3");
config.set("hbase.bulkload.retries.number", "3");
config.set("zookeeper.recovery.retry", "3");
config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, CommonConfig.HBASE_CLIENT_OPERATION_TIMEOUT);
config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CommonConfig.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
TableName tableName = TableName.valueOf(CommonConfig.HBASE_BASELINE_TABLE_NAME);
conn = ConnectionFactory.createConnection(config);
table = conn.getTable(tableName);
scan = new Scan().setAllowPartialResults(true).setLimit(CommonConfig.HBASE_BASELINE_TOTAL_NUM);
logger.info("连接hbase成功正在读取baseline数据");
// .addFamily(Bytes.toBytes(CommonConfig.HBASE_BASELINE_FAMLIY_NAME));
}
@Override
public void close() throws Exception {
super.close();
}
@Override
public void run(SourceContext<Map<String, Map<String,List<Integer>>>> sourceContext) throws Exception {
logger.info("开始读取baseline数据");
ResultScanner rs = table.getScanner(scan);
// Map<String, List<Integer>[]> baselineMap = new HashMap<>();
Map<String, Map<String,List<Integer>>> baselineMap = new HashMap<>();
for (Result result : rs) {
Map<String, List<Integer>> floodTypeMap = new HashMap<>();
String rowkey = Bytes.toString(result.getRow());
ArrayList<Integer> tcp = getArraylist(result,"TCP SYN Flood", "session_num");
ArrayList<Integer> udp = getArraylist(result,"UDP Flood", "session_num");
ArrayList<Integer> icmp = getArraylist(result,"ICMP Flood", "session_num");
ArrayList<Integer> dns = getArraylist(result,"DNS Amplification", "session_num");
floodTypeMap.put("TCP SYN Flood",tcp);
floodTypeMap.put("UDP Flood",udp);
floodTypeMap.put("ICMP Flood",icmp);
floodTypeMap.put("DNS Amplification",dns);
// List[] arr = new ArrayList[]{tcp,udp,icmp,dns};
baselineMap.put(rowkey,floodTypeMap);
}
sourceContext.collect(baselineMap);
logger.info("格式化baseline数据成功读取IP共{}",baselineMap.size());
}
private static ArrayList<Integer> getArraylist(Result result,String family,String qualifier) throws IOException {
if (!result.containsColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier))){
return null;
}
ArrayWritable w = new ArrayWritable(IntWritable.class);
w.readFields(new DataInputStream(new ByteArrayInputStream(result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier)))));
return fromWritable(w);
}
private static ArrayList<Integer> fromWritable(ArrayWritable writable) {
Writable[] writables = writable.get();
ArrayList<Integer> list = new ArrayList<>(writables.length);
for (Writable wrt : writables) {
list.add(((IntWritable)wrt).get());
}
return list;
}
@Override
public void cancel() {
try {
if (table != null) {
table.close();
}
if (conn != null) {
conn.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(5000);
DataStreamSource<Map<String, Map<String,List<Integer>>>> mapDataStreamSource = env.addSource(new BaselineSource());
DataStream<Map<String, Map<String,List<Integer>>>> broadcast = mapDataStreamSource.broadcast();
mapDataStreamSource.print();
env.execute();
}
}

View File

@@ -1,14 +1,20 @@
package com.zdjizhi.source;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* @author wlh
*/
public class DosSketchSource {
private static StreamExecutionEnvironment streamExeEnv = FlinkEnvironmentUtils.streamExeEnv;
@@ -17,10 +23,24 @@ public class DosSketchSource {
Properties properties = new Properties();
properties.setProperty("bootstrap.servers", CommonConfig.KAFKA_INPUT_BOOTSTRAP_SERVERS);
properties.setProperty("group.id", CommonConfig.KAFKA_GROUP_ID);
if (CommonConfig.SASL_JAAS_CONFIG_FLAG == 1){
properties.put("security.protocol", "SASL_PLAINTEXT");
properties.put("sasl.mechanism", "PLAIN");
properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""+CommonConfig.SASL_JAAS_CONFIG_USER+"\" password=\""+CommonConfig.SASL_JAAS_CONFIG_PASSWORD+"\";");
}
return streamExeEnv.addSource(new FlinkKafkaConsumer<String>(
CommonConfig.KAFKA_INPUT_TOPIC_NAME,
new SimpleStringSchema(), properties))
.setParallelism(CommonConfig.KAFKA_INPUT_PARALLELISM);
}
public static DataStreamSource<Map<String, byte[]>> broadcastSource(Properties nacosProperties, String STORE_PATH){
return streamExeEnv.addSource(new HttpSource(nacosProperties, CommonConfig.NACOS_DATA_ID, CommonConfig.NACOS_GROUP, CommonConfig.NACOS_READ_TIMEOUT,STORE_PATH));
}
public static DataStreamSource<Map<String, byte[]>> singleBroadcastSource(Properties nacosProperties){
return streamExeEnv.addSource(new SingleHttpSource(nacosProperties, CommonConfig.NACOS_DATA_ID, CommonConfig.NACOS_GROUP, CommonConfig.NACOS_READ_TIMEOUT));
}
}

View File

@@ -0,0 +1,182 @@
package com.zdjizhi.source;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.json.JSONObject;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.base.Joiner;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.KnowledgeLog;
import com.zdjizhi.utils.*;
import org.apache.commons.io.IOUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executor;
public class HttpSource extends RichHttpSourceFunction<Map<String, byte[]>> {
private static final Logger logger = LoggerFactory.getLogger(HttpSource.class);
private static final String EXPR = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
// Nacos connection properties
private Properties nacosProperties;
// Nacos data id
private String NACOS_DATA_ID;
// Nacos group
private String NACOS_GROUP;
// Nacos read timeout
private long NACOS_READ_TIMEOUT;
// HDFS path the knowledge files are uploaded to
private String STORE_PATH;
private ConfigService configService;
// private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
// private static JavaType listType = jsonMapperInstance.createCollectionType(List.class, KnowledgeLog.class);
private static Map<String, String> updateMap = new HashMap<>();
private static HashMap<String, byte[]> knowledgeFileCache;
private boolean isRunning = true;
public HttpSource(Properties nacosProperties, String NACOS_DATA_ID, String NACOS_GROUP, long NACOS_READ_TIMEOUT, String storePath) {
this.nacosProperties = nacosProperties;
this.NACOS_DATA_ID = NACOS_DATA_ID;
this.NACOS_GROUP = NACOS_GROUP;
this.NACOS_READ_TIMEOUT = NACOS_READ_TIMEOUT;
this.STORE_PATH = storePath;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
// initialise the metadata (sha256) cache
updateMap = new HashMap<>(16);
// initialise the IP location knowledge-base cache
knowledgeFileCache = new HashMap<>(16);
logger.info("连接nacos" + nacosProperties.getProperty(PropertyKeyConst.SERVER_ADDR));
configService = NacosFactory.createConfigService(nacosProperties);
}
@Override
public void run(SourceContext ctx) throws Exception {
// ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String format = formatter.format(new Date());
logger.info(format + "receive config from nacos:" + config);
System.out.println(format + "receive config from nacos:" + config);
if (StringUtil.isNotBlank(config)) {
ArrayList<Object> metaList = JsonPath.parse(config).read(EXPR);
loadKnowledge(metaList);
}
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
logger.info("receive update config:" + configMsg);
if (StringUtil.isNotBlank(configMsg)) {
ArrayList<Object> metaList = JsonPath.parse(configMsg).read(EXPR);
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
if (!sha256.equals(updateMap.get(fileName))) {
updateMap.put(fileName, sha256);
updateKnowledge(fileName, filePath);
}
}
ctx.collect(knowledgeFileCache);
}
}
} catch (Exception e) {
logger.error("监听nacos配置失败", e);
}
System.out.println(configMsg);
}
});
while (isRunning) {
Thread.sleep(10000);
}
}
private void loadKnowledge(ArrayList<Object> metaList) {
InputStream inputStream = null;
try {
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
updateMap.put(fileName, sha256);
knowledgeFileCache.put(fileName, IOUtils.toByteArray(inputStream));
}
}
} catch (IOException ioException) {
ioException.printStackTrace();
} finally {
IOUtils.closeQuietly(inputStream);
}
}
private void updateKnowledge(String fileName, String filePath) {
InputStream inputStream = null;
FileOutputStream outputStream = null;
try {
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
byte[] bytes = IOUtils.toByteArray(inputStream);
HdfsUtils.uploadFileByBytes(CommonConfig.HDFS_PATH + fileName, bytes);
knowledgeFileCache.put(fileName, bytes);
} catch (IOException ioException) {
ioException.printStackTrace();
} finally {
IOUtils.closeQuietly(inputStream);
IOUtils.closeQuietly(outputStream);
}
}
@Override
public void cancel() {
this.isRunning = false;
}
}
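
The EXPR JsonPath filter keeps only the four IP knowledge-base entries whose version is 'latest' and projects the fields the source needs. A self-contained sketch with a hypothetical Nacos payload (file names and paths below are made up):

import com.jayway.jsonpath.JsonPath;
import java.util.List;
import java.util.Map;

public class KnowledgeMetaSketch {
    public static void main(String[] args) {
        // Hypothetical payload: only the first element passes the name/version filter.
        String config = "[{\"name\":\"ip_v4_built_in\",\"version\":\"latest\",\"sha256\":\"abc\","
                + "\"format\":\"mmdb\",\"path\":\"http://hos.example/ip_v4_built_in.mmdb\"},"
                + "{\"name\":\"other_file\",\"version\":\"latest\",\"sha256\":\"def\",\"format\":\"mmdb\",\"path\":\"ignored\"}]";
        String expr = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in',"
                + "'ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
        // Each surviving element is projected to a map of name/sha256/format/path.
        List<Map<String, Object>> metaList = JsonPath.parse(config).read(expr);
        System.out.println(metaList);
    }
}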

View File

@@ -0,0 +1,6 @@
package com.zdjizhi.source;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
public interface HttpSourceFunction<OUT> extends SourceFunction<OUT> {
}

View File

@@ -0,0 +1,10 @@
package com.zdjizhi.source;
import org.apache.flink.api.common.functions.AbstractRichFunction;
public abstract class RichHttpSourceFunction<OUT> extends AbstractRichFunction implements HttpSourceFunction<OUT> {
private static final long serialVersionUID = 1L;
public RichHttpSourceFunction() {
}
}

View File

@@ -0,0 +1,217 @@
package com.zdjizhi.source;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import cn.hutool.json.JSONObject;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.fasterxml.jackson.databind.JavaType;
import com.google.common.base.Joiner;
import com.jayway.jsonpath.JsonPath;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import com.zdjizhi.common.KnowledgeLog;
import com.zdjizhi.utils.*;
import org.apache.commons.io.IOUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.*;
import java.util.concurrent.Executor;
public class SingleHttpSource extends RichHttpSourceFunction<Map<String, byte[]>> {
private static final Logger logger = LoggerFactory.getLogger(SingleHttpSource.class);
private static HashMap<String, byte[]> knowledgeFileCache;
private Properties nacosProperties;
private String NACOS_DATA_ID;
private String NACOS_GROUP;
private long NACOS_READ_TIMEOUT;
private static String STORE_PATH;
private ConfigService configService;
// private static JsonMapper jsonMapperInstance = JsonMapper.getInstance();
// private static JavaType listType = jsonMapperInstance.createCollectionType(List.class, KnowledgeLog.class);
private static final String EXPR = "$.[?(@.version=='latest' && @.name in ['ip_v4_built_in','ip_v6_built_in','ip_v4_user_defined','ip_v6_user_defined'])].['name','sha256','format','path']";
private static Map<String, String> updateMap = new HashMap<>();
private boolean isRunning = true;
public SingleHttpSource(Properties nacosProperties, String NACOS_DATA_ID, String NACOS_GROUP, long NACOS_READ_TIMEOUT) {
this.nacosProperties = nacosProperties;
this.NACOS_DATA_ID = NACOS_DATA_ID;
this.NACOS_GROUP = NACOS_GROUP;
this.NACOS_READ_TIMEOUT = NACOS_READ_TIMEOUT;
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
logger.info("连接nacos" + nacosProperties.getProperty(PropertyKeyConst.SERVER_ADDR));
configService = NacosFactory.createConfigService(nacosProperties);
// initialise the metadata (sha256) cache
updateMap = new HashMap<>(16);
// initialise the IP location knowledge-base cache
knowledgeFileCache = new HashMap<>(16);
}
@Override
public void run(SourceContext ctx) throws Exception {
// ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
String config = configService.getConfig(NACOS_DATA_ID, NACOS_GROUP, NACOS_READ_TIMEOUT);
// List<CustomFile> customFiles = new ArrayList<>();
if (StringUtil.isNotBlank(config)) {
ArrayList<Object> metaList = JsonPath.parse(config).read(EXPR);
loadKnowledge(metaList);
}
// if (StringUtil.isNotBlank(config)) {
// List<KnowledgeLog> knowledgeLogListList = jsonMapperInstance.fromJson(config, listType);
// if (knowledgeLogListList.size()>=1){
// for (KnowledgeLog knowledgeLog : knowledgeLogListList) {
// String name = knowledgeLog.getName().concat(".").concat(knowledgeLog.getFormat());
// String sha256 = knowledgeLog.getSha256();
// updateMap.put(name,sha256);
// }
// }
// }
configService.addListener(NACOS_DATA_ID, NACOS_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
logger.info("receive update config:" + configMsg);
if (StringUtil.isNotBlank(configMsg)) {
ArrayList<Object> metaList = JsonPath.parse(configMsg).read(EXPR);
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
if (!sha256.equals(updateMap.get(fileName))) {
updateMap.put(fileName, sha256);
updateKnowledge(fileName, filePath);
}
}
ctx.collect(knowledgeFileCache);
}
}
} catch (Exception e) {
logger.error("监听nacos配置失败", e);
}
System.out.println(configMsg);
}
});
while (isRunning) {
Thread.sleep(10000);
}
}
// private CustomFile loadKnowledge(String fileName, String filePath) {
// InputStream inputStream = null;
// FileOutputStream outputStream = null;
// CustomFile customFile = new CustomFile();
// try {
// customFile.setFileName(fileName);
// Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
// HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
// inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
// FileUtil.mkdir(CommonConfig.DOWNLOAD_PATH);
// File file = new File(CommonConfig.DOWNLOAD_PATH.concat(File.separator).concat(fileName));
// outputStream = new FileOutputStream(file);
// byte[] bytes = IOUtils.toByteArray(inputStream);
// customFile.setContent(bytes);
// inputStream = new ByteArrayInputStream(customFile.getContent());
// IoUtil.copy(inputStream, outputStream);
//
// } catch (IOException ioException) {
// ioException.printStackTrace();
// } finally {
// IOUtils.closeQuietly(inputStream);
// IOUtils.closeQuietly(outputStream);
// }
// return customFile;
// }
private void loadKnowledge(ArrayList<Object> metaList) {
InputStream inputStream = null;
try {
if (metaList.size() >= 1) {
for (Object metadata : metaList) {
JSONObject knowledgeJson = new JSONObject(metadata, false, true);
String fileName = Joiner.on(CommonConfig.LOCATION_SEPARATOR).useForNull("").join(knowledgeJson.getStr("name"),
knowledgeJson.getStr("format"));
String sha256 = knowledgeJson.getStr("sha256");
String filePath = knowledgeJson.getStr("path");
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
updateMap.put(fileName, sha256);
knowledgeFileCache.put(fileName, IOUtils.toByteArray(inputStream));
}
}
} catch (IOException ioException) {
ioException.printStackTrace();
} finally {
IOUtils.closeQuietly(inputStream);
}
}
private void updateKnowledge(String fileName, String filePath) {
InputStream inputStream = null;
FileOutputStream outputStream = null;
try {
Header header = new BasicHeader("token", CommonConfig.HOS_TOKEN);
HttpClientUtils2 httpClientUtils = new HttpClientUtils2();
inputStream = httpClientUtils.httpGetInputStream(filePath, 3000, header);
FileUtil.mkdir(CommonConfig.DOWNLOAD_PATH);
File file = new File(CommonConfig.DOWNLOAD_PATH.concat(File.separator).concat(fileName));
outputStream = new FileOutputStream(file);
byte[] bytes = IOUtils.toByteArray(inputStream);
knowledgeFileCache.put(fileName, bytes);
inputStream=new ByteArrayInputStream(bytes);
IoUtil.copy(inputStream, outputStream);
} catch (IOException ioException) {
ioException.printStackTrace();
} finally {
IOUtils.closeQuietly(inputStream);
IOUtils.closeQuietly(outputStream);
}
}
@Override
public void cancel() {
this.isRunning = false;
}
}

View File

@@ -0,0 +1,23 @@
package com.zdjizhi.utils;
import java.util.Collection;
import java.util.HashSet;
/**
* @author wlh
* Extended collection-handling utilities.
*/
public class CollectionUtils {
public static<T> Collection<T> takeUniqueLimit(Collection<T> collection, int limit){
int i =0;
Collection<T> newSet = new HashSet<>();
for (T t:collection){
if (i < limit){
newSet.add(t);
i += 1;
}
}
return newSet;
}
}
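
A short usage sketch of takeUniqueLimit (the IPs are arbitrary). Which elements survive is unspecified, since HashSet iteration order is undefined; the job caps source-IP lists with CommonConfig.SOURCE_IP_LIST_LIMIT.

import com.zdjizhi.utils.CollectionUtils;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;

public class TakeUniqueLimitDemo {
    public static void main(String[] args) {
        Collection<String> sourceIps = new HashSet<>(Arrays.asList("1.1.1.1", "2.2.2.2", "3.3.3.3"));
        // Keep at most two of the unique source IPs.
        Collection<String> capped = CollectionUtils.takeUniqueLimit(sourceIps, 2);
        System.out.println(capped.size()); // 2
    }
}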

View File

@@ -1,9 +1,9 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
@@ -17,7 +17,8 @@ import java.util.concurrent.locks.Lock;
public class DistributedLock implements Lock, Watcher {
private static final Logger logger = LoggerFactory.getLogger(DistributedLock.class);
// private static final Logger logger = LoggerFactory.getLogger(DistributedLock.class);
private static final Log logger = LogFactory.get();
private ZooKeeper zk = null;
/**

View File

@@ -0,0 +1,24 @@
package com.zdjizhi.utils;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
public class FileByteUtils {
public static byte[] getFileBytes (String filePath) throws IOException {
File file = new File(filePath);
FileInputStream fis = new FileInputStream(file);
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
byte[] b = new byte[1024];
int n;
while ((n = fis.read(b)) != -1) {
bos.write(b, 0, n);
}
fis.close();
byte[] data = bos.toByteArray();
bos.close();
return data;
}
}

View File

@@ -1,9 +1,9 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
/**
@@ -12,17 +12,35 @@ import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
public class FlinkEnvironmentUtils {
public static StreamExecutionEnvironment streamExeEnv = StreamExecutionEnvironment.getExecutionEnvironment();
public static StreamTableEnvironment getStreamTableEnv() {
static {
streamExeEnv.setParallelism(CommonConfig.STREAM_EXECUTION_ENVIRONMENT_PARALLELISM);
EnvironmentSettings settings = EnvironmentSettings.newInstance()
.useBlinkPlanner()
.inStreamingMode()
.build();
/*
// trigger a checkpoint once per window interval (FLINK_WINDOW_MAX_TIME seconds)
streamExeEnv.enableCheckpointing(CommonConfig.FLINK_WINDOW_MAX_TIME * 1000);
return StreamTableEnvironment.create(streamExeEnv, settings);
// set exactly-once mode (this is the default)
streamExeEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
// require at least 500 ms between consecutive checkpoints
streamExeEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
// a checkpoint must finish within one minute or it is discarded
streamExeEnv.getCheckpointConfig().setCheckpointTimeout(60000);
// tolerate two consecutive checkpoint failures
streamExeEnv.getCheckpointConfig().setTolerableCheckpointFailureNumber(2);
// allow only one checkpoint at a time
streamExeEnv.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
// use externalized checkpoints so they are retained after the job is cancelled
streamExeEnv.getCheckpointConfig().enableExternalizedCheckpoints(
CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
// enable experimental unaligned checkpoints
streamExeEnv.getCheckpointConfig().enableUnalignedCheckpoints();
*/
}
}
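
The commented-out block above documents the intended checkpoint settings. A runnable sketch of the same configuration on a fresh environment, assuming a 60-second checkpoint interval (the job itself would derive the interval from CommonConfig.FLINK_WINDOW_MAX_TIME):

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CheckpointConfigSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(60_000);                               // assumed 60s interval
        CheckpointConfig cfg = env.getCheckpointConfig();
        cfg.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);      // default mode
        cfg.setMinPauseBetweenCheckpoints(500);
        cfg.setCheckpointTimeout(60_000);
        cfg.setTolerableCheckpointFailureNumber(2);
        cfg.setMaxConcurrentCheckpoints(1);
        cfg.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        cfg.enableUnalignedCheckpoints();
    }
}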

View File

@@ -1,5 +1,49 @@
package com.zdjizhi.utils;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.*;
/**
* @author wlh
*/
public class HbaseUtils {
public static Integer getIntegerValue(Result result, String family, String qualifier) {
byte[] value = result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier));
if (value != null){
return Bytes.toInt(value);
}
return 1;
}
public static ArrayList<Integer> getArraylist(Result result, String family, String qualifier) throws IOException {
if (containsColumn(result, family, qualifier)) {
ArrayWritable w = new ArrayWritable(IntWritable.class);
w.readFields(new DataInputStream(new ByteArrayInputStream(result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier)))));
return fromWritable(w);
}
return null;
}
private static ArrayList<Integer> fromWritable(ArrayWritable writable) {
Writable[] writables = writable.get();
ArrayList<Integer> list = new ArrayList<>(writables.length);
for (Writable wrt : writables) {
list.add(((IntWritable) wrt).get());
}
return list;
}
private static boolean containsColumn(Result result, String family, String qualifier) {
return result.containsColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
}
}
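
getArraylist() assumes each baseline cell was written as a Hadoop ArrayWritable of IntWritable. A hypothetical producer-side sketch of the matching encoding (not part of this repo) would look roughly like this:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class BaselineCellEncodingSketch {
    // Encode an int list the same way getArraylist() decodes it.
    static byte[] encode(int... values) throws IOException {
        IntWritable[] writables = new IntWritable[values.length];
        for (int i = 0; i < values.length; i++) {
            writables[i] = new IntWritable(values[i]);
        }
        ArrayWritable array = new ArrayWritable(IntWritable.class, writables);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        array.write(new DataOutputStream(bos));
        return bos.toByteArray(); // bytes suitable for an HBase Put value
    }

    public static void main(String[] args) throws IOException {
        System.out.println(encode(100, 200, 300).length);
    }
}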

View File

@@ -0,0 +1,75 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
public class HdfsUtils {
private static final Log logger = LogFactory.get();
private static FileSystem fileSystem;
static {
Configuration configuration = new Configuration();
try {
// HDFS related configuration
configuration.set("fs.defaultFS","hdfs://ns1");
configuration.set("hadoop.proxyuser.root.hosts","*");
configuration.set("hadoop.proxyuser.root.groups","*");
configuration.set("ha.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
configuration.set("dfs.nameservices","ns1");
configuration.set("dfs.ha.namenodes.ns1","nn1,nn2");
configuration.set("dfs.namenode.rpc-address.ns1.nn1",CommonConfig.HDFS_URI_NS1);
configuration.set("dfs.namenode.rpc-address.ns1.nn2",CommonConfig.HDFS_URI_NS2);
configuration.set("dfs.client.failover.proxy.provider.ns1","org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
// specify the HDFS user
System.setProperty("HADOOP_USER_NAME", CommonConfig.HDFS_USER);
// create the FileSystem used to connect to HDFS
fileSystem = FileSystem.get(configuration);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static boolean isExists(String filePath) throws IOException {
return fileSystem.exists(new Path(filePath));
}
public static byte[] getFileBytes(String filePath) throws IOException {
try (FSDataInputStream open = fileSystem.open(new Path(filePath))) {
byte[] bytes = new byte[open.available()];
open.read(0, bytes, 0, open.available());
return bytes;
} catch (IOException e) {
logger.error("An I/O exception when files are download from HDFS. Message is :" + e.getMessage());
}
return null;
}
public static void uploadFileByBytes(String filePath,byte[] bytes) throws IOException {
try (FSDataOutputStream fsDataOutputStream = fileSystem.create(new Path(filePath), true)) {
fsDataOutputStream.write(bytes);
// fsDataOutputStream.flush();
} catch (RuntimeException e) {
logger.error("Uploading files to the HDFS is abnormal. Message is :" + e.getMessage());
} catch (IOException e) {
logger.error("An I/O exception when files are uploaded to HDFS. Message is :" + e.getMessage());
}
}
public static void rename(String src, String dst) throws IOException {
fileSystem.rename(new Path(src),new Path(dst));
}
}

View File

@@ -0,0 +1,270 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.apache.http.*;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLHandshakeException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
/**
* HTTP client utility class backed by a pooled Apache HttpClient.
* @author wlh
*/
public class HttpClientUtils {
/** global connection pool */
private static final PoolingHttpClientConnectionManager CONN_MANAGER = new PoolingHttpClientConnectionManager();
// private static Logger logger = LoggerFactory.getLogger(HttpClientUtils.class);
private static final Log logger = LogFactory.get();
public static final String ERROR_MESSAGE = "-1";
/*
* Static block: configure the connection pool.
*/
static {
// maximum total connections in the pool
CONN_MANAGER.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// maximum connections per route
CONN_MANAGER.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
}
/**
* Obtain an HTTP client backed by the shared connection pool.
* @return pooled CloseableHttpClient
*/
private static CloseableHttpClient getHttpClient() {
// build the request configuration
RequestConfig requestConfig = RequestConfig.custom()
// timeout for leasing a connection from the pool
.setConnectionRequestTimeout(CommonConfig.HTTP_POOL_REQUEST_TIMEOUT)
// connect timeout
.setConnectTimeout(CommonConfig.HTTP_POOL_CONNECT_TIMEOUT)
// socket (response) timeout
.setSocketTimeout(CommonConfig.HTTP_POOL_RESPONSE_TIMEOUT)
.build();
/*
* Retry handler, configured so that a timeout does not silently drop the request.
* Returning false means no retry; the cases below decide whether to retry.
*/
HttpRequestRetryHandler retry = (exception, executionCount, context) -> {
if (executionCount >= 3) {// give up after three retries
return false;
}
if (exception instanceof NoHttpResponseException) {// the server dropped the connection, so retry
return true;
}
if (exception instanceof SSLHandshakeException) {// do not retry SSL handshake failures
return false;
}
if (exception instanceof UnknownHostException) {// target host unknown or unreachable
return false;
}
if (exception instanceof ConnectTimeoutException) {// connection timed out
return false;
}
if (exception instanceof HttpHostConnectException) {// connection refused
return false;
}
if (exception instanceof SSLException) {// other SSL errors
return false;
}
if (exception instanceof InterruptedIOException) {// interrupted I/O (timeout), retry
return true;
}
HttpClientContext clientContext = HttpClientContext.adapt(context);
HttpRequest request = clientContext.getRequest();
// retry only if the request is idempotent (no enclosed entity)
return !(request instanceof HttpEntityEnclosingRequest);
};
ConnectionKeepAliveStrategy myStrategy = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && "timeout".equalsIgnoreCase(param)) {
return Long.parseLong(value) * 1000;
}
}
return 60 * 1000;// default keep-alive of 60s when the server does not specify one
};
// Build the httpClient
return HttpClients.custom()
// apply the request timeouts
.setDefaultRequestConfig(requestConfig)
// apply the retry handler
.setRetryHandler(retry)
.setKeepAliveStrategy(myStrategy)
// use the shared connection pool
.setConnectionManager(CONN_MANAGER)
.build();
}
/**
* Issue a GET request.
*
* @param uri request URI
* @return response body, or ERROR_MESSAGE on failure
*/
public static String httpGet(URI uri, Header... headers) {
String msg = ERROR_MESSAGE;
// obtain a pooled client
CloseableHttpClient httpClient = getHttpClient();
CloseableHttpResponse response = null;
try {
logger.info("http get uri {}",uri);
// build the GET request
HttpGet httpGet = new HttpGet(uri);
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpGet.addHeader(h);
logger.info("request header : {}",h);
}
}
// execute the request
response = httpClient.execute(httpGet);
int statusCode = response.getStatusLine().getStatusCode();
// read the response entity
HttpEntity entity = response.getEntity();
// read the response body
msg = EntityUtils.toString(entity, "UTF-8");
if (statusCode != HttpStatus.SC_OK) {
logger.error("Http get content is :{}" , msg);
}
} catch (ClientProtocolException e) {
logger.error("Protocol error: {}", e.getMessage());
} catch (ParseException e) {
logger.error("Parse error: {}", e.getMessage());
} catch (IOException e) {
logger.error("IO error: {}", e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consume(response.getEntity());
response.close();
} catch (IOException e) {
logger.error("释放链接错误: {}", e.getMessage());
}
}
}
return msg;
}
/**
* Issue a POST request.
* @param uri request URI
* @param requestBody request body
* @return response body, or ERROR_MESSAGE on failure
*/
public static String httpPost(URI uri, String requestBody, Header... headers) {
String msg = ERROR_MESSAGE;
// obtain a pooled client
CloseableHttpClient httpClient = getHttpClient();
// create the POST request
CloseableHttpResponse response = null;
try {
logger.info("http post uri:{} http post body:{}", uri, requestBody);
HttpPost httpPost = new HttpPost(uri);
httpPost.setHeader("Content-Type", "application/x-www-form-urlencoded");
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpPost.addHeader(h);
logger.info("request header : {}",h);
}
}
if(StringUtil.isNotBlank(requestBody)) {
byte[] bytes = requestBody.getBytes(StandardCharsets.UTF_8);
httpPost.setEntity(new ByteArrayEntity(bytes));
}
response = httpClient.execute(httpPost);
int statusCode = response.getStatusLine().getStatusCode();
// read the response entity
HttpEntity entity = response.getEntity();
// read the response body
msg = EntityUtils.toString(entity, "UTF-8");
if (statusCode != HttpStatus.SC_OK) {
logger.error("Http post content is :{}" , msg);
}
} catch (ClientProtocolException e) {
logger.error("Protocol error: {}", e.getMessage());
} catch (ParseException e) {
logger.error("Parse error: {}", e.getMessage());
} catch (IOException e) {
logger.error("IO error: {}", e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consumeQuietly(response.getEntity());
response.close();
} catch (IOException e) {
logger.error("释放链接错误: {}", e.getMessage());
}
}
}
return msg;
}
/**
* Assemble a URL from a path and a parameter map.
*/
public static void setUrlWithParams(URIBuilder uriBuilder,String path, Map<String, Object> params) {
try {
uriBuilder.setPath(path);
if (params != null && !params.isEmpty()){
for (Map.Entry<String, Object> kv : params.entrySet()) {
uriBuilder.setParameter(kv.getKey(),kv.getValue().toString());
}
}
} catch (Exception e) {
logger.error("拼接url出错,uri : {}, path : {},参数: {}",uriBuilder.toString(),path,params);
}
}
}
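
A minimal usage sketch for the class above. The host, the vsysId parameter, and the token header name/value are placeholders, not values taken from this repository; the path mirrors bifang.server.policy.threshold.path from the job properties.

import org.apache.http.client.utils.URIBuilder;
import org.apache.http.message.BasicHeader;
import java.util.HashMap;
import java.util.Map;

public class HttpClientUtilsExample {
    public static void main(String[] args) throws Exception {
        // Placeholder Bifang host; substitute the real bifang.server.uri.
        URIBuilder builder = new URIBuilder("http://192.0.2.10:80");
        Map<String, Object> params = new HashMap<>();
        params.put("vsysId", 1); // placeholder parameter
        HttpClientUtils.setUrlWithParams(builder, "/v1/policy/profile/DoS/detection/threshold", params);
        // Header name and value are placeholders for the Bifang access token.
        String body = HttpClientUtils.httpGet(builder.build(), new BasicHeader("Access-Token", "changeme"));
        if (!HttpClientUtils.ERROR_MESSAGE.equals(body)) {
            System.out.println(body); // parse the threshold JSON here
        }
    }
}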

View File

@@ -0,0 +1,234 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import org.apache.commons.io.IOUtils;
import org.apache.http.*;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.HttpHostConnectException;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.X509Certificate;
import java.util.Map;
/**
* HTTP client utility class (trust-all SSL variant used for knowledge-base downloads).
*/
public class HttpClientUtils2 {
/** Shared connection pool */
private static final PoolingHttpClientConnectionManager CONN_MANAGER = new PoolingHttpClientConnectionManager();
private static Logger logger = LoggerFactory.getLogger(HttpClientUtils2.class);
public static final String ERROR_MESSAGE = "-1";
/*
* Static block: configure the connection pool.
*/
static {
// Maximum total connections
CONN_MANAGER.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// Maximum connections per route
CONN_MANAGER.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
}
/**
* Override certificate validation so HTTPS endpoints are accepted without verification,
* then build a connection manager with that SSL context.
*
* @return connection manager supporting https
*/
private PoolingHttpClientConnectionManager getSslClientManager() {
try {
// Trust-all manager: disables SSL certificate checks
X509TrustManager trustManager = new X509TrustManager() {
@Override
public X509Certificate[] getAcceptedIssuers() {
return null;
}
@Override
public void checkClientTrusted(X509Certificate[] xcs, String str) {
}
@Override
public void checkServerTrusted(X509Certificate[] xcs, String str) {
}
};
SSLContext ctx = SSLContext.getInstance(SSLConnectionSocketFactory.TLS);
ctx.init(null, new TrustManager[]{trustManager}, null);
SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(ctx, NoopHostnameVerifier.INSTANCE);
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.INSTANCE)
.register("https", socketFactory).build();
// Connection manager bound to the custom socket factory registry
PoolingHttpClientConnectionManager connManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
// Maximum total connections
connManager.setMaxTotal(CommonConfig.HTTP_POOL_MAX_CONNECTION);
// Maximum connections per route
connManager.setDefaultMaxPerRoute(CommonConfig.HTTP_POOL_MAX_PER_ROUTE);
return connManager;
} catch (KeyManagementException | NoSuchAlgorithmException e) {
throw new RuntimeException(e.getMessage());
}
}
/**
* Build an HTTP client backed by the trust-all SSL connection manager.
* @return a pooled CloseableHttpClient
*/
private CloseableHttpClient getHttpClient() {
// Request-level timeout configuration
RequestConfig requestConfig = RequestConfig.custom()
// Timeout for leasing a connection from the pool
.setConnectionRequestTimeout(CommonConfig.HTTP_POOL_REQUEST_TIMEOUT)
// Connect timeout
.setConnectTimeout(CommonConfig.HTTP_POOL_CONNECT_TIMEOUT)
// Socket (response) timeout
.setSocketTimeout(CommonConfig.HTTP_POOL_RESPONSE_TIMEOUT)
.build();
/*
* Retry handler: returning false means no retry;
* each exception type is inspected to decide whether a retry is safe.
*/
HttpRequestRetryHandler retry = (exception, executionCount, context) -> {
if (executionCount >= 3) {// give up after three retries
return false;
}
if (exception instanceof NoHttpResponseException) {// server dropped the connection, retry
return true;
}
if (exception instanceof SSLHandshakeException) {// do not retry SSL handshake failures
return false;
}
if (exception instanceof UnknownHostException) {// target host unreachable
return false;
}
if (exception instanceof ConnectTimeoutException) {// connect timed out
return false;
}
if (exception instanceof HttpHostConnectException) {// connection refused
return false;
}
if (exception instanceof SSLException) {// other SSL errors
return false;
}
if (exception instanceof InterruptedIOException) {// I/O interrupted or timed out, retry
return true;
}
HttpClientContext clientContext = HttpClientContext.adapt(context);
HttpRequest request = clientContext.getRequest();
// retry only requests without a body (treated as idempotent)
return !(request instanceof HttpEntityEnclosingRequest);
};
ConnectionKeepAliveStrategy myStrategy = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && "timeout".equalsIgnoreCase(param)) {
return Long.parseLong(value) * 1000;
}
}
return 60 * 1000;// default keep-alive of 60s when the server does not specify one
};
// Build the httpClient
return HttpClients.custom()
// apply the request timeouts
.setDefaultRequestConfig(requestConfig)
// apply the retry handler
.setRetryHandler(retry)
.setKeepAliveStrategy(myStrategy)
// use the trust-all SSL connection pool
.setConnectionManager(getSslClientManager())
.build();
}
// TODO: 2022/10/19 load the knowledge base
public InputStream httpGetInputStream(String url, int socketTimeout, Header... headers) {
InputStream result = null;
// obtain a pooled client
CloseableHttpClient httpClient = getHttpClient();// TODO: 2022/10/19 socketTimeout is no longer applied here
// build the GET request
HttpGet httpGet = new HttpGet(url);
if (StringUtil.isNotEmpty(headers)) {
for (Header h : headers) {
httpGet.addHeader(h);
}
}
CloseableHttpResponse response = null;
try {
// execute the request
response = httpClient.execute(httpGet);
// buffer the response entity
result = IOUtils.toBufferedInputStream(response.getEntity().getContent());
// make sure the entity is fully consumed
EntityUtils.consume(response.getEntity());
} catch (ClientProtocolException e) {
log.error("current file: {},Protocol error:{}", url, e.getMessage());
} catch (ParseException e) {
log.error("current file: {}, Parser error:{}", url, e.getMessage());
} catch (IOException e) {
log.error("current file: {},IO error:{}", url, e.getMessage());
} finally {
if (null != response) {
try {
EntityUtils.consume(response.getEntity());
response.close();
} catch (IOException e) {
log.error("Release Connection error:{}", e.getMessage());
}
}
}
return result;
}
}
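
A minimal usage sketch, assuming a reachable knowledge-base URL; the URL and the local file name are placeholders, and in the job the returned stream would instead be handed to the IP-library loader.

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

public class HttpClientUtils2Example {
    public static void main(String[] args) throws Exception {
        HttpClientUtils2 client = new HttpClientUtils2();
        // Placeholder URL; socketTimeout is accepted but currently ignored (see the TODO above).
        InputStream in = client.httpGetInputStream("https://192.0.2.20/knowledge/ip_v4_built_in.mmdb", 60000);
        if (in != null) {
            Files.copy(in, Paths.get("ip_v4_built_in.mmdb")); // or pass the stream to the MMDB builder
            in.close();
        }
    }
}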

View File

@@ -0,0 +1,21 @@
package com.zdjizhi.utils;
import lombok.Data;
/**
* @author fy
* @version 1.0
* @date 2022/10/19 18:27
*/
@Data
public class IpLocationConfiguration {
private String ipV4UserDefined;
private String ipV4BuiltIn;
private String ipV6UserDefined;
private String ipV6BuiltIn;
}

View File

@@ -1,21 +1,113 @@
package com.zdjizhi.utils;
import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.CustomFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
public class IpUtils {
public static IpLookupV2 ipLookup ;
private static Logger LOG = LoggerFactory.getLogger(IpUtils.class);
/**
* IP location (MMDB) lookup utility.
*/
public static IpLookup ipLookup = new IpLookup.Builder(false)
// .loadDataFileV4(CommonConfig.IP_MMDB_PATH + "ip_v4.mmdb")
// .loadDataFileV6(CommonConfig.IP_MMDB_PATH + "ip_v6.mmdb")
.loadDataFilePrivateV4(CommonConfig.IP_MMDB_PATH + "ip_private_v4.mmdb")
// .loadDataFilePrivateV6(CommonConfig.IP_MMDB_PATH + "ip_private_v6.mmdb")
.build();
// public static IpLookupV2 ipLookup = new IpLookupV2.Builder(false)
// .loadDataFileV4(CommonConfig.IP_MMDB_PATH + "ip_v4_built_in.mmdb")
// .loadDataFileV6(CommonConfig.IP_MMDB_PATH + "ip_v6_built_in.mmdb")
// .loadDataFilePrivateV4(CommonConfig.IP_MMDB_PATH + "ip_v4_user_defined.mmdb")
// .loadDataFilePrivateV6(CommonConfig.IP_MMDB_PATH + "ip_v6_user_defined.mmdb")
// .build();
public static void loadIpLook(){
try {
IpLookupV2.Builder builder = new IpLookupV2.Builder(false);
if ("CLUSTER".equals(CommonConfig.CLUSTER_OR_SINGLE)) {
byte[] ipv4BuiltBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v4_built_in.mmdb");
if (ipv4BuiltBytes!=null){
InputStream ipv4BuiltInputStream = new ByteArrayInputStream(ipv4BuiltBytes);
builder.loadDataFileV4(ipv4BuiltInputStream);
}
byte[] ipv6BuiltBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v6_built_in.mmdb");
if (ipv6BuiltBytes!=null){
InputStream ipv6BuiltInputStream = new ByteArrayInputStream(ipv6BuiltBytes);
builder.loadDataFileV6(ipv6BuiltInputStream);
}
byte[] ipv4UserBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v4_user_defined.mmdb");
if (ipv4UserBytes!=null){
InputStream ipv4UserInputStream = new ByteArrayInputStream(ipv4UserBytes);
builder.loadDataFilePrivateV4(ipv4UserInputStream);
}
byte[] ipv6UserBytes = HdfsUtils.getFileBytes(CommonConfig.HDFS_PATH + "ip_v6_user_defined.mmdb");
if (ipv6UserBytes!=null){
InputStream ipv6UserInputStream = new ByteArrayInputStream(ipv6UserBytes);
builder.loadDataFilePrivateV6(ipv6UserInputStream);
}
}else if ("SINGLE".equals(CommonConfig.CLUSTER_OR_SINGLE)){
byte[] ipv4BuiltBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v4_built_in.mmdb");
if (ipv4BuiltBytes!=null){
InputStream ipv4BuiltInputStream = new ByteArrayInputStream(ipv4BuiltBytes);
builder.loadDataFileV4(ipv4BuiltInputStream);
}
byte[] ipv6BuiltBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v6_built_in.mmdb");
if (ipv6BuiltBytes!=null){
InputStream ipv6BuiltInputStream = new ByteArrayInputStream(ipv6BuiltBytes);
builder.loadDataFileV6(ipv6BuiltInputStream);
}
byte[] ipv4UserBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v4_user_defined.mmdb");
if (ipv4UserBytes!=null){
InputStream ipv4UserInputStream = new ByteArrayInputStream(ipv4UserBytes);
builder.loadDataFilePrivateV4(ipv4UserInputStream);
}
byte[] ipv6UserBytes = FileByteUtils.getFileBytes(CommonConfig.DOWNLOAD_PATH + "ip_v6_user_defined.mmdb");
if (ipv6UserBytes!=null){
InputStream ipv6UserInputStream = new ByteArrayInputStream(ipv6UserBytes);
builder.loadDataFilePrivateV6(ipv6UserInputStream);
}
}
ipLookup = builder.build();
}catch (Exception e){
LOG.error("加载失败",e);
}
}
public static void updateIpLook(Map<String, byte[]> knowledgeFileCache){
try{
IpLookupV2.Builder builder = new IpLookupV2.Builder(false);
ipLookup= builder.loadDataFileV4(new ByteArrayInputStream(knowledgeFileCache.get("ip_v4_built_in.mmdb")))
.loadDataFileV6(new ByteArrayInputStream(knowledgeFileCache.get("ip_v6_built_in.mmdb")))
.loadDataFilePrivateV4(new ByteArrayInputStream(knowledgeFileCache.get("ip_v4_user_defined.mmdb")))
.loadDataFilePrivateV6(new ByteArrayInputStream(knowledgeFileCache.get("ip_v6_user_defined.mmdb")))
.build();
}catch (Exception e){
LOG.error("加载失败",e);
}
}
public static void main(String[] args) {
System.out.println(ipLookup.countryLookup("61.128.159.186"));
System.out.println(ipLookup.countryLookup("49.7.115.37"));
String ips = "182.168.50.23,182.168.50.45,182.168.56.9,182.168.56.8,92.168.50.58,19.168.56.7,12.168.56.6,2.168.50.40,1.168.50.19,9.168.50.6,2.168.50.4,192.168.56.17,192.168.50.27,192.168.50.26,192.168.50.18,192.168.56.3,192.168.56.10";
for (String ip:ips.split(",")){
System.out.println(ip+"--"+ipLookup.countryLookup(ip));
}
}

View File

@@ -4,23 +4,32 @@ import com.zdjizhi.common.CommonConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import java.util.Optional;
import java.util.Properties;
public class KafkaUtils {
private static Properties getKafkaSinkProperty(){
Properties propertiesproducer = new Properties();
propertiesproducer.setProperty("bootstrap.servers", CommonConfig.KAFKA_OUTPUT_BOOTSTRAP_SERVERS);
Properties properties = new Properties();
properties.setProperty("bootstrap.servers", CommonConfig.KAFKA_OUTPUT_BOOTSTRAP_SERVERS);
if (CommonConfig.SASL_JAAS_CONFIG_FLAG == 1){
properties.put("security.protocol", "SASL_PLAINTEXT");
properties.put("sasl.mechanism", "PLAIN");
properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""+CommonConfig.SASL_JAAS_CONFIG_USER+"\" password=\""+CommonConfig.SASL_JAAS_CONFIG_PASSWORD+"\";");
}
return propertiesproducer;
return properties;
}
public static FlinkKafkaProducer<String> getKafkaSink(String topic){
return new FlinkKafkaProducer<String>(
FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>(
topic,
new SimpleStringSchema(),
getKafkaSinkProperty()
getKafkaSinkProperty(),
Optional.empty()
);
kafkaProducer.setLogFailuresOnly(true);
return kafkaProducer;
}
}
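
A minimal sketch of attaching the producer to a Flink stream; the sample element and the literal topic are placeholders (in the job the topic comes from kafka.output.event.topic.name).

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KafkaSinkExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Placeholder stream; in the job this is the serialized DoS event stream.
        DataStream<String> events = env.fromElements("{\"attack_type\":\"syn_flood\"}");
        events.addSink(KafkaUtils.getKafkaSink("DOS-EVENT")).name("dos-event-sink");
        env.execute("kafka-sink-example");
    }
}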

View File

@@ -0,0 +1,92 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;
import java.util.concurrent.Executor;
public class NacosUtils {
// private static final Logger logger = LoggerFactory.getLogger(NacosUtils.class);
private static final Log logger = LogFactory.get();
private static Properties nacosProperties = new Properties();
private static Properties commonProperties = new Properties();
private static final String NACOS_SERVER_ADDR = CommonConfigurations.getStringProperty("nacos.server.addr");
private static final String NACOS_STATIC_NAMESPACE = CommonConfigurations.getStringProperty("nacos.static.namespace");
private static final String NACOS_USERNAME = CommonConfigurations.getStringProperty("nacos.username");
private static final String NACOS_PASSWORD = CommonConfigurations.getStringProperty("nacos.password");
private static final String NACOS_STATIC_DATA_ID = CommonConfigurations.getStringProperty("nacos.static.data.id");
private static final String NACOS_STATIC_GROUP = CommonConfigurations.getStringProperty("nacos.static.group");
private static final long NACOS_READ_TIMEOUT = CommonConfigurations.getLongProperty("nacos.read.timeout");
static {
createConfigService();
}
private static void getProperties() {
nacosProperties.setProperty(PropertyKeyConst.SERVER_ADDR, NACOS_SERVER_ADDR);
nacosProperties.setProperty(PropertyKeyConst.NAMESPACE, NACOS_STATIC_NAMESPACE);
nacosProperties.setProperty(PropertyKeyConst.USERNAME, NACOS_USERNAME);
nacosProperties.setProperty(PropertyKeyConst.PASSWORD, NACOS_PASSWORD);
}
private static void createConfigService() {
try {
getProperties();
ConfigService configService = NacosFactory.createConfigService(nacosProperties);
String config = configService.getConfig(NACOS_STATIC_DATA_ID, NACOS_STATIC_GROUP, NACOS_READ_TIMEOUT);
commonProperties.load(new StringReader(config));
configService.addListener(NACOS_STATIC_DATA_ID, NACOS_STATIC_GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
try {
commonProperties.clear();
commonProperties.load(new StringReader(configMsg));
} catch (IOException e) {
logger.error("Failed to reload nacos config from listener", e);
}
logger.info("nacos config updated: {}", configMsg);
}
});
} catch (Exception e) {
logger.error("Failed to fetch nacos config", e);
}
}
public static String getStringProperty(String key) {
return commonProperties.getProperty(key);
}
public static Integer getIntProperty(String key) {
return Integer.parseInt(commonProperties.getProperty(key));
}
public static Double getDoubleProperty(String key) {
return Double.parseDouble(commonProperties.getProperty(key));
}
public static Long getLongProperty(String key) {
return Long.parseLong(commonProperties.getProperty(key));
}
public static Boolean getBooleanProperty(String key) {
String value = commonProperties.getProperty(key);
return value != null && "true".equalsIgnoreCase(value.trim());
}
}
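
A minimal read-side sketch; the keys below are taken from dos_detection.properties and are assumed to be present in the Nacos data id.

public class NacosUtilsExample {
    public static void main(String[] args) {
        // Values come back from the Nacos config center that NacosUtils listens on.
        int windowSeconds = NacosUtils.getIntProperty("flink.window.max.time");
        String metricTopic = NacosUtils.getStringProperty("kafka.output.metric.topic.name");
        System.out.println(windowSeconds + " -> " + metricTopic);
    }
}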

View File

@@ -1,11 +1,12 @@
package com.zdjizhi.utils;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.CommonConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SnowflakeId {
private static final Logger logger = LoggerFactory.getLogger(SnowflakeId.class);
// private static final Logger logger = LoggerFactory.getLogger(SnowflakeId.class);
private static final Log logger = LogFactory.get();
/**
* 64 bits in total; the first bit is the sign bit and is always 0.

View File

@@ -1,11 +1,11 @@
package com.zdjizhi.utils;
import cn.hutool.core.util.StrUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
@@ -13,7 +13,8 @@ import java.util.concurrent.CountDownLatch;
public class ZookeeperUtils implements Watcher {
private static final Logger logger = LoggerFactory.getLogger(ZookeeperUtils.class);
// private static final Logger logger = LoggerFactory.getLogger(ZookeeperUtils.class);
private static final Log logger = LogFactory.get();
private ZooKeeper zookeeper;

View File

@@ -1,41 +1,158 @@
#Parallelism of the Flink execution environment; lower priority than operator parallelism and only used when no operator parallelism is set
stream.execution.environment.parallelism=1
stream.execution.job.name=dos-detection-job
kafka.input.parallelism=1
kafka.input.topic.name=DOS-SKETCH-LOG
kafka.input.bootstrap.servers=192.168.44.12:9092
kafka.input.scan.startup.mode=latest-offset
kafka.input.group.id=2108041426
#kafka.input.group.id=test
#Flink job name, normally unchanged
stream.execution.job.name=DOS-DETECTION-APPLICATION
kafka.output.metric.parallelism=1
kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS-LOG
kafka.output.event.parallelism=1
kafka.output.event.topic.name=DOS-EVENT-LOG
kafka.output.bootstrap.servers=192.168.44.12:9092
#Parallelism of the Kafka input
kafka.input.parallelism=3
hbase.input.parallelism=1
#Kafka input topic name
kafka.input.topic.name=DOS-SKETCH-RECORD
#Kafka input brokers
#kafka.input.bootstrap.servers=192.168.44.12:9094
kafka.input.bootstrap.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
#Kafka consumer group id
kafka.input.group.id=dos-detection-job-221125-1
#kafka.input.group.id=dos-detection-job-210813-1
#Parallelism of the Kafka metrics sink
kafka.output.metric.parallelism=3
#Kafka metrics output topic name
#kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS
kafka.output.metric.topic.name=test
#Parallelism of the Kafka event sink
kafka.output.event.parallelism=3
#Kafka event output topic name
#kafka.output.event.topic.name=DOS-EVENT
kafka.output.event.topic.name=abcd
#Kafka output brokers
kafka.output.bootstrap.servers=192.168.44.12:9094
#kafka.output.bootstrap.servers=192.168.44.11:9092,192.168.44.14:9092,192.168.44.15:9092
#ZooKeeper quorum (for HBase)
hbase.zookeeper.quorum=192.168.44.12:2181
#hbase.zookeeper.quorum=192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181
#hbase.zookeeper.quorum=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#HBase client timeouts (ms)
hbase.client.operation.timeout=30000
hbase.client.scanner.timeout.period=30000
hbase.baseline.table.name=ddos_traffic_baselines
##HBase baseline table name
hbase.baseline.table.name=dos:ddos_traffic_baselines
#Limit on the number of baselines to read
hbase.baseline.total.num=1000000
#Baseline TTL
hbase.baseline.ttl=10
#Parallelism of the two aggregation (keyBy) stages
flink.first.agg.parallelism=1
flink.second.agg.parallelism=1
flink.watermark.max.orderness=1
#Parallelism of the detection/judgement operator
flink.detection.map.parallelism=1
#Watermark max out-of-orderness
flink.watermark.max.orderness=300
#Computation window size, default 600s
flink.window.max.time=600
#Limit on distinct source IPs in a DoS event
source.ip.list.limit=10000
#Number of destination-IP partitions, default 10000, normally unchanged
destination.ip.partition.num=10000
data.center.id.num=15
ip.mmdb.path=D:\\data\\dat_test\\
#Path of the IP MMDB library
ip.mmdb.path=D:\\data\\dat\\bak\\
#ip.mmdb.path=/home/bigdata/topology/dat/
#ip.mmdb.path=/home/bigdata/wlh/topology/dos-detection/dat/
baseline.sessions.minor.threshold=0.1
baseline.sessions.warning.threshold=0.5
baseline.sessions.major.threshold=1
baseline.sessions.severe.threshold=3
baseline.sessions.critical.threshold=8
#Bifang service address
bifang.server.uri=http://192.168.44.72:80
#bifang.server.uri=http://192.168.44.3:80
#Path for password encryption
bifang.server.encryptpwd.path=/v1/user/encryptpwd
#Path for logging in to the Bifang service
bifang.server.login.path=/v1/user/login
#Path for fetching vsys id
bifang.server.policy.vaysid.path=/v1/system/vsys/
#Path for fetching static thresholds
bifang.server.policy.threshold.path=/v1/policy/profile/DoS/detection/threshold
#HTTP client parameters
#Maximum total connections
http.pool.max.connection=400
#Maximum connections per route
http.pool.max.per.route=80
#Request timeout towards the server (ms)
http.pool.request.timeout=60000
#Connect timeout towards the server (ms)
http.pool.connect.timeout=60000
#Server response timeout (ms)
http.pool.response.timeout=60000
#Interval for fetching static thresholds, default 10 minutes
static.threshold.schedule.minutes=10
#Interval (days) for fetching baselines
baseline.threshold.schedule.days=1
#Kafka SASL authentication credentials
sasl.jaas.config.user=admin
#sasl.jaas.config.password=galaxy2019
sasl.jaas.config.password=6MleDyA3Z73HSaXiKsDJ2k7Ys8YWLhEJ
#Whether Kafka SASL authentication is enabled (1 = on, 0 = off)
sasl.jaas.config.flag=1
############################## Nacos settings ######################################
nacos.server.addr=192.168.44.12:8848
nacos.username=nacos
nacos.password=nacos
nacos.read.timeout=5000
############################## Nacos - knowledge base config ######################################
nacos.namespace=public
nacos.data.id=knowledge_base.json
nacos.group=DEFAULT_GROUP
############################## Nacos - static threshold config ######################################
nacos.static.namespace=test
nacos.static.data.id=dos_detection.properties
nacos.static.group=Galaxy
############################## HOS token ######################################
hos.token=c21f969b5f03d33d43e04f8f136e7682
############################# Cluster or single-node mode ######################################
cluster.or.single=CLUSTER
#cluster.or.single=SINGLE
############################## Cluster mode: config file path on HDFS ######################################
hdfs.path=/test/TEST/
hdfs.uri.nn1=192.168.40.151:9000
hdfs.uri.nn2=192.168.40.152:9000
hdfs.user=dos
############################## Single-node mode: local download path for config files ######################################
download.path=D:\\ttt\\
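
An illustrative sketch (not the project's actual CommonConfig source) of how keys from this file end up as the constants referenced by the utility classes above; the field list is hypothetical.

public final class CommonConfigSketch {
    // Hypothetical mirror of CommonConfig: each constant reads one key from this properties file.
    public static final int HTTP_POOL_MAX_CONNECTION = NacosUtils.getIntProperty("http.pool.max.connection");
    public static final int HTTP_POOL_MAX_PER_ROUTE = NacosUtils.getIntProperty("http.pool.max.per.route");
    public static final int HTTP_POOL_REQUEST_TIMEOUT = NacosUtils.getIntProperty("http.pool.request.timeout");
    public static final String KAFKA_OUTPUT_BOOTSTRAP_SERVERS = NacosUtils.getStringProperty("kafka.output.bootstrap.servers");
    public static final String CLUSTER_OR_SINGLE = NacosUtils.getStringProperty("cluster.or.single");

    private CommonConfigSketch() {
    }
}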

View File

@@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/home/tsg/olap/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131702</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.logfile.size</name>
<value>10000000</value>
<description>The max size of each log file</description>
</property>
<property>
<name>hadoop.logfile.count</name>
<value>1</value>
<description>The max number of log files</description>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181</value>
</property>
<property>
<name>ipc.client.connect.timeout</name>
<value>90000</value>
</property>
</configuration>

View File

@@ -0,0 +1,142 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/tsg/olap/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/tsg/olap/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- nn1的RPC通信地址nn1所在地址 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>192.168.40.151:9000</value>
</property>
<!-- nn1的http通信地址外部访问地址 -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>192.168.40.151:50070</value>
</property>
<!-- nn2的RPC通信地址nn2所在地址 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>192.168.40.152:9000</value>
</property>
<!-- nn2的http通信地址外部访问地址 -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>192.168.40.152:50070</value>
</property>
<!-- 指定NameNode的元数据在JournalNode日志上的存放位置(一般和zookeeper部署在一起) -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://192.168.40.151:8485;192.168.40.152:8485;192.168.40.203:8485/ns1</value>
</property>
<!-- 指定JournalNode在本地磁盘存放数据的位置 -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/tsg/olap/hadoop/journal</value>
</property>
<!--客户端通过代理访问namenode访问文件系统HDFS 客户端与Active 节点通信的Java 类使用其确定Active 节点是否活跃 -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!--这是配置自动切换的方法,有多种使用方法,具体可以看官网,在文末会给地址,这里是远程登录杀死的方法 -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
<value>shell(true)</value>
</property>
<!-- 这个是使用sshfence隔离机制时才需要配置ssh免登陆 -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- 配置sshfence隔离机制超时时间这个属性同上如果你是用脚本的方法切换这个应该是可以不配置的 -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- 这个是开启自动故障转移,如果你没有自动故障转移,这个可以先不配 -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>8192</value>
</property>
<!-- namenode处理RPC请求线程数增大该值资源占用不大 -->
<property>
<name>dfs.namenode.handler.count</name>
<value>30</value>
</property>
<!-- datanode处理RPC请求线程数增大该值会占用更多内存 -->
<property>
<name>dfs.datanode.handler.count</name>
<value>40</value>
</property>
<!-- balance时可占用的带宽 -->
<property>
<name>dfs.balance.bandwidthPerSec</name>
<value>104857600</value>
</property>
<!-- 磁盘预留空间该空间不会被hdfs占用单位字节-->
<property>
<name>dfs.datanode.du.reserved</name>
<value>5368709120</value>
</property>
<!-- datanode与namenode连接超时时间单位毫秒 2 * heartbeat.recheck.interval + 30000 -->
<property>
<name>heartbeat.recheck.interval</name>
<value>100000</value>
</property>
</configuration>

View File

@@ -0,0 +1,23 @@
#Log4j
log4j.rootLogger=info,console,file
# Console appender settings
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=info
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n
# File appender settings
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=info
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
#Use a relative path and verify that output lands under the application directory
log4j.appender.file.file=${nis.root}/log/flink-dos-detection.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n
#MyBatis: com.nis.web.dao is the package that holds the MyBatis mapper interfaces
log4j.logger.com.nis.web.dao=debug
#BoneCP datasource logging
log4j.category.com.jolbox=debug,console

View File

@@ -0,0 +1,196 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!--声明两台resourcemanager的地址-->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>rmcluster</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rsm1,rsm2</value>
</property>
<!-- 配置rm1-->
<property>
<name>yarn.resourcemanager.hostname.rsm1</name>
<value>192.168.40.152</value>
</property>
<property>
<!--<name>yarn.resourcemanager.hostname.rm1</name>-->
<name>yarn.resourcemanager.address.rsm1</name>
<value>192.168.40.152:9916</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rsm1</name>
<value>192.168.40.152:9917</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rsm1</name>
<value>192.168.40.152:9918</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rsm1</name>
<value>192.168.40.152:9919</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
<value>192.168.40.152:9920</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rsm1</name>
<value>192.168.40.152:23142</value>
</property>
<!-- 配置rm2-->
<property>
<name>yarn.resourcemanager.hostname.rsm2</name>
<value>192.168.40.203</value>
</property>
<property>
<!--<name>yarn.resourcemanager.hostname.rm1</name>-->
<name>yarn.resourcemanager.address.rsm2</name>
<value>192.168.40.203:9916</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address.rsm2</name>
<value>192.168.40.203:9917</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rsm2</name>
<value>192.168.40.203:9918</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address.rsm2</name>
<value>192.168.40.203:9919</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
<value>192.168.40.203:9920</value>
</property>
<property>
<name>yarn.resourcemanager.ha.admin.address.rsm2</name>
<value>192.168.40.203:23142</value>
</property>
<!--指定zookeeper集群的地址-->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>192.168.40.151:2181,192.168.40.152:2181,192.168.40.203:2181</value>
</property>
<!--启用自动恢复当任务进行一半rm坏掉就要启动自动恢复默认是false-->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!--启动nm自动恢复当集群重启container开启自动恢复保障任务的可靠性默认为false启动该配置需开启下面配置 -->
<property>
<name>yarn.nodemanager.recovery.enabled</name>
<value>true</value>
</property>
<!--开启nm故障恢复后nm元数据存储路径 -->
<property>
<name>yarn.nodemanager.recovery.dir</name>
<value>/home/tsg/olap/hadoop-2.7.1/yarn</value>
</property>
<!--启用nm恢复时监控功能开启后不会去尝试清理container默认false -->
<property>
<name>yarn.nodemanager.recovery.supervised</name>
<value>true</value>
</property>
<!--配置nm可用的RPC地址默认${yarn.nodemanager.hostname}:0为临时端口。集群重启后nm与rm连接的端口会变化这里指定端口保障nm restart功能 -->
<property>
<name>yarn.nodemanager.address</name>
<value>${yarn.nodemanager.hostname}:9923</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>30720</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>1024</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>30720</value>
</property>
<!--开启日志聚合 -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.nodemanager.heartbeat-interval-ms</name>
<value>3000</value>
</property>
<!--日志保留7天 -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
<property>
<name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
<value>3600</value>
</property>
<property>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>/tmp/logs</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>14</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-vcores</name>
<value>1</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-vcores</name>
<value>14</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.disk-health-checker.enable</name>
<value>false</value>
</property>
<!--ApplicationMaster重启次数配置HA后默认为2-->
<property>
<name>yarn.resourcemanager.am.max-attempts</name>
<value>10000</value>
</property>
<property>
<name>yarn.log.server.url</name>
<value>http://bigdata-151:19888/jobhistory/logs</value>
</property>
</configuration>

View File

@@ -0,0 +1,7 @@
package com.zdjizhi.common;
public class HttpTest {
public static void main(String[] args) throws Exception {
}
}

View File

@@ -0,0 +1,106 @@
package com.zdjizhi.common;
import inet.ipaddr.Address;
import inet.ipaddr.AddressStringException;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressString;
import inet.ipaddr.format.util.AddressTrieMap;
import inet.ipaddr.format.util.AssociativeAddressTrie;
import inet.ipaddr.ipv4.IPv4Address;
import inet.ipaddr.ipv4.IPv4AddressAssociativeTrie;
import org.apache.flink.shaded.guava18.com.google.common.collect.Range;
import org.apache.flink.shaded.guava18.com.google.common.collect.TreeRangeMap;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class IpTest {
public static void main(String[] args) throws Exception {
IPv4AddressAssociativeTrie<Integer> trie = new IPv4AddressAssociativeTrie<>();
IPAddress str1 = new IPAddressString("1.2.3.4").getAddress();
IPAddress str2 = new IPAddressString("10.0.0.0/15").getAddress();
IPAddress str3 = new IPAddressString("25.4.2.0/23").getAddress();
IPAddress str4 = new IPAddressString("192.168.8.0/21").getAddress();
IPAddress str5 = new IPAddressString("240.0.0.0/4").getAddress();
IPAddress str6 = new IPAddressString("fc00::0/64").getAddress();
IPAddress str7 = new IPAddressString("fc00::10:1").getAddress();
TreeRangeMap<IPAddress, Object> rangeMap = TreeRangeMap.create();
rangeMap.put(Range.closed(str1.getLower(),str1.getUpper()),1);
rangeMap.put(Range.closed(str2.getLower(),str2.getUpper()),2);
rangeMap.put(Range.closed(str3.getLower(),str3.getUpper()),3);
rangeMap.put(Range.closed(str4.getLower(),str4.getUpper()),4);
rangeMap.put(Range.closed(str5.getLower(),str5.getUpper()),5);
rangeMap.put(Range.closed(str6.getLower(),str6.getUpper()),6);
rangeMap.put(Range.closed(str7.getLower(),str7.getUpper()),7);
IPAddress pv4 = new IPAddressString("255.255.14.255").getAddress();
IPAddress pv42 = new IPAddressString("1.2.3.4").getAddress();
IPAddress pv43 = new IPAddressString("fc00::").getAddress();
IPAddress pv44 = new IPAddressString("fc00::10:1").getAddress();
IPAddress pv45 = new IPAddressString("192.168.42.1").getAddress();
IPAddress pv46 = new IPAddressString("192.168.42.1/32").getAddress();
IPAddress pv47 = new IPAddressString("12.56.4.0").getAddress();
IPAddress mask = pv45.getNetwork().getNetworkMask(24, false);
System.out.println(pv45.isMultiple());
System.out.println(pv46.isMultiple());
System.out.println(pv46.isPrefixed());
System.out.println(pv47.isPrefixed());
System.out.println(pv45+"---"+pv45.toMaxHost().withoutPrefixLength()+"---"+pv45.adjustPrefixLength(pv45.getBitCount()));
System.out.println(pv45+"---mask:"+pv45.mask(mask).toString());
System.out.println(pv45.adjustPrefixLength(pv45.getBitCount())+"---"+pv45.toMaxHost().withoutPrefixLength());
/*
System.out.println(str5.getUpper()+"---"+str5.getLower());
System.out.println(rangeMap.span().contains(pv4));
System.out.println(rangeMap.get(pv4));
System.out.println(rangeMap.get(pv42));
System.out.println(rangeMap.get(pv43));
System.out.println(rangeMap.get(pv44));
*/
/*
System.out.println(str5.toSequentialRange());
// System.out.println(str2.contains(new IPAddressString("10.0.0.2")));
// System.out.println(str5.toAddress().toIPv4().toSequentialRange());
trie.put(str1,1);
trie.put(str2,2);
trie.put(str3,3);
trie.put(str4,4);
trie.put(str5,5);
AddressTrieMap<IPv4Address, Integer> trieMap = new AddressTrieMap<>(trie);
trieMap.forEach((k,v) -> {
System.out.println(k.toString() + "--" + v);
});
System.out.println("-----------------");
trie.forEach((k) -> System.out.println(k.toString()));
System.out.println(str5.contains(pv4));
System.out.println(trie.contains(pv4));
System.out.println(trieMap.get(pv4));
System.out.println(trieMap.containsKey(pv4));
// System.out.println(trieMap.getRange());
// IPAddress str3 = new IPAddressString("fc00::10:1").getAddress();
// IPAddress str4 = new IPAddressString("fc00::10:2/64").getAddress();
// System.out.println(Arrays.toString(str1.mergeToPrefixBlocks(str2,str3,str4)));
*/
}
}

View File

@@ -0,0 +1,101 @@
package com.zdjizhi.common;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;
import com.alibaba.nacos.api.exception.NacosException;
import org.junit.Test;
import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;
import java.util.concurrent.Executor;
/**
* @author qidaijie
* @Package com.zdjizhi
* @Description:
* @date 2022/3/10 16:58
*/
public class NacosTest {
/**
* <dependency>
* <groupId>com.alibaba.nacos</groupId>
* <artifactId>nacos-client</artifactId>
* <version>1.2.0</version>
* </dependency>
*/
private static Properties properties = new Properties();
/**
* config data id = config name
*/
private static final String DATA_ID = "dos_baseline.properties";
/**
* config group
*/
private static final String GROUP = "Galaxy";
private void getProperties() {
properties.setProperty(PropertyKeyConst.SERVER_ADDR, "192.168.44.12:8848");
properties.setProperty(PropertyKeyConst.NAMESPACE, "test");
properties.setProperty(PropertyKeyConst.USERNAME, "nacos");
properties.setProperty(PropertyKeyConst.PASSWORD, "nacos");
}
@Test
public void GetConfigurationTest() {
try {
getProperties();
ConfigService configService = NacosFactory.createConfigService(properties);
String content = configService.getConfig(DATA_ID, GROUP, 5000);
Properties nacosConfigMap = new Properties();
nacosConfigMap.load(new StringReader(content));
System.out.println(nacosConfigMap.getProperty("static.sensitivity.threshold"));
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void ListenerConfigurationTest() {
getProperties();
try {
//first get config
ConfigService configService = NacosFactory.createConfigService(properties);
String config = configService.getConfig(DATA_ID, GROUP, 5000);
// System.out.println(config);
//start listener
configService.addListener(DATA_ID, GROUP, new Listener() {
@Override
public Executor getExecutor() {
return null;
}
@Override
public void receiveConfigInfo(String configMsg) {
System.out.println(configMsg);
}
});
} catch (Exception e) {
e.printStackTrace();
}
//keep running,change nacos config,print new config
/*
while (true) {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
*/
}
}

View File

@@ -1,17 +0,0 @@
package com.zdjizhi.common;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;
public class UdtfTest extends TableFunction<Row> {
public void eval(Row[] rows) {
for (Row row : rows) {
collect(row);
}
}
public static void main(String[] args) {
}
}