flink-dos-detection first commit
43 .gitignore vendored Normal file
@@ -0,0 +1,43 @@
# Created by .ignore support plugin (hsz.mobi)
*.class

# Mobile Tools for Java (J2ME)
.mtj.tmp/

# Package Files #
*.jar
*.war
*.ear
*.zip

# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*


target/
!.mvn/wrapper/maven-wrapper.jar

### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans

### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr

### NetBeans ###
nbproject/private/
builds/
nbbuild/
dist/
nbdist/
.nb-gradle/

log/
logs/
253 pom.xml Normal file
@@ -0,0 +1,253 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.zdjizhi</groupId>
    <artifactId>flink-dos-detection</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <flink.version>1.13.1</flink.version>
        <hive.version>2.1.1</hive.version>
        <hadoop.version>2.7.1</hadoop.version>
    </properties>

    <repositories>
        <repository>
            <id>nexus</id>
            <name>Team Nexus Repository</name>
            <url>http://192.168.40.125:8099/content/groups/public</url>
        </repository>

        <repository>
            <id>ebi</id>
            <name>www.ebi.ac.uk</name>
            <url>http://www.ebi.ac.uk/intact/maven/nexus/content/groups/public/</url>
        </repository>

        <repository>
            <id>maven-ali</id>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
            <releases>
                <enabled>true</enabled>
            </releases>
            <snapshots>
                <enabled>true</enabled>
                <updatePolicy>always</updatePolicy>
                <checksumPolicy>fail</checksumPolicy>
            </snapshots>
        </repository>
    </repositories>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.8.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <!-- The semantics of this option are reversed, see MCOMPILER-209. -->
                    <useIncrementalCompilation>false</useIncrementalCompilation>
                    <compilerArgs>
                        <!-- Prevents recompilation due to missing package-info.class, see MCOMPILER-205 -->
                        <arg>-Xpkginfo:always</arg>
                    </compilerArgs>
                </configuration>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <executions>
                    <execution>
                        <id>flink-dos-detection</id>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>

                        <configuration>
                            <finalName>flink-dos-detection</finalName>
                            <filters>
                                <filter>
                                    <!-- Do not copy the signatures in the META-INF folder.
                                         Otherwise, this might cause SecurityExceptions when using the JAR. -->
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass>com.zdjizhi.main.DosDetectionApplication</mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <dependencies>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.21</version>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.21</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-sql-connector-kafka_2.11</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_2.11</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.flink/flink-table -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.flink/flink-json -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.0.0</version>
        </dependency>

        <!-- Flink modules -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_2.11</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner_2.11</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <!-- CLI dependencies -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_2.11</artifactId>
            <version>${flink.version}</version>
            <!--<scope>provided</scope>-->
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.1</version>
            <exclusions>
                <exclusion>
                    <artifactId>zookeeper</artifactId>
                    <groupId>org.apache.zookeeper</groupId>
                </exclusion>
            </exclusions>
        </dependency>

        <!--
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_2.11</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        -->

        <dependency>
            <groupId>org.apache.zookeeper</groupId>
            <artifactId>zookeeper</artifactId>
            <version>3.4.9</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>log4j-over-slf4j</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-hbase-2.2_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-all</artifactId>
            <version>5.5.2</version>
        </dependency>

        <dependency>
            <groupId>com.zdjizhi</groupId>
            <artifactId>galaxy</artifactId>
            <version>1.0.4</version>
            <exclusions>
                <exclusion>
                    <artifactId>slf4j-log4j12</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <artifactId>log4j-over-slf4j</artifactId>
                    <groupId>org.slf4j</groupId>
                </exclusion>
                <exclusion>
                    <groupId>com.google.guava</groupId>
                    <artifactId>guava</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

    </dependencies>

</project>
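With this build configuration, a plain mvn package run compiles against Java 8 and executes the shade goal during the package phase, producing a self-contained target/flink-dos-detection.jar whose manifest main class is com.zdjizhi.main.DosDetectionApplication; submitting it with the stock Flink CLI (flink run target/flink-dos-detection.jar) should therefore need no extra --class argument. Note the CLI invocation here is the standard one, not something pinned down by this commit.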
47 src/main/java/com/zdjizhi/common/CommonConfig.java Normal file
@@ -0,0 +1,47 @@
package com.zdjizhi.common;

import com.zdjizhi.utils.CommonConfigurations;

/**
 * Created by wk on 2021/1/6.
 */
public class CommonConfig {

    public static final int STREAM_EXECUTION_ENVIRONMENT_PARALLELISM = CommonConfigurations.getIntProperty("stream.execution.environment.parallelism");

    public static final int KAFKA_INPUT_PARALLELISM = CommonConfigurations.getIntProperty("kafka.input.parallelism");
    public static final String KAFKA_INPUT_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.input.topic.name");
    public static final String KAFKA_INPUT_BOOTSTRAP_SERVERS = CommonConfigurations.getStringProperty("kafka.input.bootstrap.servers");
    public static final String KAFKA_SCAN_STARTUP_MODE = CommonConfigurations.getStringProperty("kafka.input.scan.startup.mode");
    public static final String KAFKA_GROUP_ID = CommonConfigurations.getStringProperty("kafka.input.group.id");

    public static final int KAFKA_OUTPUT_METRIC_PARALLELISM = CommonConfigurations.getIntProperty("kafka.output.metric.parallelism");
    public static final String KAFKA_OUTPUT_METRIC_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.output.metric.topic.name");
    public static final int KAFKA_OUTPUT_EVENT_PARALLELISM = CommonConfigurations.getIntProperty("kafka.output.event.parallelism");
    public static final String KAFKA_OUTPUT_EVENT_TOPIC_NAME = CommonConfigurations.getStringProperty("kafka.output.event.topic.name");
    public static final String KAFKA_OUTPUT_BOOTSTRAP_SERVERS = CommonConfigurations.getStringProperty("kafka.output.bootstrap.servers");

    public static final String HBASE_ZOOKEEPER_QUORUM = CommonConfigurations.getStringProperty("hbase.zookeeper.quorum");
    public static final String HBASE_ZOOKEEPER_CLIENT_PORT = CommonConfigurations.getStringProperty("hbase.zookeeper.client.port");
    public static final int HBASE_CLIENT_OPERATION_TIMEOUT = CommonConfigurations.getIntProperty("hbase.client.operation.timeout");
    public static final int HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD = CommonConfigurations.getIntProperty("hbase.client.scanner.timeout.period");

    public static final String HBASE_BASELINE_TABLE_NAME = CommonConfigurations.getStringProperty("hbase.baseline.table.name");
    public static final String HBASE_BASELINE_FAMLIY_NAME = CommonConfigurations.getStringProperty("hbase.baseline.famliy.name");
    public static final int HBASE_BASELINE_TOTAL_NUM = CommonConfigurations.getIntProperty("hbase.baseline.total.num");

    public static final int FLINK_WATERMARK_MAX_ORDERNESS = CommonConfigurations.getIntProperty("flink.watermark.max.orderness");
    public static final int FLINK_WINDOW_MAX_TIME = CommonConfigurations.getIntProperty("flink.window.max.time");

    public static final int SOURCE_IP_LIST_LIMIT = CommonConfigurations.getIntProperty("source.ip.list.limit");
    public static final int DATA_CENTER_ID_NUM = CommonConfigurations.getIntProperty("data.center.id.num");

    public static final String IP_MMDB_PATH = CommonConfigurations.getStringProperty("ip.mmdb.path");

    public static final double BASELINE_SESSIONS_MINOR_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.minor.threshold");
    public static final double BASELINE_SESSIONS_WARNING_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.warning.threshold");
    public static final double BASELINE_SESSIONS_MAJOR_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.major.threshold");
    public static final double BASELINE_SESSIONS_SEVERE_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.severe.threshold");
    public static final double BASELINE_SESSIONS_CRITICAL_THRESHOLD = CommonConfigurations.getDoubleProperty("baseline.sessions.critical.threshold");

}
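Every key above is read from a common.properties file on the classpath (see CommonConfigurations further down). For reference, a minimal sketch of that file covering exactly these keys; all values below are illustrative assumptions, not taken from the repository:

# common.properties - illustrative values only
stream.execution.environment.parallelism=4
kafka.input.parallelism=2
kafka.input.topic.name=dos-sketch
kafka.input.bootstrap.servers=127.0.0.1:9092
kafka.input.scan.startup.mode=latest-offset
kafka.input.group.id=flink-dos-detection
kafka.output.metric.parallelism=1
kafka.output.metric.topic.name=dos-metrics
kafka.output.event.parallelism=1
kafka.output.event.topic.name=dos-events
kafka.output.bootstrap.servers=127.0.0.1:9092
hbase.zookeeper.quorum=127.0.0.1
hbase.zookeeper.client.port=2181
hbase.client.operation.timeout=30000
hbase.client.scanner.timeout.period=60000
hbase.baseline.table.name=dos_baseline
hbase.baseline.famliy.name=TCP SYN Flood
hbase.baseline.total.num=10000
flink.watermark.max.orderness=10
flink.window.max.time=10
source.ip.list.limit=100
data.center.id.num=1
ip.mmdb.path=/opt/GeoLite2-Country.mmdb
baseline.sessions.minor.threshold=0.2
baseline.sessions.warning.threshold=0.4
baseline.sessions.major.threshold=0.6
baseline.sessions.severe.threshold=0.8
baseline.sessions.critical.threshold=0.9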
143 src/main/java/com/zdjizhi/common/DosEventLog.java Normal file
@@ -0,0 +1,143 @@
package com.zdjizhi.common;

import java.io.Serializable;

public class DosEventLog implements Serializable {

    private long log_id;
    private long start_time;
    private long end_time;
    private String attack_type;
    private String severity;
    private String conditions;
    private String destination_ip;
    private String destination_country;
    private String source_ip_list;
    private String source_country_list;
    private long session_rate;
    private long packet_rate;
    private long bit_rate;

    public long getLog_id() {
        return log_id;
    }

    public void setLog_id(long log_id) {
        this.log_id = log_id;
    }

    public long getStart_time() {
        return start_time;
    }

    public void setStart_time(long start_time) {
        this.start_time = start_time;
    }

    public long getEnd_time() {
        return end_time;
    }

    public void setEnd_time(long end_time) {
        this.end_time = end_time;
    }

    public String getAttack_type() {
        return attack_type;
    }

    public void setAttack_type(String attack_type) {
        this.attack_type = attack_type;
    }

    public String getSeverity() {
        return severity;
    }

    public void setSeverity(String severity) {
        this.severity = severity;
    }

    public String getConditions() {
        return conditions;
    }

    public void setConditions(String conditions) {
        this.conditions = conditions;
    }

    public String getDestination_ip() {
        return destination_ip;
    }

    public void setDestination_ip(String destination_ip) {
        this.destination_ip = destination_ip;
    }

    public String getDestination_country() {
        return destination_country;
    }

    public void setDestination_country(String destination_country) {
        this.destination_country = destination_country;
    }

    public String getSource_ip_list() {
        return source_ip_list;
    }

    public void setSource_ip_list(String source_ip_list) {
        this.source_ip_list = source_ip_list;
    }

    public String getSource_country_list() {
        return source_country_list;
    }

    public void setSource_country_list(String source_country_list) {
        this.source_country_list = source_country_list;
    }

    public long getSession_rate() {
        return session_rate;
    }

    public void setSession_rate(long session_rate) {
        this.session_rate = session_rate;
    }

    public long getPacket_rate() {
        return packet_rate;
    }

    public void setPacket_rate(long packet_rate) {
        this.packet_rate = packet_rate;
    }

    public long getBit_rate() {
        return bit_rate;
    }

    public void setBit_rate(long bit_rate) {
        this.bit_rate = bit_rate;
    }

    @Override
    public String toString() {
        return "DosEventLog{" +
                "log_id=" + log_id +
                ", start_time=" + start_time +
                ", end_time=" + end_time +
                ", attack_type='" + attack_type + '\'' +
                ", severity='" + severity + '\'' +
                ", conditions='" + conditions + '\'' +
                ", destination_ip='" + destination_ip + '\'' +
                ", destination_country='" + destination_country + '\'' +
                ", source_ip_list='" + source_ip_list + '\'' +
                ", source_country_list='" + source_country_list + '\'' +
                ", session_rate=" + session_rate +
                ", packet_rate=" + packet_rate +
                ", bit_rate=" + bit_rate +
                '}';
    }
}
93 src/main/java/com/zdjizhi/common/DosMetricsLog.java Normal file
@@ -0,0 +1,93 @@
package com.zdjizhi.common;

import java.io.Serializable;

public class DosMetricsLog implements Serializable {

    private long sketch_start_time;
    private String common_sled_ip;
    private String common_data_center;
    private String attack_type;
    private String destination_ip;
    private long session_rate;
    private long packet_rate;
    private long bit_rate;

    public long getSketch_start_time() {
        return sketch_start_time;
    }

    public void setSketch_start_time(long sketch_start_time) {
        this.sketch_start_time = sketch_start_time;
    }

    public String getCommon_sled_ip() {
        return common_sled_ip;
    }

    public void setCommon_sled_ip(String common_sled_ip) {
        this.common_sled_ip = common_sled_ip;
    }

    public String getCommon_data_center() {
        return common_data_center;
    }

    public void setCommon_data_center(String common_data_center) {
        this.common_data_center = common_data_center;
    }

    public String getAttack_type() {
        return attack_type;
    }

    public void setAttack_type(String attack_type) {
        this.attack_type = attack_type;
    }

    public String getDestination_ip() {
        return destination_ip;
    }

    public void setDestination_ip(String destination_ip) {
        this.destination_ip = destination_ip;
    }

    public long getSession_rate() {
        return session_rate;
    }

    public void setSession_rate(long session_rate) {
        this.session_rate = session_rate;
    }

    public long getPacket_rate() {
        return packet_rate;
    }

    public void setPacket_rate(long packet_rate) {
        this.packet_rate = packet_rate;
    }

    public long getBit_rate() {
        return bit_rate;
    }

    public void setBit_rate(long bit_rate) {
        this.bit_rate = bit_rate;
    }

    @Override
    public String toString() {
        return "DosMetricsLog{" +
                "sketch_start_time=" + sketch_start_time +
                ", common_sled_ip='" + common_sled_ip + '\'' +
                ", common_data_center='" + common_data_center + '\'' +
                ", attack_type='" + attack_type + '\'' +
                ", destination_ip='" + destination_ip + '\'' +
                ", session_rate=" + session_rate +
                ", packet_rate=" + packet_rate +
                ", bit_rate=" + bit_rate +
                '}';
    }
}
113 src/main/java/com/zdjizhi/common/DosSketchLog.java Normal file
@@ -0,0 +1,113 @@
package com.zdjizhi.common;

import java.io.Serializable;

public class DosSketchLog implements Serializable {

    private String common_sled_ip;
    private String common_data_center;
    private long sketch_start_time;
    private long sketch_duration;
    private String attack_type;
    private String source_ip;
    private String destination_ip;
    private long sketch_sessions;
    private long sketch_packets;
    private long sketch_bytes;

    @Override
    public String toString() {
        return "DosSketchLog{" +
                "common_sled_ip='" + common_sled_ip + '\'' +
                ", common_data_center='" + common_data_center + '\'' +
                ", sketch_start_time=" + sketch_start_time +
                ", sketch_duration=" + sketch_duration +
                ", attack_type='" + attack_type + '\'' +
                ", source_ip='" + source_ip + '\'' +
                ", destination_ip='" + destination_ip + '\'' +
                ", sketch_sessions=" + sketch_sessions +
                ", sketch_packets=" + sketch_packets +
                ", sketch_bytes=" + sketch_bytes +
                '}';
    }

    public String getCommon_sled_ip() {
        return common_sled_ip;
    }

    public void setCommon_sled_ip(String common_sled_ip) {
        this.common_sled_ip = common_sled_ip;
    }

    public String getCommon_data_center() {
        return common_data_center;
    }

    public void setCommon_data_center(String common_data_center) {
        this.common_data_center = common_data_center;
    }

    public long getSketch_start_time() {
        return sketch_start_time;
    }

    public void setSketch_start_time(long sketch_start_time) {
        this.sketch_start_time = sketch_start_time;
    }

    public long getSketch_duration() {
        return sketch_duration;
    }

    public void setSketch_duration(long sketch_duration) {
        this.sketch_duration = sketch_duration;
    }

    public String getAttack_type() {
        return attack_type;
    }

    public void setAttack_type(String attack_type) {
        this.attack_type = attack_type;
    }

    public String getSource_ip() {
        return source_ip;
    }

    public void setSource_ip(String source_ip) {
        this.source_ip = source_ip;
    }

    public String getDestination_ip() {
        return destination_ip;
    }

    public void setDestination_ip(String destination_ip) {
        this.destination_ip = destination_ip;
    }

    public long getSketch_sessions() {
        return sketch_sessions;
    }

    public void setSketch_sessions(long sketch_sessions) {
        this.sketch_sessions = sketch_sessions;
    }

    public long getSketch_packets() {
        return sketch_packets;
    }

    public void setSketch_packets(long sketch_packets) {
        this.sketch_packets = sketch_packets;
    }

    public long getSketch_bytes() {
        return sketch_bytes;
    }

    public void setSketch_bytes(long sketch_bytes) {
        this.sketch_bytes = sketch_bytes;
    }
}
177 src/main/java/com/zdjizhi/etl/DosDetection.java Normal file
@@ -0,0 +1,177 @@
package com.zdjizhi.etl;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.sink.OutputStreamSink;
import com.zdjizhi.utils.IpUtils;
import com.zdjizhi.utils.SnowflakeId;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.NumberFormat;
import java.text.ParseException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

/**
 * @author wlh
 * DoS detection decision logic.
 */
public class DosDetection extends BroadcastProcessFunction<DosSketchLog, Map<String, Map<String, List<Integer>>>, DosEventLog> {

    private static final Logger logger = LoggerFactory.getLogger(DosDetection.class);

    private static final int BASELINE_SIZE = 144;

    private static final NumberFormat PERCENT_INSTANCE = NumberFormat.getPercentInstance();

    @Override
    public void open(Configuration parameters) {
        PERCENT_INSTANCE.setMinimumFractionDigits(2);
    }

    @Override
    public void processElement(DosSketchLog value, ReadOnlyContext ctx, Collector<DosEventLog> out) throws Exception {
        Map<String, Map<String, List<Integer>>> broadcast = ctx.getBroadcastState(OutputStreamSink.descriptor).get("broadcast-state");
        // The baseline broadcast may not have arrived yet when the first sketches are processed.
        if (broadcast == null) {
            logger.warn("Baseline broadcast state not loaded yet, skipping record");
            return;
        }
        String destinationIp = value.getDestination_ip();
        String attackType = value.getAttack_type();
        logger.info("Evaluating record: {}", value);
        if (broadcast.containsKey(destinationIp)) {
            List<Integer> baseline = broadcast.get(destinationIp).get(attackType);
            if (baseline != null && baseline.size() == BASELINE_SIZE) {
                int timeIndex = getCurrentTimeIndex(value.getSketch_start_time());
                Integer base = baseline.get(timeIndex);
                long sketchSessions = value.getSketch_sessions();
                long diff = sketchSessions - base;
                if (diff > 0) {
                    String percent = getDiffPercent(diff, sketchSessions);
                    double diffPercentDouble = getDiffPercentDouble(percent);
                    Severity severity = judgeSeverity(diffPercentDouble);
                    if (severity != Severity.NORMAL) {
                        DosEventLog result = getResult(value, severity, percent);
                        logger.info("Anomaly detected for server IP {}, event: {}", destinationIp, result);
                        out.collect(result);
                    } else {
                        logger.info("No anomaly for server IP {}, record: {}", destinationIp, value);
                    }
                }
            }
        } else {
            logger.info("No baseline data found for server IP {}", destinationIp);
        }
    }

    @Override
    public void processBroadcastElement(Map<String, Map<String, List<Integer>>> value, Context ctx, Collector<DosEventLog> out) throws Exception {
        ctx.getBroadcastState(OutputStreamSink.descriptor).put("broadcast-state", value);
    }

    public static void main(String[] args) {
        DosDetection dosDetection = new DosDetection();
//        HashSet<String> strings = new HashSet<>();
//        strings.add("13.46.241.36");
//        strings.add("25.46.241.45");
//        strings.add("133.46.241.53");
//        strings.add("219.46.242.74");
//        strings.add("153.146.241.196");
//        strings.add("132.46.241.21");
//        String join = StringUtils.join(strings, ",");
        System.out.println(dosDetection.getCurrentTimeIndex(1627378879));

        System.out.println();
    }

    private DosEventLog getResult(DosSketchLog value, Severity severity, String percent) {
        DosEventLog dosEventLog = new DosEventLog();
        dosEventLog.setLog_id(SnowflakeId.generateId());
        dosEventLog.setStart_time(value.getSketch_start_time());
        dosEventLog.setEnd_time(value.getSketch_start_time() + CommonConfig.FLINK_WINDOW_MAX_TIME);
        dosEventLog.setAttack_type(value.getAttack_type());
        dosEventLog.setSeverity(severity.name());
        dosEventLog.setConditions(getConditions(percent));
        dosEventLog.setDestination_ip(value.getDestination_ip());
        dosEventLog.setDestination_country(IpUtils.ipLookup.countryLookup(value.getDestination_ip()));
        String ipList = value.getSource_ip();
        dosEventLog.setSource_ip_list(ipList);
        dosEventLog.setSource_country_list(getSourceCountryList(ipList));
        dosEventLog.setSession_rate(value.getSketch_sessions());
        dosEventLog.setPacket_rate(value.getSketch_packets());
        dosEventLog.setBit_rate(value.getSketch_bytes());
        return dosEventLog;
    }

    private String getConditions(String percent) {
        return "sessions > " + percent + " of baseline";
    }

    private String getSourceCountryList(String sourceIpList) {
        String[] ipArr = sourceIpList.split(",");
        HashSet<String> countrySet = new HashSet<>();
        for (String ip : ipArr) {
            countrySet.add(IpUtils.ipLookup.countryLookup(ip));
        }
        return StringUtils.join(countrySet, ",");
    }

    private int getCurrentTimeIndex(long sketchStartTime) {
        // Floor to the start of the current UTC day, then index the 10-minute slot (0..143).
        long currentDayTime = sketchStartTime / (60 * 60 * 24) * 60 * 60 * 24;
        return (int) ((sketchStartTime - currentDayTime) / 600);
    }

    private String getDiffPercent(long diff, long sketchSessions) {
        return PERCENT_INSTANCE.format((double) diff / (double) sketchSessions);
    }

    private double getDiffPercentDouble(String diffPercent) throws ParseException {
        return PERCENT_INSTANCE.parse(diffPercent).doubleValue();
    }

    private Severity judgeSeverity(double diffPercent) {
        if (diffPercent >= CommonConfig.BASELINE_SESSIONS_MINOR_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_WARNING_THRESHOLD) {
            return Severity.MINOR;
        } else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_WARNING_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_MAJOR_THRESHOLD) {
            return Severity.WARNING;
        } else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_MAJOR_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_SEVERE_THRESHOLD) {
            return Severity.MAJOR;
        } else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_SEVERE_THRESHOLD && diffPercent < CommonConfig.BASELINE_SESSIONS_CRITICAL_THRESHOLD) {
            return Severity.SEVERE;
        } else if (diffPercent >= CommonConfig.BASELINE_SESSIONS_CRITICAL_THRESHOLD) {
            return Severity.CRITICAL;
        } else {
            return Severity.NORMAL;
        }
    }

    private enum Severity {
        /**
         * Severity levels of a detected anomaly.
         */
        CRITICAL("Critical"),
        SEVERE("Severe"),
        MAJOR("Major"),
        WARNING("Warning"),
        MINOR("Minor"),
        NORMAL("Normal");

        private final String severity;

        @Override
        public String toString() {
            return this.severity;
        }

        Severity(String severity) {
            this.severity = severity;
        }

    }
}
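To make the indexing and threshold math concrete: getCurrentTimeIndex floors the sketch timestamp to the start of its UTC day and divides the remainder by 600, so each of the 144 baseline slots covers ten minutes. For the epoch second 1627378879 used in main (09:41:19 UTC), 1627378879 mod 86400 = 34879 and 34879 / 600 = 58, so the baseline value at slot 58 is compared against the observed session rate. If that baseline were 400 sessions/s and the sketch reported 1000, diff would be 600 and the excess ratio 600/1000 = 60.00%; with the illustrative thresholds from the properties sketch above (minor 0.2 through critical 0.9), 0.60 falls in the [major, severe) band and the event is emitted with severity MAJOR.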
85 src/main/java/com/zdjizhi/etl/EtlProcessFunction.java Normal file
@@ -0,0 +1,85 @@
package com.zdjizhi.etl;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosSketchLog;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashSet;

import static com.zdjizhi.sink.OutputStreamSink.outputTag;

/**
 * @author 94976
 */
public class EtlProcessFunction extends ProcessWindowFunction<DosSketchLog, DosSketchLog, Tuple4<String, String, String, String>, TimeWindow> {

    private static final Logger logger = LoggerFactory.getLogger(EtlProcessFunction.class);

    @Override
    public void process(Tuple4<String, String, String, String> keys,
                        Context context, Iterable<DosSketchLog> elements,
                        Collector<DosSketchLog> out) {
        DosSketchLog middleResult = getMiddleResult(keys, elements);
        try {
            if (middleResult != null) {
                out.collect(middleResult);
                logger.debug("Intermediate aggregation result: {}", middleResult);
                context.output(outputTag, TrafficServerIpMetrics.getOutputMetric(keys, middleResult));
            }
        } catch (Exception e) {
            logger.error("Failed to emit intermediate aggregation result, middleResult: {}", middleResult, e);
        }
    }

    private DosSketchLog getMiddleResult(Tuple4<String, String, String, String> keys, Iterable<DosSketchLog> elements) {

        DosSketchLog midResultLog = new DosSketchLog();
        Tuple4<Long, Long, Long, String> values = sketchAggregate(elements);
        try {
            if (values != null) {
                midResultLog.setCommon_sled_ip(keys.f0);
                midResultLog.setCommon_data_center(keys.f1);
                midResultLog.setDestination_ip(keys.f3);
                midResultLog.setAttack_type(keys.f2);
                midResultLog.setSource_ip(values.f3);
                midResultLog.setSketch_sessions(values.f0);
                midResultLog.setSketch_packets(values.f1);
                midResultLog.setSketch_bytes(values.f2);
                return midResultLog;
            }
        } catch (Exception e) {
            logger.error("Failed to build intermediate result, keys: {} values: {}", keys, values, e);
        }
        return null;
    }

    private Tuple4<Long, Long, Long, String> sketchAggregate(Iterable<DosSketchLog> elements) {
        // cnt counts the aggregated elements so the sums below become window averages.
        int cnt = 0;
        long sessions = 0;
        long packets = 0;
        long bytes = 0;
        HashSet<String> sourceIpSet = new HashSet<>();
        try {
            for (DosSketchLog newSketchLog : elements) {
                sessions += newSketchLog.getSketch_sessions();
                packets += newSketchLog.getSketch_packets();
                bytes += newSketchLog.getSketch_bytes();
                cnt += 1;
                if (sourceIpSet.size() < CommonConfig.SOURCE_IP_LIST_LIMIT) {
                    sourceIpSet.add(newSketchLog.getSource_ip());
                }
            }
            if (cnt == 0) {
                return null;
            }
            String sourceIpList = StringUtils.join(sourceIpSet, ",");
            return Tuple4.of(sessions / cnt, packets / cnt, bytes / cnt, sourceIpList);
        } catch (Exception e) {
            logger.error("Failed to aggregate sketch window", e);
        }
        return null;
    }

}
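For example, if one window for a given (sled IP, data center, attack type, destination IP) key holds three parsed sketches with session rates 300, 600 and 900, sketchAggregate returns their mean 600 together with the deduplicated source-IP list (capped at source.ip.list.limit entries); that averaged DosSketchLog is both collected downstream for detection and turned into a DosMetricsLog on the side output.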
85 src/main/java/com/zdjizhi/etl/ParseSketchLog.java Normal file
@@ -0,0 +1,85 @@
package com.zdjizhi.etl;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.source.DosSketchSource;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;

public class ParseSketchLog {

    private static final Logger logger = LoggerFactory.getLogger(ParseSketchLog.class);

    public static SingleOutputStreamOperator<DosSketchLog> getSketchSource() {
        return flatSketchSource();
    }

    private static SingleOutputStreamOperator<DosSketchLog> flatSketchSource() {
        return DosSketchSource.createDosSketchSourceByDatastream()
                .flatMap(new FlatSketchLog())
                .assignTimestampsAndWatermarks(createWatermarkStrategy());
    }

    private static WatermarkStrategy<DosSketchLog> createWatermarkStrategy() {
        return WatermarkStrategy
                .<DosSketchLog>forBoundedOutOfOrderness(Duration.ofSeconds(CommonConfig.FLINK_WATERMARK_MAX_ORDERNESS))
                .withTimestampAssigner((event, timestamp) -> event.getSketch_start_time() * 1000);
    }

    private static class FlatSketchLog implements FlatMapFunction<String, DosSketchLog> {
        @Override
        public void flatMap(String s, Collector<DosSketchLog> collector) throws Exception {
            try {
                if (StringUtil.isNotBlank(s)) {
                    HashMap<String, Object> sketchSource = (HashMap<String, Object>) JsonMapper.fromJsonString(s, Object.class);
                    String commonSledIp = sketchSource.get("common_sled_ip").toString();
                    String commonDataCenter = sketchSource.get("common_data_center").toString();
                    long sketchStartTime = Long.parseLong(sketchSource.get("sketch_start_time").toString());
                    long sketchDuration = Long.parseLong(sketchSource.get("sketch_duration").toString());
                    String attackType = sketchSource.get("attack_type").toString();
                    ArrayList<HashMap<String, Object>> reportIpList = (ArrayList<HashMap<String, Object>>) sketchSource.get("report_ip_list");
                    for (HashMap<String, Object> obj : reportIpList) {
                        DosSketchLog dosSketchLog = new DosSketchLog();
                        dosSketchLog.setCommon_sled_ip(commonSledIp);
                        dosSketchLog.setCommon_data_center(commonDataCenter);
                        dosSketchLog.setSketch_start_time(sketchStartTime);
                        dosSketchLog.setSketch_duration(sketchDuration);
                        dosSketchLog.setAttack_type(attackType);
                        String sourceIp = obj.get("source_ip").toString();
                        String destinationIp = obj.get("destination_ip").toString();
                        long sketchSessions = Long.parseLong(obj.get("sketch_sessions").toString());
                        long sketchPackets = Long.parseLong(obj.get("sketch_packets").toString());
                        long sketchBytes = Long.parseLong(obj.get("sketch_bytes").toString());
                        dosSketchLog.setSource_ip(sourceIp);
                        dosSketchLog.setDestination_ip(destinationIp);
                        // Normalize the raw counters to per-second rates; bytes additionally become bits.
                        dosSketchLog.setSketch_sessions(sketchSessions / sketchDuration);
                        dosSketchLog.setSketch_packets(sketchPackets / sketchDuration);
                        dosSketchLog.setSketch_bytes(sketchBytes * 8 / sketchDuration);
                        collector.collect(dosSketchLog);
                        logger.info("Parsed sketch record: {}", dosSketchLog);
                    }
                }
            } catch (Exception e) {
                logger.error("Failed to parse record: {}", s, e);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        flatSketchSource().print();
        FlinkEnvironmentUtils.streamExeEnv.execute();
    }

}
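A sketch message on the input topic is expected to look like the following (the field names come from the parser above; all values are made up for illustration):

{
  "common_sled_ip": "10.0.0.5",
  "common_data_center": "dc1",
  "sketch_start_time": 1627378879,
  "sketch_duration": 10,
  "attack_type": "TCP SYN Flood",
  "report_ip_list": [
    {
      "source_ip": "198.51.100.7",
      "destination_ip": "203.0.113.10",
      "sketch_sessions": 12000,
      "sketch_packets": 48000,
      "sketch_bytes": 6400000
    }
  ]
}

Each entry of report_ip_list becomes one DosSketchLog, with the counters divided by sketch_duration so the POJO carries per-second rates (bytes additionally multiplied by 8 into bits).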
33 src/main/java/com/zdjizhi/etl/TrafficServerIpMetrics.java Normal file
@@ -0,0 +1,33 @@
package com.zdjizhi.etl;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosMetricsLog;
import com.zdjizhi.common.DosSketchLog;
import org.apache.flink.api.java.tuple.Tuple4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class TrafficServerIpMetrics {

    private static final Logger logger = LoggerFactory.getLogger(TrafficServerIpMetrics.class);

    static DosMetricsLog getOutputMetric(Tuple4<String, String, String, String> keys, DosSketchLog midResultLog) {
        DosMetricsLog dosMetricsLog = new DosMetricsLog();
        dosMetricsLog.setSketch_start_time(timeFloor(System.currentTimeMillis() / 1000));
        dosMetricsLog.setCommon_sled_ip(keys.f0);
        dosMetricsLog.setCommon_data_center(keys.f1);
        // Key order is (sled_ip, data_center, attack_type, destination_ip): f2 is the attack type, f3 the destination IP.
        dosMetricsLog.setAttack_type(keys.f2);
        dosMetricsLog.setDestination_ip(keys.f3);
        dosMetricsLog.setSession_rate(midResultLog.getSketch_sessions());
        dosMetricsLog.setPacket_rate(midResultLog.getSketch_packets());
        dosMetricsLog.setBit_rate(midResultLog.getSketch_bytes());
        logger.debug("Metric record built: {}", dosMetricsLog);
        return dosMetricsLog;
    }

    private static long timeFloor(long sketchStartTime) {
        return sketchStartTime / CommonConfig.FLINK_WINDOW_MAX_TIME * CommonConfig.FLINK_WINDOW_MAX_TIME;
    }

}
11 src/main/java/com/zdjizhi/main/DosDetectionApplication.java Normal file
@@ -0,0 +1,11 @@
package com.zdjizhi.main;

import com.zdjizhi.sink.OutputStreamSink;
import com.zdjizhi.utils.FlinkEnvironmentUtils;

public class DosDetectionApplication {

    public static void main(String[] args) throws Exception {
        OutputStreamSink.finalOutputSink();
        // finalOutputSink only assembles the job graph; the job still has to be launched.
        FlinkEnvironmentUtils.streamExeEnv.execute("flink-dos-detection");
    }

}
16 src/main/java/com/zdjizhi/sink/DosEventSink.java Normal file
@@ -0,0 +1,16 @@
package com.zdjizhi.sink;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.KafkaUtils;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;

public class DosEventSink {

    static void dosEventOutputSink(SingleOutputStreamOperator<DosEventLog> dosEventLogOutputStream) {
        dosEventLogOutputStream.map(JsonMapper::toJsonString).addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_EVENT_TOPIC_NAME))
                .setParallelism(CommonConfig.KAFKA_OUTPUT_EVENT_PARALLELISM);
    }

}
126 src/main/java/com/zdjizhi/sink/OutputStreamSink.java Normal file
@@ -0,0 +1,126 @@
package com.zdjizhi.sink;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosEventLog;
import com.zdjizhi.common.DosMetricsLog;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.etl.EtlProcessFunction;
import com.zdjizhi.etl.DosDetection;
import com.zdjizhi.etl.ParseSketchLog;
import com.zdjizhi.source.BaselineSource;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.api.java.typeutils.MapTypeInfo;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

/**
 * @author 94976
 */
public class OutputStreamSink {
    private static final Logger logger = LoggerFactory.getLogger(OutputStreamSink.class);

    public static OutputTag<DosMetricsLog> outputTag = new OutputTag<DosMetricsLog>("traffic server ip metrics"){};

    public static MapStateDescriptor<String, Map<String, Map<String, List<Integer>>>> descriptor = new MapStateDescriptor<>("broadcast-state",
            Types.STRING,
            new MapTypeInfo<>(String.class, new MapTypeInfo<>(String.class, (Class<List<Integer>>) (Class<?>) List.class).getTypeClass()));

    public static void finalOutputSink() {
        DosEventSink.dosEventOutputSink(getOutputSinkStream());
        TrafficServerIpMetricsSink.sideOutputMetricsSink(getMiddleStream());
    }

    public static void main(String[] args) throws Exception {
        SingleOutputStreamOperator<DosEventLog> dosEventLogOutputStream = getOutputSinkStream();
        DosEventSink.dosEventOutputSink(dosEventLogOutputStream);
        TrafficServerIpMetricsSink.sideOutputMetricsSink(getMiddleStream());
        dosEventLogOutputStream.print();
        FlinkEnvironmentUtils.streamExeEnv.execute();
    }

    private static SingleOutputStreamOperator<DosEventLog> getOutputSinkStream() {

        BroadcastStream<Map<String, Map<String, List<Integer>>>> broadcast = FlinkEnvironmentUtils.streamExeEnv
                .addSource(new BaselineSource())
                .broadcast(descriptor);
        logger.info("Baseline broadcast stream registered");

        return getMiddleStream().keyBy(new SecondKeySelector())
                .reduce(new SecondReduceFunc())
                .connect(broadcast)
                .process(new DosDetection());
    }

    private static SingleOutputStreamOperator<DosSketchLog> getMiddleStream() {
        return ParseSketchLog.getSketchSource()
                .keyBy(new FirstKeySelector())
                .window(TumblingEventTimeWindows.of(Time.seconds(CommonConfig.FLINK_WINDOW_MAX_TIME)))
                .process(new EtlProcessFunction());
    }

    private static String groupUniqSourceIp(String sourceIp1, String sourceIp2) {
        HashSet<String> sourceIpSet = new HashSet<>();
        Collections.addAll(sourceIpSet, (sourceIp1 + "," + sourceIp2).split(","));
        if (sourceIpSet.size() > CommonConfig.SOURCE_IP_LIST_LIMIT) {
            return StringUtils.join(takeUniqLimit(sourceIpSet, CommonConfig.SOURCE_IP_LIST_LIMIT), ",");
        }
        return StringUtils.join(sourceIpSet, ",");
    }

    private static <T> Collection<T> takeUniqLimit(Collection<T> collection, int limit) {
        int i = 0;
        Collection<T> newSet = new HashSet<>();
        for (T t : collection) {
            if (i < limit) {
                newSet.add(t);
                i += 1;
            }
        }
        return newSet;
    }

    private static class FirstKeySelector implements KeySelector<DosSketchLog, Tuple4<String, String, String, String>> {
        @Override
        public Tuple4<String, String, String, String> getKey(DosSketchLog dosSketchLog) throws Exception {
            return Tuple4.of(
                    dosSketchLog.getCommon_sled_ip(),
                    dosSketchLog.getCommon_data_center(),
                    dosSketchLog.getAttack_type(),
                    dosSketchLog.getDestination_ip());
        }
    }

    private static class SecondKeySelector implements KeySelector<DosSketchLog, Tuple2<String, String>> {
        @Override
        public Tuple2<String, String> getKey(DosSketchLog dosSketchLog) throws Exception {
            return Tuple2.of(
                    dosSketchLog.getAttack_type(),
                    dosSketchLog.getDestination_ip());
        }
    }

    private static class SecondReduceFunc implements ReduceFunction<DosSketchLog> {
        @Override
        public DosSketchLog reduce(DosSketchLog value1, DosSketchLog value2) throws Exception {
            // Pairwise averages; an approximation of the mean when more than two records are reduced.
            value1.setSketch_sessions((value1.getSketch_sessions() + value2.getSketch_sessions()) / 2);
            value1.setSketch_bytes((value1.getSketch_bytes() + value2.getSketch_bytes()) / 2);
            value1.setSketch_packets((value1.getSketch_packets() + value2.getSketch_packets()) / 2);
            value1.setSource_ip(groupUniqSourceIp(value1.getSource_ip(), value2.getSource_ip()));
            return value1;
        }
    }

}
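Putting the pieces together, the topology reads as: Kafka sketch source, flatMap parse into per-second rates, keyBy (sled IP, data center, attack type, destination IP), tumbling event-time window of flink.window.max.time seconds that averages each key and side-outputs a DosMetricsLog, then keyBy (attack type, destination IP) with the pairwise-averaging reduce to merge the per-sled views, connect with the broadcast HBase baseline, and finally DosDetection emitting events to Kafka while the side output drains to the metrics topic.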
21 src/main/java/com/zdjizhi/sink/TrafficServerIpMetricsSink.java Normal file
@@ -0,0 +1,21 @@
package com.zdjizhi.sink;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.common.DosMetricsLog;
import com.zdjizhi.common.DosSketchLog;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.KafkaUtils;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;

import static com.zdjizhi.sink.OutputStreamSink.outputTag;

class TrafficServerIpMetricsSink {

    static void sideOutputMetricsSink(SingleOutputStreamOperator<DosSketchLog> outputStream) {
        DataStream<DosMetricsLog> sideOutput = outputStream.getSideOutput(outputTag);
        sideOutput.map(JsonMapper::toJsonString).addSink(KafkaUtils.getKafkaSink(CommonConfig.KAFKA_OUTPUT_METRIC_TOPIC_NAME))
                .setParallelism(CommonConfig.KAFKA_OUTPUT_METRIC_PARALLELISM);
    }

}
127 src/main/java/com/zdjizhi/source/BaselineSource.java Normal file
@@ -0,0 +1,127 @@
package com.zdjizhi.source;

import com.zdjizhi.common.CommonConfig;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * @author wlh
 */
public class BaselineSource extends RichSourceFunction<Map<String, Map<String, List<Integer>>>> {

    private static final Logger logger = LoggerFactory.getLogger(BaselineSource.class);
    private Connection conn = null;
    private Table table = null;
    private Scan scan = null;

    @Override
    public void open(Configuration parameters) throws Exception {

        org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();

        config.set("hbase.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
        config.set("hbase.client.retries.number", "3");
        config.set("hbase.bulkload.retries.number", "3");
        config.set("zookeeper.recovery.retry", "3");
        config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, CommonConfig.HBASE_CLIENT_OPERATION_TIMEOUT);
        config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CommonConfig.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);

        TableName tableName = TableName.valueOf(CommonConfig.HBASE_BASELINE_TABLE_NAME);
        conn = ConnectionFactory.createConnection(config);
        table = conn.getTable(tableName);
        scan = new Scan().setAllowPartialResults(true).setLimit(CommonConfig.HBASE_BASELINE_TOTAL_NUM);
        logger.info("Connected to HBase; reading baseline data");

        // .addFamily(Bytes.toBytes(CommonConfig.HBASE_BASELINE_FAMLIY_NAME));
    }

    @Override
    public void close() throws Exception {
        super.close();
    }

    @Override
    public void run(SourceContext<Map<String, Map<String, List<Integer>>>> sourceContext) throws Exception {
        ResultScanner rs = table.getScanner(scan);
        // Map<String, List<Integer>[]> baselineMap = new HashMap<>();
        Map<String, Map<String, List<Integer>>> baselineMap = new HashMap<>();
        for (Result result : rs) {
            Map<String, List<Integer>> floodTypeMap = new HashMap<>();
            String rowkey = Bytes.toString(result.getRow());
            ArrayList<Integer> tcp = getArraylist(result, "TCP SYN Flood", "session_num");
            ArrayList<Integer> udp = getArraylist(result, "UDP Flood", "b");
            ArrayList<Integer> icmp = getArraylist(result, "ICMP Flood", "c");
            ArrayList<Integer> dns = getArraylist(result, "DNS Amplification", "d");
            floodTypeMap.put("TCP SYN Flood", tcp);
            floodTypeMap.put("UDP Flood", udp);
            floodTypeMap.put("ICMP Flood", icmp);
            floodTypeMap.put("DNS Amplification", dns);
            // List[] arr = new ArrayList[]{tcp,udp,icmp,dns};
            baselineMap.put(rowkey, floodTypeMap);
        }
        sourceContext.collect(baselineMap);
        logger.info("Baseline data formatted; {} IPs read", baselineMap.size());
    }

    private static ArrayList<Integer> getArraylist(Result result, String family, String qualifier) throws IOException {
        if (!result.containsColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier))) {
            return null;
        }
        ArrayWritable w = new ArrayWritable(IntWritable.class);
        w.readFields(new DataInputStream(new ByteArrayInputStream(result.getValue(Bytes.toBytes(family), Bytes.toBytes(qualifier)))));
        return fromWritable(w);
    }

    private static ArrayList<Integer> fromWritable(ArrayWritable writable) {
        Writable[] writables = writable.get();
        ArrayList<Integer> list = new ArrayList<>(writables.length);
        for (Writable wrt : writables) {
            list.add(((IntWritable) wrt).get());
        }
        return list;
    }

    @Override
    public void cancel() {
        try {
            if (table != null) {
                table.close();
            }
            if (conn != null) {
                conn.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000);
        DataStreamSource<Map<String, Map<String, List<Integer>>>> mapDataStreamSource = env.addSource(new BaselineSource());
        DataStream<Map<String, Map<String, List<Integer>>>> broadcast = mapDataStreamSource.broadcast();
        mapDataStreamSource.print();
        env.execute();
    }
}
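The read path above implies a row layout of: row key = destination IP, column family = attack type, and one qualifier per family holding a Writable-serialized array of 144 per-slot session counts. The baseline writer itself is not part of this commit, so the following is only a minimal write-side sketch under those assumptions (table handle, row key and values are placeholders):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class BaselineWriterSketch {
    // Hypothetical writer for one baseline row: 144 per-slot session counts for one
    // destination IP / attack type, serialized exactly the way BaselineSource reads them.
    static void writeRow(Table table, String destinationIp, int[] slots) throws IOException {
        IntWritable[] values = new IntWritable[slots.length];
        for (int i = 0; i < slots.length; i++) {
            values[i] = new IntWritable(slots[i]);
        }
        ArrayWritable w = new ArrayWritable(IntWritable.class, values);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        w.write(new DataOutputStream(bos)); // mirror of w.readFields(...) in getArraylist
        Put put = new Put(Bytes.toBytes(destinationIp)); // row key: destination IP
        put.addColumn(Bytes.toBytes("TCP SYN Flood"), Bytes.toBytes("session_num"), bos.toByteArray());
        table.put(put);
    }
}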
26 src/main/java/com/zdjizhi/source/DosSketchSource.java Normal file
@@ -0,0 +1,26 @@
package com.zdjizhi.source;

import com.zdjizhi.common.CommonConfig;
import com.zdjizhi.utils.FlinkEnvironmentUtils;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Properties;

public class DosSketchSource {

    private static final StreamExecutionEnvironment streamExeEnv = FlinkEnvironmentUtils.streamExeEnv;

    public static DataStreamSource<String> createDosSketchSourceByDatastream() {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", CommonConfig.KAFKA_INPUT_BOOTSTRAP_SERVERS);
        properties.setProperty("group.id", CommonConfig.KAFKA_GROUP_ID);

        return streamExeEnv.addSource(new FlinkKafkaConsumer<String>(
                CommonConfig.KAFKA_INPUT_TOPIC_NAME,
                new SimpleStringSchema(), properties))
                .setParallelism(CommonConfig.KAFKA_INPUT_PARALLELISM);
    }
}
45 src/main/java/com/zdjizhi/utils/CommonConfigurations.java Normal file
@@ -0,0 +1,45 @@
package com.zdjizhi.utils;

import java.util.Properties;

public final class CommonConfigurations {

    private static Properties propService = new Properties();

    public static String getStringProperty(String key) {
        return propService.getProperty(key);
    }

    public static Integer getIntProperty(String key) {
        return Integer.parseInt(propService.getProperty(key));
    }

    public static Double getDoubleProperty(String key) {
        return Double.parseDouble(propService.getProperty(key));
    }

    public static Long getLongProperty(String key) {
        return Long.parseLong(propService.getProperty(key));
    }

    public static Boolean getBooleanProperty(Integer type, String key) {
        return "true".equals(propService.getProperty(key).toLowerCase().trim());
    }

    static {
        try {
            propService.load(CommonConfigurations.class.getClassLoader().getResourceAsStream("common.properties"));
        } catch (Exception e) {
            propService = null;
        }
    }
}
199 src/main/java/com/zdjizhi/utils/DistributedLock.java Normal file
@@ -0,0 +1,199 @@
|
||||
package com.zdjizhi.utils;

import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;

public class DistributedLock implements Lock, Watcher {
    private static final Logger logger = LoggerFactory.getLogger(DistributedLock.class);

    private ZooKeeper zk = null;
    /**
     * Root node
     */
    private final String ROOT_LOCK = "/locks";
    /**
     * Resource being contended for
     */
    private String lockName;
    /**
     * The previous lock node to wait on
     */
    private String waitLock;
    /**
     * The current lock node
     */
    private String currentLock;
    /**
     * Latch used to wait for the previous node to disappear
     */
    private CountDownLatch countDownLatch;

    private int sessionTimeout = 2000;

    private List<Exception> exceptionList = new ArrayList<Exception>();

    /**
     * Configure the distributed lock.
     *
     * @param config   ZooKeeper connection string
     * @param lockName resource to contend for
     */
    public DistributedLock(String config, String lockName) {
        this.lockName = lockName;
        try {
            // Connect to ZooKeeper
            zk = new ZooKeeper(config, sessionTimeout, this);
            Stat stat = zk.exists(ROOT_LOCK, false);
            if (stat == null) {
                // Create the root node if it does not exist yet
                zk.create(ROOT_LOCK, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        } catch (IOException | InterruptedException | KeeperException e) {
            logger.error("Failed to connect to ZooKeeper or create the root lock node: " + e);
        }
    }

    /**
     * Node watcher
     */
    @Override
    public void process(WatchedEvent event) {
        if (this.countDownLatch != null) {
            this.countDownLatch.countDown();
        }
    }

    @Override
    public void lock() {
        if (exceptionList.size() > 0) {
            throw new LockException(exceptionList.get(0));
        }
        try {
            if (this.tryLock()) {
                logger.info(Thread.currentThread().getName() + " " + lockName + " acquired the lock");
            } else {
                // Wait for the lock
                waitForLock(waitLock, sessionTimeout);
            }
        } catch (InterruptedException | KeeperException e) {
            logger.error("Exception while acquiring the lock: " + e);
        }
    }

    @Override
    public boolean tryLock() {
        try {
            String splitStr = "_lock_";
            if (lockName.contains(splitStr)) {
                throw new LockException("Invalid lock name");
            }
            // Create an ephemeral sequential node
            currentLock = zk.create(ROOT_LOCK + "/" + lockName + splitStr, new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
            // Fetch all child nodes
            List<String> subNodes = zk.getChildren(ROOT_LOCK, false);
            // Keep only the lock nodes for this lockName
            List<String> lockObjects = new ArrayList<String>();
            for (String node : subNodes) {
                String tmpNode = node.split(splitStr)[0];
                if (tmpNode.equals(lockName)) {
                    lockObjects.add(node);
                }
            }
            Collections.sort(lockObjects);
            // If the current node is the smallest, the lock is acquired
            if (currentLock.equals(ROOT_LOCK + "/" + lockObjects.get(0))) {
                return true;
            }
            // Otherwise, find the node immediately before this one
            String prevNode = currentLock.substring(currentLock.lastIndexOf("/") + 1);
            waitLock = lockObjects.get(Collections.binarySearch(lockObjects, prevNode) - 1);
        } catch (InterruptedException | KeeperException e) {
            logger.error("Exception during lock acquisition: " + e);
        }
        return false;
    }

    @Override
    public boolean tryLock(long timeout, TimeUnit unit) {
        try {
            if (this.tryLock()) {
                return true;
            }
            return waitForLock(waitLock, timeout);
        } catch (KeeperException | InterruptedException | RuntimeException e) {
            logger.error("Exception while trying the lock with timeout: " + e);
        }
        return false;
    }

    /**
     * Wait for the lock.
     *
     * @param prev     name of the lock node to wait on
     * @param waitTime wait time in milliseconds
     * @return true once the wait completes
     * @throws KeeperException
     * @throws InterruptedException
     */
    private boolean waitForLock(String prev, long waitTime) throws KeeperException, InterruptedException {
        Stat stat = zk.exists(ROOT_LOCK + "/" + prev, true);

        if (stat != null) {
            this.countDownLatch = new CountDownLatch(1);
            // Wait on the latch; when the previous node is removed, process() counts down,
            // the wait ends, and the lock can be taken
            this.countDownLatch.await(waitTime, TimeUnit.MILLISECONDS);
            this.countDownLatch = null;
        }
        return true;
    }

    @Override
    public void unlock() {
        try {
            zk.delete(currentLock, -1);
            currentLock = null;
            zk.close();
        } catch (InterruptedException | KeeperException e) {
            logger.error("Exception while releasing the lock: " + e);
        }
    }

    @Override
    public Condition newCondition() {
        return null;
    }

    @Override
    public void lockInterruptibly() throws InterruptedException {
        this.lock();
    }

    public class LockException extends RuntimeException {
        private static final long serialVersionUID = 1L;

        public LockException(String e) {
            super(e);
        }

        public LockException(Exception e) {
            super(e);
        }
    }

}
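A minimal usage sketch (the quorum address mirrors the one in common.properties; the lock name is illustrative). Note that unlock() closes the ZooKeeper session, so each DistributedLock instance is single-use:

DistributedLock lock = new DistributedLock("192.168.44.12:2181", "disLocks1");
lock.lock();
try {
    // critical section, e.g. claiming a unique worker id (see SnowflakeId below)
} finally {
    lock.unlock();   // deletes the ephemeral node and closes the session
}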
28
src/main/java/com/zdjizhi/utils/FlinkEnvironmentUtils.java
Normal file
@@ -0,0 +1,28 @@
package com.zdjizhi.utils;

import com.zdjizhi.common.CommonConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author wlh
 */
public class FlinkEnvironmentUtils {
    public static StreamExecutionEnvironment streamExeEnv = StreamExecutionEnvironment.getExecutionEnvironment();

    public static StreamTableEnvironment getStreamTableEnv() {
        streamExeEnv.setParallelism(CommonConfig.STREAM_EXECUTION_ENVIRONMENT_PARALLELISM);

        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();

        return StreamTableEnvironment.create(streamExeEnv, settings);
    }
}
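A short sketch of how the shared environment might be used for Table API work (the SQL text is illustrative, not from this commit):

StreamTableEnvironment tEnv = FlinkEnvironmentUtils.getStreamTableEnv();
// tEnv.executeSql("CREATE TABLE ...");   // register sources/sinks, then run queries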
5
src/main/java/com/zdjizhi/utils/HbaseUtils.java
Normal file
@@ -0,0 +1,5 @@
package com.zdjizhi.utils;

public class HbaseUtils {

}
23
src/main/java/com/zdjizhi/utils/IpUtils.java
Normal file
@@ -0,0 +1,23 @@
package com.zdjizhi.utils;

import com.zdjizhi.common.CommonConfig;

public class IpUtils {

    /**
     * IP geolocation lookup utility
     */
    public static IpLookup ipLookup = new IpLookup.Builder(false)
            .loadDataFileV4(CommonConfig.IP_MMDB_PATH + "ip_v4.mmdb")
            .loadDataFileV6(CommonConfig.IP_MMDB_PATH + "ip_v6.mmdb")
            .loadDataFilePrivateV4(CommonConfig.IP_MMDB_PATH + "ip_private_v4.mmdb")
            .loadDataFilePrivateV6(CommonConfig.IP_MMDB_PATH + "ip_private_v6.mmdb")
            .build();

    public static void main(String[] args) {
        System.out.println(ipLookup.countryLookup("61.128.159.186"));
    }

}
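IpLookup comes from a vendored geolocation library whose wider API isn't part of this commit, so only the call already exercised above is safe to show:

String country = IpUtils.ipLookup.countryLookup("61.128.159.186");   // e.g. a country name string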
26
src/main/java/com/zdjizhi/utils/KafkaUtils.java
Normal file
@@ -0,0 +1,26 @@
package com.zdjizhi.utils;

import com.zdjizhi.common.CommonConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Properties;

public class KafkaUtils {

    private static Properties getKafkaSinkProperty() {
        Properties producerProperties = new Properties();
        producerProperties.setProperty("bootstrap.servers", CommonConfig.KAFKA_OUTPUT_BOOTSTRAP_SERVERS);

        return producerProperties;
    }

    public static FlinkKafkaProducer<String> getKafkaSink(String topic) {
        return new FlinkKafkaProducer<String>(
                topic,
                new SimpleStringSchema(),
                getKafkaSinkProperty()
        );
    }

}
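A wiring sketch: send a result stream to one of the output topics declared in common.properties (the sample record is illustrative):

DataStream<String> metrics = FlinkEnvironmentUtils.streamExeEnv.fromElements("{\"ip\":\"1.2.3.4\"}");
metrics.addSink(KafkaUtils.getKafkaSink("TRAFFIC-TOP-DESTINATION-IP-METRICS-LOG"));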
204
src/main/java/com/zdjizhi/utils/SnowflakeId.java
Normal file
@@ -0,0 +1,204 @@
package com.zdjizhi.utils;

import com.zdjizhi.common.CommonConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SnowflakeId {
    private static final Logger logger = LoggerFactory.getLogger(SnowflakeId.class);

    /**
     * 64 bits in total; the first bit is the sign bit and defaults to 0.
     * Timestamp: 39 bits (~17 years); centerId (one per environment or job): 7 bits (0-127);
     * workerId (one per process): 6 bits (0-63); sequence: 11 bits (2047/ms).
     *
     * Sequence numbers per ms = (-1L ^ (-1L << 11))
     * Maximum years of use = (1L << 39) / (1000L * 60 * 60 * 24 * 365)
     */
    /**
     * Epoch start (2020-11-14 00:00:00), good for at most ~17 years
     */
    private final long twepoch = 1605283200000L;

    /**
     * Number of bits for the worker id
     */
    private final long workerIdBits = 6L;

    /**
     * Number of bits for the data-center id
     */
    private final long dataCenterIdBits = 7L;

    /**
     * Maximum supported worker id: 63 (this shift trick quickly computes the largest
     * decimal number representable in a given number of binary digits)
     * M << n = M * 2^n
     */
    private final long maxWorkerId = -1L ^ (-1L << workerIdBits);

    /**
     * Maximum supported data-center id: 127
     */
    private final long maxDataCenterId = -1L ^ (-1L << dataCenterIdBits);

    /**
     * Number of bits the sequence occupies in the id
     */
    private final long sequenceBits = 11L;

    /**
     * Worker id is shifted left by 11 bits (the sequence width)
     */
    private final long workerIdShift = sequenceBits;

    /**
     * Data-center id is shifted left by 17 bits (11 + 6)
     */
    private final long dataCenterIdShift = sequenceBits + workerIdBits;

    /**
     * Timestamp is shifted left by 24 bits (11 + 6 + 7)
     */
    private final long timestampLeftShift = sequenceBits + workerIdBits + dataCenterIdBits;

    /**
     * Mask for the sequence: 2047 here
     */
    private final long sequenceMask = -1L ^ (-1L << sequenceBits);

    /**
     * Worker id (0~63)
     */
    private long workerId;

    /**
     * Data-center id (0~127)
     */
    private long dataCenterId;

    /**
     * Per-millisecond sequence (0~2047)
     */
    private long sequence = 0L;

    /**
     * Timestamp of the last generated id
     */
    private long lastTimestamp = -1L;

    /**
     * Maximum tolerated clock rollback: 10s
     */
    private static final long ROLL_BACK_TIME = 10000L;

    private static SnowflakeId idWorker;

    private static ZookeeperUtils zookeeperUtils = new ZookeeperUtils();

    static {
        idWorker = new SnowflakeId(CommonConfig.HBASE_ZOOKEEPER_QUORUM, CommonConfig.DATA_CENTER_ID_NUM);
    }

    //==============================Constructors=====================================

    /**
     * Constructor
     */
    private SnowflakeId(String zookeeperIp, long dataCenterIdNum) {
        DistributedLock lock = new DistributedLock(CommonConfig.HBASE_ZOOKEEPER_QUORUM, "disLocks1");
        try {
            lock.lock();
            int tmpWorkerId = zookeeperUtils.modifyNode("/Snowflake/" + "worker" + dataCenterIdNum, zookeeperIp);
            if (tmpWorkerId > maxWorkerId || tmpWorkerId < 0) {
                throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
            }
            if (dataCenterIdNum > maxDataCenterId || dataCenterIdNum < 0) {
                throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDataCenterId));
            }
            this.workerId = tmpWorkerId;
            this.dataCenterId = dataCenterIdNum;
        } catch (RuntimeException e) {
            logger.error("This is not a usual error!!!===>>>" + e + "<<<===");
        } finally {
            lock.unlock();
        }
    }

    // ==============================Methods==========================================

    /**
     * Generate the next id (this method is thread-safe).
     *
     * @return SnowflakeId
     */
    private synchronized long nextId() {
        long timestamp = timeGen();
        // Tolerate limited clock rollback: within ROLL_BACK_TIME, wait for the clock to catch up
        if (lastTimestamp - timestamp > 0 && lastTimestamp - timestamp < ROLL_BACK_TIME) {
            timestamp = tilNextMillis(lastTimestamp);
        }
        // If the current time is still before the last id's timestamp, the clock has moved
        // backwards too far and we must throw
        if (timestamp < lastTimestamp) {
            throw new RuntimeException(
                    String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
        }

        // Same millisecond as the last id: advance the per-millisecond sequence
        if (lastTimestamp == timestamp) {
            sequence = (sequence + 1) & sequenceMask;
            // Sequence overflow within this millisecond
            if (sequence == 0) {
                // Block until the next millisecond to get a fresh timestamp
                timestamp = tilNextMillis(lastTimestamp);
            }
        }
        // Timestamp changed: reset the per-millisecond sequence
        else {
            sequence = 0L;
        }

        // Remember the timestamp of this id
        lastTimestamp = timestamp;

        // Shift and OR the parts together into a 64-bit id
        return ((timestamp - twepoch) << timestampLeftShift)
                | (dataCenterId << dataCenterIdShift)
                | (workerId << workerIdShift)
                | sequence;
    }

    /**
     * Block until the next millisecond, i.e. until a new timestamp is obtained.
     *
     * @param lastTimestamp timestamp of the last generated id
     * @return current timestamp
     */
    private long tilNextMillis(long lastTimestamp) {
        long timestamp = timeGen();
        while (timestamp <= lastTimestamp) {
            timestamp = timeGen();
        }
        return timestamp;
    }

    /**
     * Return the current time in milliseconds.
     *
     * @return current time (ms)
     */
    private long timeGen() {
        return System.currentTimeMillis();
    }

    /**
     * Static entry point
     */
    public static Long generateId() {
        return idWorker.nextId();
    }

}
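A small worked check of the 39/7/6/11 layout documented above, using plain Java arithmetic that matches the constants in the class:

long perMs  = -1L ^ (-1L << 11);                           // 2047: ids per millisecond per worker
long worker = -1L ^ (-1L << 6);                            // 63: max worker id
long center = -1L ^ (-1L << 7);                            // 127: max data-center id
long years  = (1L << 39) / (1000L * 60 * 60 * 24 * 365);   // 17: years until the timestamp field overflows
long id     = SnowflakeId.generateId();                    // sign(1) | timestamp(39) | center(7) | worker(6) | sequence(11)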
135
src/main/java/com/zdjizhi/utils/ZookeeperUtils.java
Normal file
@@ -0,0 +1,135 @@
package com.zdjizhi.utils;

import cn.hutool.core.util.StrUtil;
import org.apache.zookeeper.*;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;

public class ZookeeperUtils implements Watcher {
    private static final Logger logger = LoggerFactory.getLogger(ZookeeperUtils.class);

    private ZooKeeper zookeeper;

    private static final int SESSION_TIME_OUT = 20000;

    private CountDownLatch countDownLatch = new CountDownLatch(1);

    @Override
    public void process(WatchedEvent event) {
        if (event.getState() == Event.KeeperState.SyncConnected) {
            countDownLatch.countDown();
        }
    }

    /**
     * Modify node data.
     *
     * @param path node path
     */
    int modifyNode(String path, String zookeeperIp) {
        createNode(path, "0".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, zookeeperIp);
        int workerId = 0;
        try {
            connectZookeeper(zookeeperIp);
            Stat stat = zookeeper.exists(path, true);
            workerId = Integer.parseInt(getNodeData(path));
            if (workerId > 63) {
                workerId = 0;
                zookeeper.setData(path, "1".getBytes(), stat.getVersion());
            } else {
                String result = String.valueOf(workerId + 1);
                if (stat != null) {
                    zookeeper.setData(path, result.getBytes(), stat.getVersion());
                } else {
                    logger.error("Node does not exist, can't modify");
                }
            }
        } catch (KeeperException | InterruptedException e) {
            logger.error("Modify error, can't modify: " + e);
        } finally {
            closeConn();
        }
        logger.warn("workerID is:" + workerId);
        return workerId;
    }

    /**
     * Connect to ZooKeeper.
     *
     * @param host address
     */
    private void connectZookeeper(String host) {
        try {
            zookeeper = new ZooKeeper(host, SESSION_TIME_OUT, this);
            countDownLatch.await();
        } catch (IOException | InterruptedException e) {
            logger.error("Connection to the Zookeeper Exception! message:" + e);
        }
    }

    /**
     * Close the connection.
     */
    private void closeConn() {
        try {
            if (zookeeper != null) {
                zookeeper.close();
            }
        } catch (InterruptedException e) {
            logger.error("Close the Zookeeper connection Exception! message:" + e);
        }
    }

    /**
     * Read node data.
     *
     * @param path node path
     * @return data, or null on error
     */
    private String getNodeData(String path) {
        String result = null;
        Stat stat = new Stat();
        try {
            byte[] resByte = zookeeper.getData(path, true, stat);

            result = StrUtil.str(resByte, "UTF-8");
        } catch (KeeperException | InterruptedException e) {
            logger.error("Get node information exception" + e);
        }
        return result;
    }

    /**
     * @param path node path to create
     * @param data byte[] payload stored at the node
     * @param acls access-control policy
     */
    private void createNode(String path, byte[] data, List<ACL> acls, String zookeeperIp) {
        try {
            connectZookeeper(zookeeperIp);
            Stat exists = zookeeper.exists(path, true);
            if (exists == null) {
                Stat existsSnowflakeId = zookeeper.exists("/Snowflake", true);
                if (existsSnowflakeId == null) {
                    zookeeper.create("/Snowflake", null, acls, CreateMode.PERSISTENT);
                }
                zookeeper.create(path, data, acls, CreateMode.PERSISTENT);
            } else {
                logger.warn("Node already exists! No need to create");
            }
        } catch (KeeperException | InterruptedException e) {
            logger.error(e.toString());
        } finally {
            closeConn();
        }
    }

}
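A usage sketch for the worker-id counter (modifyNode is package-private, so this runs from com.zdjizhi.utils; the path and quorum mirror values used elsewhere in this commit). Each call returns the stored counter and then increments it, wrapping after 63 so at most 64 workers share one data-center id:

int workerId = new ZookeeperUtils().modifyNode("/Snowflake/worker15", "192.168.44.12:2181");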
37
src/main/resources/common.properties
Normal file
@@ -0,0 +1,37 @@
stream.execution.environment.parallelism=1

kafka.input.parallelism=1
kafka.input.topic.name=DOS-SKETCH-LOG
kafka.input.bootstrap.servers=192.168.44.12:9092
kafka.input.scan.startup.mode=latest-offset
kafka.input.group.id=test

kafka.output.metric.parallelism=1
kafka.output.metric.topic.name=TRAFFIC-TOP-DESTINATION-IP-METRICS-LOG
kafka.output.event.parallelism=1
kafka.output.event.topic.name=DOS-EVENT-LOG
kafka.output.bootstrap.servers=192.168.44.12:9092

hbase.zookeeper.quorum=192.168.44.12:2181
hbase.zookeeper.client.port=2181
hbase.client.operation.timeout=30000
hbase.client.scanner.timeout.period=30000

hbase.baseline.table.name=ddos_traffic_baselines
hbase.baseline.famliy.name=TCP SYN Flood
hbase.baseline.total.num=1000000

flink.watermark.max.orderness=1
flink.window.max.time=600

source.ip.list.limit=10000

data.center.id.num=15

ip.mmdb.path=D:\\data\\dat\\

baseline.sessions.minor.threshold=0.1
baseline.sessions.warning.threshold=0.5
baseline.sessions.major.threshold=1
baseline.sessions.severe.threshold=3
baseline.sessions.critical.threshold=8
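The five baseline.sessions.* values form an escalating ladder of ratios over the stored baseline. A hedged sketch of how such a ladder might map an observed/baseline ratio to a severity label (the mapping itself is not spelled out in this commit):

static String severity(double ratio) {
    if (ratio >= CommonConfigurations.getDoubleProperty("baseline.sessions.critical.threshold")) return "critical";
    if (ratio >= CommonConfigurations.getDoubleProperty("baseline.sessions.severe.threshold"))   return "severe";
    if (ratio >= CommonConfigurations.getDoubleProperty("baseline.sessions.major.threshold"))    return "major";
    if (ratio >= CommonConfigurations.getDoubleProperty("baseline.sessions.warning.threshold"))  return "warning";
    if (ratio >= CommonConfigurations.getDoubleProperty("baseline.sessions.minor.threshold"))    return "minor";
    return "normal";
}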
6
src/test/java/com/zdjizhi/common/FlinkJsonTest.java
Normal file
@@ -0,0 +1,6 @@
package com.zdjizhi.common;

public class FlinkJsonTest {
    public static void main(String[] args) {
    }
}
64
src/test/java/com/zdjizhi/common/HbaseTest.java
Normal file
@@ -0,0 +1,64 @@
package com.zdjizhi.common;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;

public class HbaseTest {
    public static void main(String[] args) throws IOException {
        org.apache.hadoop.conf.Configuration config = HBaseConfiguration.create();

        config.set("hbase.zookeeper.quorum", CommonConfig.HBASE_ZOOKEEPER_QUORUM);
        config.set("hbase.client.retries.number", "3");
        config.set("hbase.bulkload.retries.number", "3");
        config.set("zookeeper.recovery.retry", "3");
        config.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, CommonConfig.HBASE_CLIENT_OPERATION_TIMEOUT);
        config.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CommonConfig.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);

        TableName tableName = TableName.valueOf("dos_test");
        Connection conn = ConnectionFactory.createConnection(config);
        Table table = conn.getTable(tableName);

        // Round-trip an int[] through HBase as a serialized ArrayWritable.
        int[] arr = {1, 3, 4, 5, 1, 2, 4, 4, 15, 6, 37, 234, 1241};
        Put abcPut = new Put(Bytes.toBytes("abc"));
        abcPut.addColumn(Bytes.toBytes("attribute"), Bytes.toBytes("123"), WritableUtils.toByteArray(toWritable(arr)));
        table.put(abcPut);

        Get abcGet = new Get(Bytes.toBytes("abc"));
        Result r = table.get(abcGet);
        ArrayWritable w = new ArrayWritable(IntWritable.class);
        w.readFields(new DataInputStream(new ByteArrayInputStream(r.getValue(Bytes.toBytes("attribute"), Bytes.toBytes("123")))));
        ArrayList<Integer> arr2 = fromWritable(w);
        System.out.println(arr2.toString());

        // Release HBase resources (missing in the original).
        table.close();
        conn.close();
    }

    public static Writable toWritable(int[] arr) {
        Writable[] content = new Writable[arr.length];
        for (int i = 0; i < content.length; i++) {
            content[i] = new IntWritable(arr[i]);
        }
        return new ArrayWritable(IntWritable.class, content);
    }

    public static ArrayList<Integer> fromWritable(ArrayWritable writable) {
        Writable[] writables = writable.get();
        ArrayList<Integer> list = new ArrayList<Integer>(writables.length);
        for (Writable wrt : writables) {
            list.add(((IntWritable) wrt).get());
        }
        return list;
    }

}
17
src/test/java/com/zdjizhi/common/UdtfTest.java
Normal file
@@ -0,0 +1,17 @@
package com.zdjizhi.common;

import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;

public class UdtfTest extends TableFunction<Row> {

    // Explodes an array of rows into individual rows.
    public void eval(Row[] rows) {
        for (Row row : rows) {
            collect(row);
        }
    }

    public static void main(String[] args) {

    }
}