Commit initial version of the refactored Live Traffic Chart code. (TSG-14799)
71
src/main/java/com/zdjizhi/common/config/GlobalConfig.java
Normal file
@@ -0,0 +1,71 @@
package com.zdjizhi.common.config;

import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;

/**
 * @author Administrator
 */
public class GlobalConfig {

    private static StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();

    static {
        encryptor.setPassword("galaxy");
    }

    /**
     * Protocol separator; escaped because it is used as a split regex.
     */
    public static final String PROTOCOL_SPLITTER = "\\.";

    /**
     * System
     */
    public static final Integer SOURCE_PARALLELISM = GlobalConfigLoad.getIntProperty(0, "source.parallelism");
    public static final Integer PARSE_PARALLELISM = GlobalConfigLoad.getIntProperty(0, "parse.parallelism");
    public static final Integer WINDOW_PARALLELISM = GlobalConfigLoad.getIntProperty(0, "window.parallelism");
    public static final Integer COUNT_WINDOW_TIME = GlobalConfigLoad.getIntProperty(0, "count.window.time");
    public static final String TOOLS_LIBRARY = GlobalConfigLoad.getStringProperty(0, "tools.library");
    public static final Integer SINK_PARALLELISM = GlobalConfigLoad.getIntProperty(0, "sink.parallelism");

    /**
     * Kafka common
     */
    public static final String KAFKA_SASL_JAAS_USER = encryptor.decrypt(GlobalConfigLoad.getStringProperty(1, "kafka.user"));
    public static final String KAFKA_SASL_JAAS_PIN = encryptor.decrypt(GlobalConfigLoad.getStringProperty(1, "kafka.pin"));

    /**
     * Kafka sink config
     */
    public static final String SINK_KAFKA_SERVERS = GlobalConfigLoad.getStringProperty(0, "sink.kafka.servers");
    public static final String SINK_KAFKA_TOPIC = GlobalConfigLoad.getStringProperty(0, "sink.kafka.topic");
    public static final String PRODUCER_ACK = GlobalConfigLoad.getStringProperty(1, "producer.ack");
    public static final String RETRIES = GlobalConfigLoad.getStringProperty(1, "retries");
    public static final String LINGER_MS = GlobalConfigLoad.getStringProperty(1, "linger.ms");
    public static final Integer REQUEST_TIMEOUT_MS = GlobalConfigLoad.getIntProperty(1, "request.timeout.ms");
    public static final Integer BATCH_SIZE = GlobalConfigLoad.getIntProperty(1, "batch.size");
    public static final Integer BUFFER_MEMORY = GlobalConfigLoad.getIntProperty(1, "buffer.memory");
    public static final Integer MAX_REQUEST_SIZE = GlobalConfigLoad.getIntProperty(1, "max.request.size");

    /**
     * Kafka source config
     */
    public static final String SOURCE_KAFKA_SERVERS = GlobalConfigLoad.getStringProperty(0, "source.kafka.servers");
    public static final String SOURCE_KAFKA_TOPIC = GlobalConfigLoad.getStringProperty(0, "source.kafka.topic");
    public static final String GROUP_ID = GlobalConfigLoad.getStringProperty(0, "group.id");
    public static final String SESSION_TIMEOUT_MS = GlobalConfigLoad.getStringProperty(1, "session.timeout.ms");
    public static final String MAX_POLL_RECORDS = GlobalConfigLoad.getStringProperty(1, "max.poll.records");
    public static final String MAX_PARTITION_FETCH_BYTES = GlobalConfigLoad.getStringProperty(1, "max.partition.fetch.bytes");

    /**
     * Kafka rate-limiting config - 2020-11-17
     */
    public static final String PRODUCER_KAFKA_COMPRESSION_TYPE = GlobalConfigLoad.getStringProperty(1, "producer.kafka.compression.type");

}
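Reviewer note: because the Jasypt key "galaxy" is hard-coded, the Kafka credentials are obfuscated rather than protected. For reference, a minimal sketch of how a value this class can decrypt would be produced (plaintext invented, not from the commit):

import org.jasypt.encryption.pbe.StandardPBEStringEncryptor;

StandardPBEStringEncryptor encryptor = new StandardPBEStringEncryptor();
encryptor.setPassword("galaxy");
String cipherText = encryptor.encrypt("myKafkaUser");  // value to store as kafka.user in default_config.properties
String plainText  = encryptor.decrypt(cipherText);     // what GlobalConfig does at class-load time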
70
src/main/java/com/zdjizhi/common/config/GlobalConfigLoad.java
Normal file
@@ -0,0 +1,70 @@
package com.zdjizhi.common.config;

import com.zdjizhi.utils.StringUtil;

import java.io.IOException;
import java.util.Locale;
import java.util.Properties;

/**
 * @author Administrator
 */
public final class GlobalConfigLoad {

    private static Properties propKafka = new Properties();
    private static Properties propService = new Properties();

    public static String getStringProperty(Integer type, String key) {
        if (type == 0) {
            return propService.getProperty(key);
        } else if (type == 1) {
            return propKafka.getProperty(key);
        } else {
            return null;
        }
    }

    public static Integer getIntProperty(Integer type, String key) {
        if (type == 0) {
            return Integer.parseInt(propService.getProperty(key));
        } else if (type == 1) {
            return Integer.parseInt(propKafka.getProperty(key));
        } else {
            return null;
        }
    }

    public static Long getLongProperty(Integer type, String key) {
        if (type == 0) {
            return Long.parseLong(propService.getProperty(key));
        } else if (type == 1) {
            return Long.parseLong(propKafka.getProperty(key));
        } else {
            return null;
        }
    }

    public static Boolean getBooleanProperty(Integer type, String key) {
        // Lower-case before comparing to "true"; the initial version upper-cased
        // the value and compared it to lower-case "true", which could never match.
        if (type == 0) {
            return StringUtil.equals(propService.getProperty(key).trim().toLowerCase(Locale.ENGLISH), "true");
        } else if (type == 1) {
            return StringUtil.equals(propKafka.getProperty(key).trim().toLowerCase(Locale.ENGLISH), "true");
        } else {
            return null;
        }
    }

    static {
        try {
            propService.load(GlobalConfigLoad.class.getClassLoader().getResourceAsStream("service_flow_config.properties"));
            propKafka.load(GlobalConfigLoad.class.getClassLoader().getResourceAsStream("default_config.properties"));
        } catch (IOException | RuntimeException e) {
            propKafka = null;
            propService = null;
        }
    }
}
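Reviewer note: the Integer "type" flag is a magic number selecting the backing file. A quick usage sketch (key names are the ones GlobalConfig reads above):

// 0 -> service_flow_config.properties, 1 -> default_config.properties
Integer sourceParallelism = GlobalConfigLoad.getIntProperty(0, "source.parallelism");
String kafkaUserCipher    = GlobalConfigLoad.getStringProperty(1, "kafka.user");  // still Jasypt-encrypted at this point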
257
src/main/java/com/zdjizhi/common/pojo/AppProtocol.java
Normal file
@@ -0,0 +1,257 @@
package com.zdjizhi.common.pojo;

import com.alibaba.fastjson.annotation.JSONField;

/**
 * @author qidaijie
 * @Package com.zdjizhi.common.pojo
 * @Description:
 * @date 2023/4/21 16:06
 */
public class AppProtocol {
    private Long timestamp;
    private int vsys_id;
    private String device_id;
    private String device_group;
    private String data_center;
    private String protocol_stack_id;
    private String app_name;
    private Long sessions;
    private Long in_bytes;
    private Long out_bytes;
    private Long in_pkts;
    private Long out_pkts;
    private Long c2s_pkts;
    private Long s2c_pkts;
    private Long c2s_bytes;
    private Long s2c_bytes;
    private Long c2s_fragments;
    private Long s2c_fragments;
    private Long c2s_tcp_lost_bytes;
    private Long s2c_tcp_lost_bytes;
    private Long c2s_tcp_ooorder_pkts;
    private Long s2c_tcp_ooorder_pkts;
    private Long c2s_tcp_retransmitted_pkts;
    private Long s2c_tcp_retransmitted_pkts;
    private Long c2s_tcp_retransmitted_bytes;
    private Long s2c_tcp_retransmitted_bytes;
    private String client_ip_sketch;

    public Long getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(Long timestamp) {
        this.timestamp = timestamp;
    }

    public int getVsys_id() {
        return vsys_id;
    }

    public void setVsys_id(int vsys_id) {
        this.vsys_id = vsys_id;
    }

    public String getDevice_id() {
        return device_id;
    }

    public void setDevice_id(String device_id) {
        this.device_id = device_id;
    }

    public String getDevice_group() {
        return device_group;
    }

    public void setDevice_group(String device_group) {
        this.device_group = device_group;
    }

    public String getData_center() {
        return data_center;
    }

    public void setData_center(String data_center) {
        this.data_center = data_center;
    }

    public String getProtocol_stack_id() {
        return protocol_stack_id;
    }

    @JSONField(name = "protocol_label")
    public void setProtocol_stack_id(String protocol_stack_id) {
        this.protocol_stack_id = protocol_stack_id;
    }

    public String getApp_name() {
        return app_name;
    }

    @JSONField(name = "app_full_path")
    public void setApp_name(String app_name) {
        this.app_name = app_name;
    }

    public Long getSessions() {
        return sessions;
    }

    public void setSessions(Long sessions) {
        this.sessions = sessions;
    }

    public Long getIn_bytes() {
        return in_bytes;
    }

    public void setIn_bytes(Long in_bytes) {
        this.in_bytes = in_bytes;
    }

    public Long getOut_bytes() {
        return out_bytes;
    }

    public void setOut_bytes(Long out_bytes) {
        this.out_bytes = out_bytes;
    }

    public Long getIn_pkts() {
        return in_pkts;
    }

    public void setIn_pkts(Long in_pkts) {
        this.in_pkts = in_pkts;
    }

    public Long getOut_pkts() {
        return out_pkts;
    }

    public void setOut_pkts(Long out_pkts) {
        this.out_pkts = out_pkts;
    }

    public Long getC2s_pkts() {
        return c2s_pkts;
    }

    public void setC2s_pkts(Long c2s_pkts) {
        this.c2s_pkts = c2s_pkts;
    }

    public Long getS2c_pkts() {
        return s2c_pkts;
    }

    public void setS2c_pkts(Long s2c_pkts) {
        this.s2c_pkts = s2c_pkts;
    }

    public Long getC2s_bytes() {
        return c2s_bytes;
    }

    public void setC2s_bytes(Long c2s_bytes) {
        this.c2s_bytes = c2s_bytes;
    }

    public Long getS2c_bytes() {
        return s2c_bytes;
    }

    public void setS2c_bytes(Long s2c_bytes) {
        this.s2c_bytes = s2c_bytes;
    }

    public Long getC2s_fragments() {
        return c2s_fragments;
    }

    public void setC2s_fragments(Long c2s_fragments) {
        this.c2s_fragments = c2s_fragments;
    }

    public Long getS2c_fragments() {
        return s2c_fragments;
    }

    public void setS2c_fragments(Long s2c_fragments) {
        this.s2c_fragments = s2c_fragments;
    }

    public Long getC2s_tcp_lost_bytes() {
        return c2s_tcp_lost_bytes;
    }

    public void setC2s_tcp_lost_bytes(Long c2s_tcp_lost_bytes) {
        this.c2s_tcp_lost_bytes = c2s_tcp_lost_bytes;
    }

    public Long getS2c_tcp_lost_bytes() {
        return s2c_tcp_lost_bytes;
    }

    public void setS2c_tcp_lost_bytes(Long s2c_tcp_lost_bytes) {
        this.s2c_tcp_lost_bytes = s2c_tcp_lost_bytes;
    }

    public Long getC2s_tcp_ooorder_pkts() {
        return c2s_tcp_ooorder_pkts;
    }

    public void setC2s_tcp_ooorder_pkts(Long c2s_tcp_ooorder_pkts) {
        this.c2s_tcp_ooorder_pkts = c2s_tcp_ooorder_pkts;
    }

    public Long getS2c_tcp_ooorder_pkts() {
        return s2c_tcp_ooorder_pkts;
    }

    public void setS2c_tcp_ooorder_pkts(Long s2c_tcp_ooorder_pkts) {
        this.s2c_tcp_ooorder_pkts = s2c_tcp_ooorder_pkts;
    }

    public Long getC2s_tcp_retransmitted_pkts() {
        return c2s_tcp_retransmitted_pkts;
    }

    public void setC2s_tcp_retransmitted_pkts(Long c2s_tcp_retransmitted_pkts) {
        this.c2s_tcp_retransmitted_pkts = c2s_tcp_retransmitted_pkts;
    }

    public Long getS2c_tcp_retransmitted_pkts() {
        return s2c_tcp_retransmitted_pkts;
    }

    public void setS2c_tcp_retransmitted_pkts(Long s2c_tcp_retransmitted_pkts) {
        this.s2c_tcp_retransmitted_pkts = s2c_tcp_retransmitted_pkts;
    }

    public Long getC2s_tcp_retransmitted_bytes() {
        return c2s_tcp_retransmitted_bytes;
    }

    public void setC2s_tcp_retransmitted_bytes(Long c2s_tcp_retransmitted_bytes) {
        this.c2s_tcp_retransmitted_bytes = c2s_tcp_retransmitted_bytes;
    }

    public Long getS2c_tcp_retransmitted_bytes() {
        return s2c_tcp_retransmitted_bytes;
    }

    public void setS2c_tcp_retransmitted_bytes(Long s2c_tcp_retransmitted_bytes) {
        this.s2c_tcp_retransmitted_bytes = s2c_tcp_retransmitted_bytes;
    }

    public String getClient_ip_sketch() {
        return client_ip_sketch;
    }

    public void setClient_ip_sketch(String client_ip_sketch) {
        this.client_ip_sketch = client_ip_sketch;
    }
}
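Reviewer note: the @JSONField annotations on the setters rename fields on the way in, so the wire names protocol_label/app_full_path land in the differently named protocol_stack_id/app_name fields. A minimal sketch (sample values invented; assumes fastjson2's compatibility handling for the fastjson v1 annotation, which MetricsParseMap below relies on):

import com.alibaba.fastjson2.JSON;

AppProtocol p = JSON.parseObject(
        "{\"protocol_label\":\"eth.ipv4.tcp\",\"app_full_path\":\"http.wechat\",\"sessions\":3}",
        AppProtocol.class);
// p.getProtocol_stack_id() -> "eth.ipv4.tcp"
// p.getApp_name()          -> "http.wechat"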
213
src/main/java/com/zdjizhi/common/pojo/Fields.java
Normal file
@@ -0,0 +1,213 @@
package com.zdjizhi.common.pojo;

/**
 * @author qidaijie
 * @Package com.zdjizhi.common.pojo
 * @Description:
 * @date 2023/4/23 11:47
 */
public class Fields {
    private Long sessions;
    private Long in_bytes;
    private Long out_bytes;
    private Long in_pkts;
    private Long out_pkts;
    private Long c2s_pkts;
    private Long s2c_pkts;
    private Long c2s_bytes;
    private Long s2c_bytes;
    private Long c2s_fragments;
    private Long s2c_fragments;
    private Long c2s_tcp_lost_bytes;
    private Long s2c_tcp_lost_bytes;
    private Long c2s_tcp_ooorder_pkts;
    private Long s2c_tcp_ooorder_pkts;
    private Long c2s_tcp_retransmitted_pkts;
    private Long s2c_tcp_retransmitted_pkts;
    private Long c2s_tcp_retransmitted_bytes;
    private Long s2c_tcp_retransmitted_bytes;
    private byte[] client_ip_sketch;

    public Fields(Long sessions, Long in_bytes, Long out_bytes, Long in_pkts, Long out_pkts, Long c2s_pkts, Long s2c_pkts, Long c2s_bytes, Long s2c_bytes, Long c2s_fragments, Long s2c_fragments, Long c2s_tcp_lost_bytes, Long s2c_tcp_lost_bytes, Long c2s_tcp_ooorder_pkts, Long s2c_tcp_ooorder_pkts, Long c2s_tcp_retransmitted_pkts, Long s2c_tcp_retransmitted_pkts, Long c2s_tcp_retransmitted_bytes, Long s2c_tcp_retransmitted_bytes, byte[] client_ip_sketch) {
        this.sessions = sessions;
        this.in_bytes = in_bytes;
        this.out_bytes = out_bytes;
        this.in_pkts = in_pkts;
        this.out_pkts = out_pkts;
        this.c2s_pkts = c2s_pkts;
        this.s2c_pkts = s2c_pkts;
        this.c2s_bytes = c2s_bytes;
        this.s2c_bytes = s2c_bytes;
        this.c2s_fragments = c2s_fragments;
        this.s2c_fragments = s2c_fragments;
        this.c2s_tcp_lost_bytes = c2s_tcp_lost_bytes;
        this.s2c_tcp_lost_bytes = s2c_tcp_lost_bytes;
        this.c2s_tcp_ooorder_pkts = c2s_tcp_ooorder_pkts;
        this.s2c_tcp_ooorder_pkts = s2c_tcp_ooorder_pkts;
        this.c2s_tcp_retransmitted_pkts = c2s_tcp_retransmitted_pkts;
        this.s2c_tcp_retransmitted_pkts = s2c_tcp_retransmitted_pkts;
        this.c2s_tcp_retransmitted_bytes = c2s_tcp_retransmitted_bytes;
        this.s2c_tcp_retransmitted_bytes = s2c_tcp_retransmitted_bytes;
        this.client_ip_sketch = client_ip_sketch;
    }

    public Long getSessions() {
        return sessions;
    }

    public void setSessions(Long sessions) {
        this.sessions = sessions;
    }

    public Long getIn_bytes() {
        return in_bytes;
    }

    public void setIn_bytes(Long in_bytes) {
        this.in_bytes = in_bytes;
    }

    public Long getOut_bytes() {
        return out_bytes;
    }

    public void setOut_bytes(Long out_bytes) {
        this.out_bytes = out_bytes;
    }

    public Long getIn_pkts() {
        return in_pkts;
    }

    public void setIn_pkts(Long in_pkts) {
        this.in_pkts = in_pkts;
    }

    public Long getOut_pkts() {
        return out_pkts;
    }

    public void setOut_pkts(Long out_pkts) {
        this.out_pkts = out_pkts;
    }

    public Long getC2s_pkts() {
        return c2s_pkts;
    }

    public void setC2s_pkts(Long c2s_pkts) {
        this.c2s_pkts = c2s_pkts;
    }

    public Long getS2c_pkts() {
        return s2c_pkts;
    }

    public void setS2c_pkts(Long s2c_pkts) {
        this.s2c_pkts = s2c_pkts;
    }

    public Long getC2s_bytes() {
        return c2s_bytes;
    }

    public void setC2s_bytes(Long c2s_bytes) {
        this.c2s_bytes = c2s_bytes;
    }

    public Long getS2c_bytes() {
        return s2c_bytes;
    }

    public void setS2c_bytes(Long s2c_bytes) {
        this.s2c_bytes = s2c_bytes;
    }

    public Long getC2s_fragments() {
        return c2s_fragments;
    }

    public void setC2s_fragments(Long c2s_fragments) {
        this.c2s_fragments = c2s_fragments;
    }

    public Long getS2c_fragments() {
        return s2c_fragments;
    }

    public void setS2c_fragments(Long s2c_fragments) {
        this.s2c_fragments = s2c_fragments;
    }

    public Long getC2s_tcp_lost_bytes() {
        return c2s_tcp_lost_bytes;
    }

    public void setC2s_tcp_lost_bytes(Long c2s_tcp_lost_bytes) {
        this.c2s_tcp_lost_bytes = c2s_tcp_lost_bytes;
    }

    public Long getS2c_tcp_lost_bytes() {
        return s2c_tcp_lost_bytes;
    }

    public void setS2c_tcp_lost_bytes(Long s2c_tcp_lost_bytes) {
        this.s2c_tcp_lost_bytes = s2c_tcp_lost_bytes;
    }

    public Long getC2s_tcp_ooorder_pkts() {
        return c2s_tcp_ooorder_pkts;
    }

    public void setC2s_tcp_ooorder_pkts(Long c2s_tcp_ooorder_pkts) {
        this.c2s_tcp_ooorder_pkts = c2s_tcp_ooorder_pkts;
    }

    public Long getS2c_tcp_ooorder_pkts() {
        return s2c_tcp_ooorder_pkts;
    }

    public void setS2c_tcp_ooorder_pkts(Long s2c_tcp_ooorder_pkts) {
        this.s2c_tcp_ooorder_pkts = s2c_tcp_ooorder_pkts;
    }

    public Long getC2s_tcp_retransmitted_pkts() {
        return c2s_tcp_retransmitted_pkts;
    }

    public void setC2s_tcp_retransmitted_pkts(Long c2s_tcp_retransmitted_pkts) {
        this.c2s_tcp_retransmitted_pkts = c2s_tcp_retransmitted_pkts;
    }

    public Long getS2c_tcp_retransmitted_pkts() {
        return s2c_tcp_retransmitted_pkts;
    }

    public void setS2c_tcp_retransmitted_pkts(Long s2c_tcp_retransmitted_pkts) {
        this.s2c_tcp_retransmitted_pkts = s2c_tcp_retransmitted_pkts;
    }

    public Long getC2s_tcp_retransmitted_bytes() {
        return c2s_tcp_retransmitted_bytes;
    }

    public void setC2s_tcp_retransmitted_bytes(Long c2s_tcp_retransmitted_bytes) {
        this.c2s_tcp_retransmitted_bytes = c2s_tcp_retransmitted_bytes;
    }

    public Long getS2c_tcp_retransmitted_bytes() {
        return s2c_tcp_retransmitted_bytes;
    }

    public void setS2c_tcp_retransmitted_bytes(Long s2c_tcp_retransmitted_bytes) {
        this.s2c_tcp_retransmitted_bytes = s2c_tcp_retransmitted_bytes;
    }

    public byte[] getClient_ip_sketch() {
        return client_ip_sketch;
    }

    public void setClient_ip_sketch(byte[] client_ip_sketch) {
        this.client_ip_sketch = client_ip_sketch;
    }
}
73
src/main/java/com/zdjizhi/common/pojo/Tags.java
Normal file
@@ -0,0 +1,73 @@
package com.zdjizhi.common.pojo;

/**
 * @author qidaijie
 * @Package com.zdjizhi.common.pojo
 * @Description:
 * @date 2023/4/23 11:48
 */
public class Tags {
    private int vsys_id;
    private String device_id;
    private String device_group;
    private String data_center;
    private String protocol_label;
    private String app_full_path;

    public Tags(int vsys_id, String device_id, String device_group, String data_center, String protocol_label, String app_full_path) {
        this.vsys_id = vsys_id;
        this.device_id = device_id;
        this.device_group = device_group;
        this.data_center = data_center;
        this.protocol_label = protocol_label;
        this.app_full_path = app_full_path;
    }

    public int getVsys_id() {
        return vsys_id;
    }

    public void setVsys_id(int vsys_id) {
        this.vsys_id = vsys_id;
    }

    public String getDevice_id() {
        return device_id;
    }

    public void setDevice_id(String device_id) {
        this.device_id = device_id;
    }

    public String getDevice_group() {
        return device_group;
    }

    public void setDevice_group(String device_group) {
        this.device_group = device_group;
    }

    public String getData_center() {
        return data_center;
    }

    public void setData_center(String data_center) {
        this.data_center = data_center;
    }

    public String getProtocol_label() {
        return protocol_label;
    }

    public void setProtocol_label(String protocol_label) {
        this.protocol_label = protocol_label;
    }

    public String getApp_full_path() {
        return app_full_path;
    }

    public void setApp_full_path(String app_full_path) {
        this.app_full_path = app_full_path;
    }
}
67
src/main/java/com/zdjizhi/topology/ApplicationProtocolTopology.java
Normal file
@@ -0,0 +1,67 @@
package com.zdjizhi.topology;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.config.GlobalConfig;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.functions.filter.DataTypeFilter;
import com.zdjizhi.utils.functions.keyby.DimensionKeyBy;
import com.zdjizhi.utils.functions.map.MetricsParseMap;
import com.zdjizhi.utils.functions.map.ResultFlatMap;
import com.zdjizhi.utils.functions.statistics.DispersionCountWindow;
import com.zdjizhi.utils.functions.statistics.MergeCountWindow;
import com.zdjizhi.utils.kafka.KafkaConsumer;
import com.zdjizhi.utils.kafka.KafkaProducer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

/**
 * @author qidaijie
 * @Package com.zdjizhi.topology
 * @Description:
 * @date 2021/5/20 16:42
 */
public class ApplicationProtocolTopology {
    private static final Log logger = LogFactory.get();

    public static void main(String[] args) {
        try {
            final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();

            // Consume the raw logs from Kafka
            DataStream<String> streamSource = environment.addSource(KafkaConsumer.getKafkaConsumer())
                    .setParallelism(GlobalConfig.SOURCE_PARALLELISM).name(GlobalConfig.SOURCE_KAFKA_TOPIC);

            // Keep only traffic_application_protocol_stat records
            SingleOutputStreamOperator<String> appProtocolFilter = streamSource.filter(new DataTypeFilter())
                    .name("appProtocolFilter").setParallelism(GlobalConfig.SOURCE_PARALLELISM);

            // Parse each record into (group key, AppProtocol)
            SingleOutputStreamOperator<Tuple2<String, AppProtocol>> parseDataMap = appProtocolFilter.map(new MetricsParseMap())
                    .name("ParseDataMap").setParallelism(GlobalConfig.PARSE_PARALLELISM);

            // Aggregate per dimension key inside a tumbling processing-time window
            SingleOutputStreamOperator<AppProtocol> dispersionCountWindow = parseDataMap.keyBy(new DimensionKeyBy())
                    .window(TumblingProcessingTimeWindows.of(Time.seconds(GlobalConfig.COUNT_WINDOW_TIME)))
                    .reduce(new DispersionCountWindow(), new MergeCountWindow())
                    .name("DispersionCountWindow")
                    .setParallelism(GlobalConfig.WINDOW_PARALLELISM);

            // Expand each aggregate into one record per protocol-stack level
            SingleOutputStreamOperator<String> resultFlatMap = dispersionCountWindow.flatMap(new ResultFlatMap())
                    .name("ResultFlatMap").setParallelism(GlobalConfig.SINK_PARALLELISM);

            resultFlatMap.addSink(KafkaProducer.getKafkaProducer())
                    .setParallelism(GlobalConfig.SINK_PARALLELISM).name(GlobalConfig.SINK_KAFKA_TOPIC);

            // The job name is passed as the first program argument
            environment.execute(args[0]);
        } catch (Exception e) {
            logger.error("This Flink task failed to start! Exception information is: " + e);
        }
    }

}
18
src/main/java/com/zdjizhi/utils/exception/AnalysisException.java
Normal file
@@ -0,0 +1,18 @@
package com.zdjizhi.utils.exception;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.exception
 * @Description:
 * @date 2021/3/25 9:42
 */
public class AnalysisException extends RuntimeException {

    public AnalysisException() {
    }

    public AnalysisException(String message) {
        super(message);
    }

}
36
src/main/java/com/zdjizhi/utils/functions/filter/DataTypeFilter.java
Normal file
@@ -0,0 +1,36 @@
package com.zdjizhi.utils.functions.filter;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSONPath;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.functions.FilterFunction;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.functions.filter
 * @Description:
 * @date 2023/4/19 19:02
 */
public class DataTypeFilter implements FilterFunction<String> {
    private static final Log logger = LogFactory.get();

    /**
     * Matches records whose "name" field is traffic_application_protocol_stat.
     */
    private static final String dataTypeExpr = "[?(@.name = 'traffic_application_protocol_stat')]";

    @Override
    public boolean filter(String message) throws Exception {
        boolean protocolData = false;
        try {
            if (StringUtil.isNotBlank(message)) {
                Object name = JSONPath.eval(message, dataTypeExpr);
                if (name != null) {
                    protocolData = true;
                }
            }
        } catch (RuntimeException e) {
            logger.error("Failed to parse metric data! The exception message is: " + e.getMessage());
        }
        return protocolData;
    }
}
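Reviewer note: a quick sketch of the filter's intent (records invented; the exact shape JSONPath.eval returns for a non-array root is fastjson2-specific, so read the booleans as the expected outcome rather than a guarantee):

DataTypeFilter filter = new DataTypeFilter();
filter.filter("{\"name\":\"traffic_application_protocol_stat\",\"tags\":{},\"fields\":{}}");  // expected: true
filter.filter("{\"name\":\"traffic_dns_stat\"}");                                             // expected: false
filter.filter("");                                                                            // blank input -> false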
26
src/main/java/com/zdjizhi/utils/functions/keyby/DimensionKeyBy.java
Normal file
@@ -0,0 +1,26 @@
package com.zdjizhi.utils.functions.keyby;

import com.zdjizhi.common.pojo.AppProtocol;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.functions
 * @Description:
 * @date 2021/7/21 12:13
 */
public class DimensionKeyBy implements KeySelector<Tuple2<String, AppProtocol>, String> {

    @Override
    public String getKey(Tuple2<String, AppProtocol> value) throws Exception {
        // Group by the key assembled from the tags JSON (Tuple2.f0)
        return value.f0;
    }
}
49
src/main/java/com/zdjizhi/utils/functions/map/MetricsParseMap.java
Normal file
@@ -0,0 +1,49 @@
package com.zdjizhi.utils.functions.map;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSON;
import com.alibaba.fastjson2.JSONObject;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.functions
 * @Description:
 * @date 2021/5/27 15:01
 */
public class MetricsParseMap implements MapFunction<String, Tuple2<String, AppProtocol>> {
    private static final Log logger = LogFactory.get();

    @Override
    @SuppressWarnings("unchecked")
    public Tuple2<String, AppProtocol> map(String message) {
        try {
            JSONObject originalLog = JSON.parseObject(message);
            JSONObject fieldsObject = JSONObject.parseObject(originalLog.getString("fields"));
            JSONObject tagsObject = JSONObject.parseObject(originalLog.getString("tags"));
            fieldsObject.putAll(tagsObject);

            AppProtocol appProtocol = JSON.to(AppProtocol.class, fieldsObject);

            // Keep only the last path segment as the app name, and extend the
            // protocol label with the full app path
            String appFullPath = appProtocol.getApp_name();
            if (StringUtil.isNotBlank(appFullPath)) {
                String appName = appFullPath.substring(appFullPath.lastIndexOf(".") + 1);
                String protocolLabel = appProtocol.getProtocol_stack_id();

                appProtocol.setApp_name(appName);
                appProtocol.setProtocol_stack_id(protocolLabel.concat(".").concat(appFullPath));
            }

            return new Tuple2<>(tagsObject.toJSONString(), appProtocol);
        } catch (RuntimeException e) {
            logger.error("An error occurred while parsing and reassembling the original log, error message is: " + e);
            return new Tuple2<>(null, null);
        }
    }

}
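Reviewer note: a worked example of the reshaping (record invented for illustration):

// in : {"name":"traffic_application_protocol_stat",
//       "tags":{"protocol_label":"eth.ipv4.tcp","app_full_path":"http.wechat","device_id":"dev-01"},
//       "fields":{"sessions":3,"in_bytes":1024}}
// out: f0 (group key) = the tags object serialized back to a JSON string
//      f1.getApp_name()          -> "wechat"                    (last segment of app_full_path)
//      f1.getProtocol_stack_id() -> "eth.ipv4.tcp.http.wechat"  (protocol_label + "." + app_full_path)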
55
src/main/java/com/zdjizhi/utils/functions/map/ResultFlatMap.java
Normal file
@@ -0,0 +1,55 @@
package com.zdjizhi.utils.functions.map;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSONObject;
import com.zdjizhi.common.config.GlobalConfig;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.StringUtil;
import com.zdjizhi.utils.general.FormatConverterUtil;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.util.Collector;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.functions
 * @Description:
 * @date 2021/7/21 14:52
 */
public class ResultFlatMap implements FlatMapFunction<AppProtocol, String> {
    private static final Log logger = LogFactory.get();

    @Override
    @SuppressWarnings("unchecked")
    public void flatMap(AppProtocol appProtocol, Collector<String> out) throws Exception {
        try {
            JSONObject tags = FormatConverterUtil.getTags(appProtocol);
            JSONObject conversion = FormatConverterUtil.structureConversion(appProtocol);

            String protocolStackId = tags.getString("protocol_stack_id");

            // Emit the full record first (with app_name), then one record per
            // protocol-stack prefix with app_name removed
            out.collect(FormatConverterUtil.updateTagsData(conversion, tags));
            tags.remove("app_name");

            StringBuilder stringBuilder = new StringBuilder();
            String[] protocolIds = protocolStackId.split(GlobalConfig.PROTOCOL_SPLITTER);
            int protocolIdsNum = protocolIds.length;
            for (int i = 0; i < protocolIdsNum - 1; i++) {
                if (StringUtil.isBlank(stringBuilder.toString())) {
                    stringBuilder.append(protocolIds[i]);
                } else {
                    stringBuilder.append(".").append(protocolIds[i]);
                }
                tags.put("protocol_stack_id", stringBuilder.toString());
                out.collect(FormatConverterUtil.updateTagsData(conversion, tags));
            }
        } catch (RuntimeException e) {
            logger.error("An exception occurred while expanding the result data, error message is: " + e);
        }
    }
}
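Reviewer note: for an aggregate whose protocol_stack_id is "eth.ipv4.tcp.http.wechat", the collector emits five records:

//   1. protocol_stack_id = "eth.ipv4.tcp.http.wechat"  (full record, app_name kept)
//   2. protocol_stack_id = "eth"                       (app_name removed from here on)
//   3. protocol_stack_id = "eth.ipv4"
//   4. protocol_stack_id = "eth.ipv4.tcp"
//   5. protocol_stack_id = "eth.ipv4.tcp.http"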
33
src/main/java/com/zdjizhi/utils/functions/statistics/DispersionCountWindow.java
Normal file
@@ -0,0 +1,33 @@
package com.zdjizhi.utils.functions.statistics;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.general.MetricUtil;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.functions.statistics
 * @Description:
 * @date 2023/4/23 14:02
 */
public class DispersionCountWindow implements ReduceFunction<Tuple2<String, AppProtocol>> {
    private static final Log logger = LogFactory.get();

    @Override
    public Tuple2<String, AppProtocol> reduce(Tuple2<String, AppProtocol> value1, Tuple2<String, AppProtocol> value2) throws Exception {
        try {
            AppProtocol cacheData = value1.f1;
            AppProtocol newData = value2.f1;

            // Accumulate the new element into the cached aggregate in place
            MetricUtil.statisticsMetrics(cacheData, newData);

            return new Tuple2<>(value1.f0, cacheData);
        } catch (RuntimeException e) {
            logger.error("An exception occurred during incremental aggregation! The message is: " + e.getMessage());
            return value1;
        }
    }
}
34
src/main/java/com/zdjizhi/utils/functions/statistics/MergeCountWindow.java
Normal file
@@ -0,0 +1,34 @@
package com.zdjizhi.utils.functions.statistics;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.pojo.AppProtocol;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.functions.statistics
 * @Description:
 * @date 2023/4/23 14:43
 */
public class MergeCountWindow extends ProcessWindowFunction<Tuple2<String, AppProtocol>, AppProtocol, String, TimeWindow> {
    private static final Log logger = LogFactory.get();

    @Override
    public void process(String windowKey, Context context, Iterable<Tuple2<String, AppProtocol>> input, Collector<AppProtocol> output) throws Exception {
        try {
            // Because the window combines reduce() with this ProcessWindowFunction,
            // the Iterable holds exactly one pre-aggregated element per key.
            // Stamp it with the window end time, in seconds.
            Long endTime = context.window().getEnd() / 1000;
            for (Tuple2<String, AppProtocol> tuple : input) {
                AppProtocol data = tuple.f1;
                data.setTimestamp(endTime);
                output.collect(data);
            }
        } catch (RuntimeException e) {
            logger.error("An exception occurred during full-window aggregation! The message is: " + e.getMessage());
        }
    }

}
91
src/main/java/com/zdjizhi/utils/general/FormatConverterUtil.java
Normal file
@@ -0,0 +1,91 @@
package com.zdjizhi.utils.general;

import com.alibaba.fastjson2.JSONObject;
import com.alibaba.fastjson2.JSONWriter;
import com.zdjizhi.common.pojo.AppProtocol;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.general
 * @Description:
 * @date 2023/5/5 19:04
 */
public class FormatConverterUtil {

    /**
     * Build the tags object.
     *
     * @param appProtocol aggregated result
     * @return tags object
     */
    public static JSONObject getTags(AppProtocol appProtocol) {
        JSONObject tags = new JSONObject();
        tags.fluentPut("vsys_id", appProtocol.getVsys_id())
                .fluentPut("device_id", appProtocol.getDevice_id())
                .fluentPut("device_group", appProtocol.getDevice_group())
                .fluentPut("data_center", appProtocol.getData_center())
                .fluentPut("protocol_stack_id", appProtocol.getProtocol_stack_id())
                .fluentPut("app_name", appProtocol.getApp_name());

        return tags;
    }

    /**
     * Convert the aggregate into the final output structure.
     *
     * @param appProtocol aggregated result
     * @return result object
     */
    public static JSONObject structureConversion(AppProtocol appProtocol) {
        JSONObject metrics = new JSONObject();
        JSONObject fields = new JSONObject();

        fields.fluentPut("sessions", appProtocol.getSessions())
                .fluentPut("in_bytes", appProtocol.getIn_bytes())
                .fluentPut("out_bytes", appProtocol.getOut_bytes())
                .fluentPut("in_pkts", appProtocol.getIn_pkts())
                .fluentPut("out_pkts", appProtocol.getOut_pkts())
                .fluentPut("c2s_bytes", appProtocol.getC2s_bytes())
                .fluentPut("s2c_bytes", appProtocol.getS2c_bytes())
                .fluentPut("c2s_pkts", appProtocol.getC2s_pkts())
                .fluentPut("s2c_pkts", appProtocol.getS2c_pkts())
                .fluentPut("c2s_fragments", appProtocol.getC2s_fragments())
                .fluentPut("s2c_fragments", appProtocol.getS2c_fragments())
                .fluentPut("c2s_tcp_lost_bytes", appProtocol.getC2s_tcp_lost_bytes())
                .fluentPut("s2c_tcp_lost_bytes", appProtocol.getS2c_tcp_lost_bytes())
                .fluentPut("c2s_tcp_ooorder_pkts", appProtocol.getC2s_tcp_ooorder_pkts())
                .fluentPut("s2c_tcp_ooorder_pkts", appProtocol.getS2c_tcp_ooorder_pkts())
                // Each output key maps to the getter of the same name; the
                // retransmitted pkts/bytes pairs are easy to swap by accident
                .fluentPut("c2s_tcp_retransmitted_pkts", appProtocol.getC2s_tcp_retransmitted_pkts())
                .fluentPut("s2c_tcp_retransmitted_pkts", appProtocol.getS2c_tcp_retransmitted_pkts())
                .fluentPut("c2s_tcp_retransmitted_bytes", appProtocol.getC2s_tcp_retransmitted_bytes())
                .fluentPut("s2c_tcp_retransmitted_bytes", appProtocol.getS2c_tcp_retransmitted_bytes())
                .fluentPut("client_ip_sketch", appProtocol.getClient_ip_sketch());

        metrics.fluentPut("timestamp", appProtocol.getTimestamp())
                .fluentPut("name", "application_protocol_stat")
                .fluentPut("fields", fields);

        return metrics;
    }

    /**
     * Update the tags of the result (for the different protocol levels) and serialize to JSON.
     *
     * @param conversion result object
     * @param tags       tags object
     * @return result JSON string
     */
    public static String updateTagsData(JSONObject conversion, JSONObject tags) {
        conversion.put("tags", tags);

        return JSONObject.toJSONString(conversion,
                JSONWriter.Feature.WriteNullStringAsEmpty,
                JSONWriter.Feature.WriteNullNumberAsZero);
    }

}
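Reviewer note: the shape of one serialized output record (all values invented for illustration):

// {"timestamp":1682230800,
//  "name":"application_protocol_stat",
//  "fields":{"sessions":42,"in_bytes":10240, ... ,"client_ip_sketch":"AgEHDAMI..."},
//  "tags":{"vsys_id":1,"device_id":"dev-01","device_group":"dg-1","data_center":"dc-1",
//          "protocol_stack_id":"eth.ipv4.tcp","app_name":"http"}}
// Null strings serialize as "" and null numbers as 0 because of the
// WriteNullStringAsEmpty / WriteNullNumberAsZero features.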
111
src/main/java/com/zdjizhi/utils/general/MetricUtil.java
Normal file
@@ -0,0 +1,111 @@
package com.zdjizhi.utils.general;

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.StringUtil;
import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.Union;

import java.util.Base64;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.general
 * @Description:
 * @date 2021/7/20 15:31
 */
public class MetricUtil {
    private static final Log logger = LogFactory.get();

    /**
     * Accumulate the business metrics of a new element into the cached aggregate.
     *
     * @param cacheData the cached aggregate
     * @param newData   the newly arrived element
     */
    public static void statisticsMetrics(AppProtocol cacheData, AppProtocol newData) {
        cacheData.setSessions(MetricUtil.longSum(cacheData.getSessions(), newData.getSessions()));

        cacheData.setIn_bytes(MetricUtil.longSum(cacheData.getIn_bytes(), newData.getIn_bytes()));
        cacheData.setOut_bytes(MetricUtil.longSum(cacheData.getOut_bytes(), newData.getOut_bytes()));
        cacheData.setIn_pkts(MetricUtil.longSum(cacheData.getIn_pkts(), newData.getIn_pkts()));
        cacheData.setOut_pkts(MetricUtil.longSum(cacheData.getOut_pkts(), newData.getOut_pkts()));

        cacheData.setC2s_bytes(MetricUtil.longSum(cacheData.getC2s_bytes(), newData.getC2s_bytes()));
        cacheData.setS2c_bytes(MetricUtil.longSum(cacheData.getS2c_bytes(), newData.getS2c_bytes()));
        cacheData.setC2s_pkts(MetricUtil.longSum(cacheData.getC2s_pkts(), newData.getC2s_pkts()));
        cacheData.setS2c_pkts(MetricUtil.longSum(cacheData.getS2c_pkts(), newData.getS2c_pkts()));

        cacheData.setC2s_fragments(MetricUtil.longSum(cacheData.getC2s_fragments(), newData.getC2s_fragments()));
        cacheData.setS2c_fragments(MetricUtil.longSum(cacheData.getS2c_fragments(), newData.getS2c_fragments()));

        cacheData.setC2s_tcp_lost_bytes(MetricUtil.longSum(cacheData.getC2s_tcp_lost_bytes(), newData.getC2s_tcp_lost_bytes()));
        cacheData.setS2c_tcp_lost_bytes(MetricUtil.longSum(cacheData.getS2c_tcp_lost_bytes(), newData.getS2c_tcp_lost_bytes()));

        cacheData.setC2s_tcp_ooorder_pkts(MetricUtil.longSum(cacheData.getC2s_tcp_ooorder_pkts(), newData.getC2s_tcp_ooorder_pkts()));
        cacheData.setS2c_tcp_ooorder_pkts(MetricUtil.longSum(cacheData.getS2c_tcp_ooorder_pkts(), newData.getS2c_tcp_ooorder_pkts()));

        cacheData.setC2s_tcp_retransmitted_bytes(MetricUtil.longSum(cacheData.getC2s_tcp_retransmitted_bytes(), newData.getC2s_tcp_retransmitted_bytes()));
        cacheData.setS2c_tcp_retransmitted_bytes(MetricUtil.longSum(cacheData.getS2c_tcp_retransmitted_bytes(), newData.getS2c_tcp_retransmitted_bytes()));

        cacheData.setC2s_tcp_retransmitted_pkts(MetricUtil.longSum(cacheData.getC2s_tcp_retransmitted_pkts(), newData.getC2s_tcp_retransmitted_pkts()));
        cacheData.setS2c_tcp_retransmitted_pkts(MetricUtil.longSum(cacheData.getS2c_tcp_retransmitted_pkts(), newData.getS2c_tcp_retransmitted_pkts()));

        cacheData.setClient_ip_sketch(MetricUtil.hllSketchUnion(cacheData.getClient_ip_sketch(), newData.getClient_ip_sketch()));
    }

    /**
     * Null-safe sum of two Long values. A missing value counts as 0, so a zero
     * or absent cached value no longer drops the newly arrived one.
     *
     * @param value1 first value
     * @param value2 second value
     * @return value1 + value2
     */
    private static Long longSum(Long value1, Long value2) {
        long v1 = value1 == null ? 0L : value1;
        long v2 = value2 == null ? 0L : value2;
        return v1 + v2;
    }

    /**
     * Merge two Base64-encoded HLL sketches.
     *
     * @param cacheHll the cached sketch (Base64)
     * @param newHll   the newly arrived sketch (Base64)
     * @return the merged sketch (Base64)
     */
    private static String hllSketchUnion(String cacheHll, String newHll) {
        Union union = new Union(12);
        try {
            if (StringUtil.isNotBlank(cacheHll)) {
                byte[] cacheHllBytes = Base64.getDecoder().decode(cacheHll);
                HllSketch cacheSketch = HllSketch.heapify(cacheHllBytes);
                union.update(cacheSketch);
            }

            if (StringUtil.isNotBlank(newHll)) {
                byte[] newHllBytes = Base64.getDecoder().decode(newHll);
                HllSketch newSketch = HllSketch.heapify(newHllBytes);
                union.update(newSketch);
            }
            return Base64.getEncoder().encodeToString(union.getResult().toUpdatableByteArray());
        } catch (RuntimeException e) {
            logger.error("Failed to merge HllSketch results! The message is: " + e.getMessage());
            return null;
        }
    }

}
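Reviewer note: a minimal, self-contained sketch of the HLL round trip that client_ip_sketch goes through (IPs invented; lgK = 12 matches the Union above):

import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.Union;
import java.util.Base64;

HllSketch a = new HllSketch(12);
a.update("10.0.0.1");
a.update("10.0.0.2");
HllSketch b = new HllSketch(12);
b.update("10.0.0.2");   // overlap with a is deduplicated by the union
b.update("10.0.0.3");

Union union = new Union(12);
union.update(a);
union.update(b);
double estimate = union.getResult().getEstimate();   // ~3.0 distinct client IPs
// what gets stored back into client_ip_sketch:
String encoded = Base64.getEncoder().encodeToString(union.getResult().toUpdatableByteArray());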
48
src/main/java/com/zdjizhi/utils/kafka/CertUtils.java
Normal file
@@ -0,0 +1,48 @@
package com.zdjizhi.utils.kafka;

import com.zdjizhi.common.config.GlobalConfig;
import org.apache.kafka.common.config.SslConfigs;

import java.util.Properties;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.kafka
 * @Description:
 * @date 2021/9/6 10:37
 */
class CertUtils {
    /**
     * Kafka SASL authentication port
     */
    private static final String SASL_PORT = "9094";

    /**
     * Kafka SSL authentication port
     */
    private static final String SSL_PORT = "9095";

    /**
     * Choose the authentication mode from the port in the connection string.
     * Note this is a substring match, so it assumes the port numbers only
     * appear as ports in the server list.
     *
     * @param servers    Kafka bootstrap servers
     * @param properties Kafka connection properties
     */
    static void chooseCert(String servers, Properties properties) {
        if (servers.contains(SASL_PORT)) {
            properties.put("security.protocol", "SASL_PLAINTEXT");
            properties.put("sasl.mechanism", "PLAIN");
            // Quote the JAAS option values, as the JAAS config parser expects
            properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""
                    + GlobalConfig.KAFKA_SASL_JAAS_USER + "\" password=\"" + GlobalConfig.KAFKA_SASL_JAAS_PIN + "\";");
        } else if (servers.contains(SSL_PORT)) {
            properties.put("security.protocol", "SSL");
            properties.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
            properties.put("ssl.keystore.location", GlobalConfig.TOOLS_LIBRARY + "keystore.jks");
            properties.put("ssl.keystore.password", GlobalConfig.KAFKA_SASL_JAAS_PIN);
            properties.put("ssl.truststore.location", GlobalConfig.TOOLS_LIBRARY + "truststore.jks");
            properties.put("ssl.truststore.password", GlobalConfig.KAFKA_SASL_JAAS_PIN);
            properties.put("ssl.key.password", GlobalConfig.KAFKA_SASL_JAAS_PIN);
        }
    }
}
47
src/main/java/com/zdjizhi/utils/kafka/KafkaConsumer.java
Normal file
@@ -0,0 +1,47 @@
package com.zdjizhi.utils.kafka;

import com.zdjizhi.common.config.GlobalConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Properties;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.kafka
 * @Description:
 * @date 2021/6/8 13:54
 */
public class KafkaConsumer {
    private static Properties createConsumerConfig() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", GlobalConfig.SOURCE_KAFKA_SERVERS);
        properties.put("group.id", GlobalConfig.GROUP_ID);
        properties.put("session.timeout.ms", GlobalConfig.SESSION_TIMEOUT_MS);
        properties.put("max.poll.records", GlobalConfig.MAX_POLL_RECORDS);
        properties.put("max.partition.fetch.bytes", GlobalConfig.MAX_PARTITION_FETCH_BYTES);
        properties.put("partition.discovery.interval.ms", "10000");

        CertUtils.chooseCert(GlobalConfig.SOURCE_KAFKA_SERVERS, properties);

        return properties;
    }

    /**
     * Build a Kafka consumer that deserializes records with the stock string schema.
     *
     * @return the Flink Kafka consumer
     */
    public static FlinkKafkaConsumer<String> getKafkaConsumer() {
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(GlobalConfig.SOURCE_KAFKA_TOPIC,
                new SimpleStringSchema(), createConsumerConfig());

        // Commit offsets back to Kafka when a checkpoint completes
        kafkaConsumer.setCommitOffsetsOnCheckpoints(true);

        // Start from the consumer group's current offsets
        kafkaConsumer.setStartFromGroupOffsets();

        return kafkaConsumer;
    }
}
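Reviewer note: setCommitOffsetsOnCheckpoints(true) only takes effect when checkpointing is enabled, and ApplicationProtocolTopology above never calls enableCheckpointing; without it the consumer falls back to Kafka's own auto-commit. A one-line sketch of what the topology would need (the 60s interval is an assumption, not from the commit):

// In ApplicationProtocolTopology#main, before execute():
environment.enableCheckpointing(60_000);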
48
src/main/java/com/zdjizhi/utils/kafka/KafkaProducer.java
Normal file
@@ -0,0 +1,48 @@
package com.zdjizhi.utils.kafka;

import com.zdjizhi.common.config.GlobalConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Optional;
import java.util.Properties;

/**
 * @author qidaijie
 * @Package com.zdjizhi.utils.kafka
 * @Description:
 * @date 2021/6/8 14:04
 */
public class KafkaProducer {

    private static Properties createProducerConfig() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", GlobalConfig.SINK_KAFKA_SERVERS);
        properties.put("acks", GlobalConfig.PRODUCER_ACK);
        properties.put("retries", GlobalConfig.RETRIES);
        properties.put("linger.ms", GlobalConfig.LINGER_MS);
        properties.put("request.timeout.ms", GlobalConfig.REQUEST_TIMEOUT_MS);
        properties.put("batch.size", GlobalConfig.BATCH_SIZE);
        properties.put("buffer.memory", GlobalConfig.BUFFER_MEMORY);
        properties.put("max.request.size", GlobalConfig.MAX_REQUEST_SIZE);
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, GlobalConfig.PRODUCER_KAFKA_COMPRESSION_TYPE);

        CertUtils.chooseCert(GlobalConfig.SINK_KAFKA_SERVERS, properties);

        return properties;
    }

    public static FlinkKafkaProducer<String> getKafkaProducer() {
        FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<String>(
                GlobalConfig.SINK_KAFKA_TOPIC,
                new SimpleStringSchema(),
                createProducerConfig(), Optional.empty());

        // With this enabled the producer only logs write failures instead of
        // rethrowing them, so failed records are dropped rather than failing the job
        kafkaProducer.setLogFailuresOnly(true);

        return kafkaProducer;
    }
}
25
src/main/java/log4j.properties
Normal file
@@ -0,0 +1,25 @@
#Log4j
log4j.rootLogger=error,console,file
# Console appender settings
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Threshold=error
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] [Thread\:%t] %l %x - <%m>%n

# File appender settings
log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
log4j.appender.file.Threshold=error
log4j.appender.file.encoding=UTF-8
log4j.appender.file.Append=true
# Use a relative path and verify that output lands under the application directory
log4j.appender.file.file=${nis.root}/log/galaxy-name.log
log4j.appender.file.DatePattern='.'yyyy-MM-dd
log4j.appender.file.layout=org.apache.log4j.PatternLayout
#log4j.appender.file.layout.ConversionPattern=%d{HH:mm:ss} %X{ip} [%t] %5p %c{1} %m%n
log4j.appender.file.layout.ConversionPattern=[%d{yyyy-MM-dd HH\:mm\:ss}] [%-5p] %X{ip} [Thread\:%t] %l %x - %m%n
# MyBatis configuration; com.nis.web.dao is the package holding the MyBatis interfaces
log4j.logger.com.nis.web.dao=error
# BoneCP data source configuration
log4j.category.com.jolbox=error,console