Submit the initial version of the Live Traffic Chart refactor code. (TSG-14799)

qidaijie
2023-05-06 15:08:21 +08:00
parent dbb6481635
commit ef57dda773
27 changed files with 2158 additions and 88 deletions

View File

@@ -0,0 +1,18 @@
package com.zdjizhi.utils.exception;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.exception
* @Description: Runtime exception thrown when log analysis fails
* @date 2021/3/25 9:42
*/
public class AnalysisException extends RuntimeException {
public AnalysisException() {
}
public AnalysisException(String message) {
super(message);
}
}

View File

@@ -0,0 +1,36 @@
package com.zdjizhi.utils.functions.filter;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSONPath;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.functions.FilterFunction;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.functions.filter
* @Description: Keeps only messages whose name is traffic_application_protocol_stat
* @date 2023/4/19 19:02
*/
public class DataTypeFilter implements FilterFunction<String> {
private static final Log logger = LogFactory.get();
private static final String DATA_TYPE_EXPR = "[?(@.name = 'traffic_application_protocol_stat')]";
@Override
public boolean filter(String message) throws Exception {
boolean protocolData = false;
try {
if (StringUtil.isNotBlank(message)) {
Object name = JSONPath.eval(message, DATA_TYPE_EXPR);
if (name != null) {
protocolData = true;
}
}
} catch (RuntimeException e) {
logger.error("Parsing metric data is abnormal! The exception message is:" + e.getMessage());
}
return protocolData;
}
}

View File

@@ -0,0 +1,26 @@
package com.zdjizhi.utils.functions.keyby;
import com.zdjizhi.common.pojo.AppProtocol;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.functions.keyby
* @Description: Key selector that groups records by the tags-based key
* @date 2021/7/21 12:13
*/
public class DimensionKeyBy implements KeySelector<Tuple2<String, AppProtocol>, String> {
@Override
public String getKey(Tuple2<String, AppProtocol> value) throws Exception {
//Group by the key concatenated from the tags map
return value.f0;
}
}

View File

@@ -0,0 +1,49 @@
package com.zdjizhi.utils.functions.map;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSON;
import com.alibaba.fastjson2.JSONObject;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.StringUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.functions.map
* @Description: Parses the original log and reassembles it into a (tags key, AppProtocol) tuple
* @date 2021/5/27 15:01
*/
public class MetricsParseMap implements MapFunction<String, Tuple2<String, AppProtocol>> {
private static final Log logger = LogFactory.get();
@Override
@SuppressWarnings("unchecked")
public Tuple2<String, AppProtocol> map(String message) {
try {
JSONObject originalLog = JSON.parseObject(message);
JSONObject fieldsObject = JSONObject.parseObject(originalLog.getString("fields"));
JSONObject tagsObject = JSONObject.parseObject(originalLog.getString("tags"));
fieldsObject.putAll(tagsObject);
AppProtocol appProtocol = JSON.to(AppProtocol.class, fieldsObject);
String appFullPath = appProtocol.getApp_name();
if (StringUtil.isNotBlank(appFullPath)) {
String appName = appFullPath.substring(appFullPath.lastIndexOf(".") + 1);
String protocolLabel = appProtocol.getProtocol_stack_id();
appProtocol.setApp_name(appName);
appProtocol.setProtocol_stack_id(protocolLabel.concat(".").concat(appFullPath));
}
return new Tuple2<>(tagsObject.toJSONString(), appProtocol);
} catch (RuntimeException e) {
logger.error("An error occurred in the original log parsing reorganization,error message is:" + e);
return new Tuple2<>(null, null);
}
}
}

View File

@@ -0,0 +1,55 @@
package com.zdjizhi.utils.functions.map;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSONObject;
import com.zdjizhi.common.config.GlobalConfig;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.StringUtil;
import com.zdjizhi.utils.general.FormatConverterUtil;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.util.Collector;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.functions.map
* @Description: Expands each AppProtocol result into one record per protocol-stack level
* @date 2021/7/21 14:52
*/
public class ResultFlatMap implements FlatMapFunction<AppProtocol, String> {
private static final Log logger = LogFactory.get();
@Override
@SuppressWarnings("unchecked")
public void flatMap(AppProtocol appProtocol, Collector<String> out) throws Exception {
try {
JSONObject tags = FormatConverterUtil.getTags(appProtocol);
JSONObject conversion = FormatConverterUtil.structureConversion(appProtocol);
String protocolStackId = tags.getString("protocol_stack_id");
//Emit the full-path record first, then one record per parent protocol level
out.collect(FormatConverterUtil.updateTagsData(conversion, tags));
tags.remove("app_name");
StringBuilder stringBuilder = new StringBuilder();
String[] protocolIds = protocolStackId.split(GlobalConfig.PROTOCOL_SPLITTER);
//Rebuild each prefix of the protocol stack except the full path already emitted
for (int i = 0; i < protocolIds.length - 1; i++) {
if (StringUtil.isNotBlank(stringBuilder.toString())) {
stringBuilder.append(".");
}
stringBuilder.append(protocolIds[i]);
tags.put("protocol_stack_id", stringBuilder.toString());
out.collect(FormatConverterUtil.updateTagsData(conversion, tags));
}
} catch (RuntimeException e) {
logger.error("An exception occurred while expanding the result data, error message is: " + e);
}
}
}

View File

@@ -0,0 +1,33 @@
package com.zdjizhi.utils.functions.statistics;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.general.MetricUtil;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.functions.statistics
* @Description: Incremental (reduce) aggregation of metrics for records sharing the same key within a window
* @date 2023/4/23 14:02
*/
public class DispersionCountWindow implements ReduceFunction<Tuple2<String, AppProtocol>> {
private static final Log logger = LogFactory.get();
@Override
public Tuple2<String, AppProtocol> reduce(Tuple2<String, AppProtocol> value1, Tuple2<String, AppProtocol> value2) throws Exception {
try {
AppProtocol cacheData = value1.f1;
AppProtocol newData = value2.f1;
MetricUtil.statisticsMetrics(cacheData, newData);
return new Tuple2<>(value1.f0, cacheData);
} catch (RuntimeException e) {
logger.error("An exception occurred during incremental aggregation! The message is:" + e.getMessage());
return value1;
}
}
}

View File

@@ -0,0 +1,34 @@
package com.zdjizhi.utils.functions.statistics;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.pojo.AppProtocol;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.functions.statistics
* @Description: Stamps each aggregated record with the window end time and emits it
* @date 2023/4/23 14:43
*/
public class MergeCountWindow extends ProcessWindowFunction<Tuple2<String, AppProtocol>, AppProtocol, String, TimeWindow> {
private static final Log logger = LogFactory.get();
@Override
public void process(String windowKey, Context context, Iterable<Tuple2<String, AppProtocol>> input, Collector<AppProtocol> output) throws Exception {
try {
Long endTime = context.window().getEnd() / 1000;
for (Tuple2<String, AppProtocol> tuple : input) {
AppProtocol data = tuple.f1;
data.setTimestamp(endTime);
output.collect(data);
}
} catch (RuntimeException e) {
logger.error("An exception occurred in the process of full data aggregation! The message is:" + e.getMessage());
}
}
}

View File

@@ -0,0 +1,91 @@
package com.zdjizhi.utils.general;
import com.alibaba.fastjson2.JSONObject;
import com.alibaba.fastjson2.JSONWriter;
import com.zdjizhi.common.pojo.AppProtocol;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.general
* @Description: Converts AppProtocol results into the final metrics JSON structure
* @date 2023/5/5 19:04
*/
public class FormatConverterUtil {
/**
* Build the tags object from the result set
*
* @param appProtocol result set
* @return tags object
*/
public static JSONObject getTags(AppProtocol appProtocol) {
JSONObject tags = new JSONObject();
tags.fluentPut("vsys_id", appProtocol.getVsys_id())
.fluentPut("device_id", appProtocol.getDevice_id())
.fluentPut("device_group", appProtocol.getDevice_group())
.fluentPut("data_center", appProtocol.getData_center())
.fluentPut("protocol_stack_id", appProtocol.getProtocol_stack_id())
.fluentPut("app_name", appProtocol.getApp_name());
return tags;
}
/**
* Convert the data structure into the final metrics structure
*
* @param appProtocol result set
* @return result data
*/
public static JSONObject structureConversion(AppProtocol appProtocol) {
JSONObject metrics = new JSONObject();
JSONObject fields = new JSONObject();
fields.fluentPut("sessions", appProtocol.getSessions())
.fluentPut("in_bytes", appProtocol.getIn_bytes())
.fluentPut("out_bytes", appProtocol.getOut_bytes())
.fluentPut("in_pkts", appProtocol.getIn_pkts())
.fluentPut("out_pkts", appProtocol.getOut_pkts())
.fluentPut("c2s_bytes", appProtocol.getC2s_bytes())
.fluentPut("s2c_bytes", appProtocol.getS2c_bytes())
.fluentPut("c2s_pkts", appProtocol.getC2s_pkts())
.fluentPut("s2c_pkts", appProtocol.getS2c_pkts())
.fluentPut("c2s_fragments", appProtocol.getC2s_fragments())
.fluentPut("s2c_fragments", appProtocol.getS2c_fragments())
.fluentPut("c2s_tcp_lost_bytes", appProtocol.getC2s_tcp_lost_bytes())
.fluentPut("s2c_tcp_lost_bytes", appProtocol.getS2c_tcp_lost_bytes())
.fluentPut("c2s_tcp_ooorder_pkts", appProtocol.getC2s_tcp_ooorder_pkts())
.fluentPut("s2c_tcp_ooorder_pkts", appProtocol.getS2c_tcp_ooorder_pkts())
.fluentPut("c2s_tcp_retransmitted_pkts", appProtocol.getC2s_tcp_retransmitted_bytes())
.fluentPut("s2c_tcp_retransmitted_pkts", appProtocol.getS2c_tcp_retransmitted_bytes())
.fluentPut("c2s_tcp_retransmitted_bytes", appProtocol.getC2s_tcp_retransmitted_pkts())
.fluentPut("s2c_tcp_retransmitted_bytes", appProtocol.getS2c_tcp_retransmitted_pkts())
.fluentPut("client_ip_sketch", appProtocol.getClient_ip_sketch());
metrics.put("timestamp", appProtocol.getTimestamp());
metrics.put("name", "application_protocol_stat");
metrics.fluentPut("timestamp", appProtocol.getTimestamp())
.fluentPut("name", "application_protocol_stat")
.fluentPut("fields", fields);
return metrics;
}
/**
* Update the tags of the result set for a given protocol level and output the JSON string
*
* @param conversion result set
* @param tags tags object
* @return result JSON
*/
public static String updateTagsData(JSONObject conversion, JSONObject tags) {
conversion.put("tags", tags);
return JSONObject.toJSONString(conversion
, JSONWriter.Feature.WriteNullStringAsEmpty
, JSONWriter.Feature.WriteNullNumberAsZero);
}
}
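
For reference, a minimal usage sketch of these helpers, not part of this diff: the AppProtocol constructor and the field values below are hypothetical placeholders, assumed only for illustration.

import com.alibaba.fastjson2.JSONObject;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.general.FormatConverterUtil;

public class FormatConverterExample {
    public static void main(String[] args) {
        // Hypothetical record; values are placeholders, not data from this commit
        AppProtocol appProtocol = new AppProtocol();
        appProtocol.setTimestamp(1683277200L);
        appProtocol.setApp_name("http");
        appProtocol.setProtocol_stack_id("1.eth.ip.tcp.http");

        JSONObject tags = FormatConverterUtil.getTags(appProtocol);
        JSONObject conversion = FormatConverterUtil.structureConversion(appProtocol);
        // Unset numeric fields serialize as 0 and unset strings as "" because of the JSONWriter features above
        System.out.println(FormatConverterUtil.updateTagsData(conversion, tags));
    }
}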

View File

@@ -0,0 +1,111 @@
package com.zdjizhi.utils.general;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.zdjizhi.common.pojo.AppProtocol;
import com.zdjizhi.utils.StringUtil;
import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.Union;
import java.util.Base64;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.general
* @Description: Helpers for aggregating traffic metrics and merging HLL sketches
* @date 2021/7/20 15:31
*/
public class MetricUtil {
private static final Log logger = LogFactory.get();
/**
* Accumulate the business metrics of a new record into the cached record
*
* @param cacheData cached data
* @param newData new data
*/
public static void statisticsMetrics(AppProtocol cacheData, AppProtocol newData) {
cacheData.setSessions(MetricUtil.longSum(cacheData.getSessions(), newData.getSessions()));
cacheData.setIn_bytes(MetricUtil.longSum(cacheData.getIn_bytes(), newData.getIn_bytes()));
cacheData.setOut_bytes(MetricUtil.longSum(cacheData.getOut_bytes(), newData.getOut_bytes()));
cacheData.setIn_pkts(MetricUtil.longSum(cacheData.getIn_pkts(), newData.getIn_pkts()));
cacheData.setOut_pkts(MetricUtil.longSum(cacheData.getOut_pkts(), newData.getOut_pkts()));
cacheData.setC2s_bytes(MetricUtil.longSum(cacheData.getC2s_bytes(), newData.getC2s_bytes()));
cacheData.setS2c_bytes(MetricUtil.longSum(cacheData.getS2c_bytes(), newData.getS2c_bytes()));
cacheData.setC2s_pkts(MetricUtil.longSum(cacheData.getC2s_pkts(), newData.getC2s_pkts()));
cacheData.setS2c_pkts(MetricUtil.longSum(cacheData.getS2c_pkts(), newData.getS2c_pkts()));
cacheData.setC2s_fragments(MetricUtil.longSum(cacheData.getC2s_fragments(), newData.getC2s_fragments()));
cacheData.setS2c_fragments(MetricUtil.longSum(cacheData.getS2c_fragments(), newData.getS2c_fragments()));
cacheData.setC2s_tcp_lost_bytes(MetricUtil.longSum(cacheData.getC2s_tcp_lost_bytes(), newData.getC2s_tcp_lost_bytes()));
cacheData.setS2c_tcp_lost_bytes(MetricUtil.longSum(cacheData.getS2c_tcp_lost_bytes(), newData.getS2c_tcp_lost_bytes()));
cacheData.setC2s_tcp_ooorder_pkts(MetricUtil.longSum(cacheData.getC2s_tcp_ooorder_pkts(), newData.getC2s_tcp_ooorder_pkts()));
cacheData.setS2c_tcp_ooorder_pkts(MetricUtil.longSum(cacheData.getS2c_tcp_ooorder_pkts(), newData.getS2c_tcp_ooorder_pkts()));
cacheData.setC2s_tcp_retransmitted_bytes(MetricUtil.longSum(cacheData.getC2s_tcp_retransmitted_bytes(), newData.getC2s_tcp_retransmitted_bytes()));
cacheData.setS2c_tcp_retransmitted_bytes(MetricUtil.longSum(cacheData.getS2c_tcp_retransmitted_bytes(), newData.getS2c_tcp_retransmitted_bytes()));
cacheData.setC2s_tcp_retransmitted_pkts(MetricUtil.longSum(cacheData.getC2s_tcp_retransmitted_pkts(), newData.getC2s_tcp_retransmitted_pkts()));
cacheData.setS2c_tcp_retransmitted_pkts(MetricUtil.longSum(cacheData.getS2c_tcp_retransmitted_pkts(), newData.getS2c_tcp_retransmitted_pkts()));
cacheData.setClient_ip_sketch(MetricUtil.hllSketchUnion(cacheData.getClient_ip_sketch(), newData.getClient_ip_sketch()));
}
/**
* Sum two Long values
*
* @param value1 first value
* @param value2 second value
* @return value1 + value2
*/
private static Long longSum(Long value1, Long value2) {
Long result = 0L;
try {
if (value1 > 0 && value2 > 0) {
result = value1 + value2;
} else if (value2 > 0) {
//If the cached value is missing or non-positive, keep the new value instead of dropping it
result = value2;
} else {
result = value1;
}
} catch (RuntimeException e) {
logger.error("Failed to sum traffic metric values! The message is:" + e.getMessage());
result = value1;
}
return result;
}
/**
* Merge two Base64-encoded HLL sketches
*
* @param cacheHll cached sketch
* @param newHll newly aggregated sketch
* @return merged sketch
*/
private static String hllSketchUnion(String cacheHll, String newHll) {
Union union = new Union(12);
try {
if (StringUtil.isNotBlank(cacheHll)) {
byte[] cacheHllBytes = Base64.getDecoder().decode(cacheHll);
HllSketch cacheSketch = HllSketch.heapify(cacheHllBytes);
union.update(cacheSketch);
}
if (StringUtil.isNotBlank(newHll)) {
byte[] newHllBytes = Base64.getDecoder().decode(newHll);
HllSketch newSketch = HllSketch.heapify(newHllBytes);
union.update(newSketch);
}
return Base64.getEncoder().encodeToString(union.getResult().toUpdatableByteArray());
} catch (RuntimeException e) {
logger.error("Merge hllSketch results abnormal! The message is:" + e.getMessage());
return null;
}
}
}
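
For reference, a minimal sketch of how a client_ip_sketch string compatible with hllSketchUnion could be produced upstream; this snippet is not part of this diff, and the IP values and the lgK of 12 (chosen to match Union(12) above) are assumptions.

import org.apache.datasketches.hll.HllSketch;
import java.util.Base64;

public class ClientIpSketchExample {
    public static void main(String[] args) {
        // Hypothetical producer side: count distinct client IPs in an HLL sketch
        HllSketch sketch = new HllSketch(12);
        sketch.update("10.0.0.1");
        sketch.update("10.0.0.2");
        // Base64-encode the updatable byte array, the format hllSketchUnion heapifies
        String clientIpSketch = Base64.getEncoder().encodeToString(sketch.toUpdatableByteArray());
        System.out.println(clientIpSketch);
    }
}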

View File

@@ -0,0 +1,48 @@
package com.zdjizhi.utils.kafka;
import com.zdjizhi.common.config.GlobalConfig;
import org.apache.kafka.common.config.SslConfigs;
import java.util.Properties;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.kafka
* @Description: Chooses the Kafka authentication settings based on the broker port
* @date 2021/9/6 10:37
*/
class CertUtils {
/**
* Kafka SASL authentication port
*/
private static final String SASL_PORT = "9094";
/**
* Kafka SSL authentication port
*/
private static final String SSL_PORT = "9095";
/**
* Choose the authentication mechanism based on the port in the connection string.
*
* @param servers Kafka bootstrap servers
* @param properties Kafka connection properties
*/
static void chooseCert(String servers, Properties properties) {
if (servers.contains(SASL_PORT)) {
properties.put("security.protocol", "SASL_PLAINTEXT");
properties.put("sasl.mechanism", "PLAIN");
properties.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username="
+ GlobalConfig.KAFKA_SASL_JAAS_USER + " password=" + GlobalConfig.KAFKA_SASL_JAAS_PIN + ";");
} else if (servers.contains(SSL_PORT)) {
properties.put("security.protocol", "SSL");
properties.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");
properties.put("ssl.keystore.location", GlobalConfig.TOOLS_LIBRARY + "keystore.jks");
properties.put("ssl.keystore.password", GlobalConfig.KAFKA_SASL_JAAS_PIN);
properties.put("ssl.truststore.location", GlobalConfig.TOOLS_LIBRARY + "truststore.jks");
properties.put("ssl.truststore.password", GlobalConfig.KAFKA_SASL_JAAS_PIN);
properties.put("ssl.key.password", GlobalConfig.KAFKA_SASL_JAAS_PIN);
}
}
}

View File

@@ -0,0 +1,47 @@
package com.zdjizhi.utils.kafka;
import com.zdjizhi.common.config.GlobalConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import java.util.Properties;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.kafka
* @Description: Builds the Flink Kafka consumer for the source topic
* @date 2021/6/8 13:54
*/
public class KafkaConsumer {
private static Properties createConsumerConfig() {
Properties properties = new Properties();
properties.put("bootstrap.servers", GlobalConfig.SOURCE_KAFKA_SERVERS);
properties.put("group.id", GlobalConfig.GROUP_ID);
properties.put("session.timeout.ms", GlobalConfig.SESSION_TIMEOUT_MS);
properties.put("max.poll.records", GlobalConfig.MAX_POLL_RECORDS);
properties.put("max.partition.fetch.bytes", GlobalConfig.MAX_PARTITION_FETCH_BYTES);
properties.put("partition.discovery.interval.ms", "10000");
CertUtils.chooseCert(GlobalConfig.SOURCE_KAFKA_SERVERS, properties);
return properties;
}
/**
* Consume Kafka data with the official SimpleStringSchema deserializer
*
* @return kafka logs
*/
public static FlinkKafkaConsumer<String> getKafkaConsumer() {
FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(GlobalConfig.SOURCE_KAFKA_TOPIC,
new SimpleStringSchema(), createConsumerConfig());
//Commit offsets to Kafka along with checkpoints
kafkaConsumer.setCommitOffsetsOnCheckpoints(true);
//Start consuming from the consumer group's current offsets
kafkaConsumer.setStartFromGroupOffsets();
return kafkaConsumer;
}
}

View File

@@ -0,0 +1,48 @@
package com.zdjizhi.utils.kafka;
import com.zdjizhi.common.config.GlobalConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import java.util.Optional;
import java.util.Properties;
/**
* @author qidaijie
* @Package com.zdjizhi.utils.kafka
* @Description: Builds the Flink Kafka producer for the sink topic
* @date 2021/6/8 14:04
*/
public class KafkaProducer {
private static Properties createProducerConfig() {
Properties properties = new Properties();
properties.put("bootstrap.servers", GlobalConfig.SINK_KAFKA_SERVERS);
properties.put("acks", GlobalConfig.PRODUCER_ACK);
properties.put("retries", GlobalConfig.RETRIES);
properties.put("linger.ms", GlobalConfig.LINGER_MS);
properties.put("request.timeout.ms", GlobalConfig.REQUEST_TIMEOUT_MS);
properties.put("batch.size", GlobalConfig.BATCH_SIZE);
properties.put("buffer.memory", GlobalConfig.BUFFER_MEMORY);
properties.put("max.request.size", GlobalConfig.MAX_REQUEST_SIZE);
properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, GlobalConfig.PRODUCER_KAFKA_COMPRESSION_TYPE);
CertUtils.chooseCert(GlobalConfig.SINK_KAFKA_SERVERS, properties);
return properties;
}
public static FlinkKafkaProducer<String> getKafkaProducer() {
FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<String>(
GlobalConfig.SINK_KAFKA_TOPIC,
new SimpleStringSchema(),
createProducerConfig(), Optional.empty());
//Enabling this option makes the producer only log failures instead of catching and rethrowing them
kafkaProducer.setLogFailuresOnly(true);
return kafkaProducer;
}
}
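
The job topology class is among the 27 changed files but is not shown above. As a hedged reference, a minimal wiring sketch of how the operators in this commit might be assembled; the class name LiveTrafficChartJob, the processing-time window type, and the 60-second window size are assumptions, not taken from this diff.

import com.zdjizhi.utils.functions.filter.DataTypeFilter;
import com.zdjizhi.utils.functions.keyby.DimensionKeyBy;
import com.zdjizhi.utils.functions.map.MetricsParseMap;
import com.zdjizhi.utils.functions.map.ResultFlatMap;
import com.zdjizhi.utils.functions.statistics.DispersionCountWindow;
import com.zdjizhi.utils.functions.statistics.MergeCountWindow;
import com.zdjizhi.utils.kafka.KafkaConsumer;
import com.zdjizhi.utils.kafka.KafkaProducer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

public class LiveTrafficChartJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.addSource(KafkaConsumer.getKafkaConsumer())
                // keep only traffic_application_protocol_stat metrics
                .filter(new DataTypeFilter())
                // parse the raw JSON into a (tags key, AppProtocol) tuple
                .map(new MetricsParseMap())
                // group by the tags-based key
                .keyBy(new DimensionKeyBy())
                // assumed 60s tumbling window; incremental reduce plus window-end timestamping
                .window(TumblingProcessingTimeWindows.of(Time.seconds(60)))
                .reduce(new DispersionCountWindow(), new MergeCountWindow())
                // expand each result into one record per protocol-stack level
                .flatMap(new ResultFlatMap())
                .addSink(KafkaProducer.getKafkaProducer());
        env.execute("LiveTrafficChart");
    }
}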