1. Adapt to TSG 23.07 and later: add data-transfer statistics metrics and push them to the pushgateway (a sketch of the metric push follows the diff below). (GAL-409)

2. The URL parameter domain, previously populated from the http_domain field, is now taken from the common_server_domain field (a rough sketch follows below). (GAL-410)
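The GAL-410 change itself lives in one of the other changed files, not in the topology shown below. As a rough illustration only, the lookup amounts to something like the following sketch, using the fastjson2 JSONObject this project already imports; the helper name resolveDomain and the fallback to http_domain are assumptions, not the actual implementation:

import com.alibaba.fastjson2.JSONObject;

public final class DomainFieldSketch {
    private DomainFieldSketch() {
    }

    // New behaviour (GAL-410): the "domain" URL parameter is read from common_server_domain.
    public static String resolveDomain(JSONObject log) {
        String domain = log.getString("common_server_domain");
        if (domain == null || domain.isEmpty()) {
            // Old source field, shown here only to illustrate what was replaced.
            domain = log.getString("http_domain");
        }
        return domain;
    }
}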
wangchengcheng
2023-09-28 15:59:26 +08:00
parent c3ad8140e8
commit 5c0a108393
77 changed files with 4726 additions and 2543 deletions

@@ -2,66 +2,47 @@ package com.zdjizhi.topology;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson2.JSONObject;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.functions.DealFileProcessFunction;
import com.zdjizhi.utils.functions.FilterNullFunction;
import com.zdjizhi.utils.functions.MapCompletedFunction;
import com.zdjizhi.utils.functions.TypeMapCompletedFunction;
import com.zdjizhi.utils.kafka.KafkaConsumer;
import com.zdjizhi.utils.kafka.KafkaProducer;
import org.apache.flink.streaming.api.datastream.DataStream;
import com.zdjizhi.operator.count.SendCountProcess;
import com.zdjizhi.operator.map.MapCompleted;
import com.zdjizhi.operator.map.TypeMapCompleted;
import com.zdjizhi.operator.process.DealFileProcessFunction;
import com.zdjizhi.tools.connections.kafka.KafkaConsumer;
import com.zdjizhi.tools.connections.kafka.KafkaProducer;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import java.util.Map;
/**
* @author 王成成
* @Package com.zdjizhi.topology
* @Description: Flink topology that reads raw logs from Kafka, completes and (optionally) type-checks them, and writes the results and file metadata to downstream Kafka topics.
* @date 2022.06.01
*/
public class LogFlowWriteTopology {
private static final Log logger = LogFactory.get();
public static void main(String[] args) {
final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
//Maximum time between two output flushes (in milliseconds)
environment.setBufferTimeout(FlowWriteConfig.BUFFER_TIMEOUT);
if (FlowWriteConfig.LOG_NEED_COMPLETE == 1) {
SingleOutputStreamOperator<JSONObject> completedStream;
if (FlowWriteConfig.LOG_TRANSFORM_TYPE == 0) {//Do not validate log field types.
completedStream = environment.addSource(KafkaConsumer.flinkConsumer()).name(FlowWriteConfig.SOURCE_KAFKA_TOPIC).setParallelism(FlowWriteConfig.SOURCE_PARALLELISM)
.process(new MapCompleted()).name("MapCompletedFunction").setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
SingleOutputStreamOperator<Map<String, Object>> streamSource = environment.addSource(KafkaConsumer.myDeserializationConsumer())
.setParallelism(FlowWriteConfig.SOURCE_PARALLELISM).name(FlowWriteConfig.SOURCE_KAFKA_TOPIC);
} else {//Weakly validate log field types; values may be cast according to the schema.
completedStream = environment.addSource(KafkaConsumer.flinkConsumer()).name(FlowWriteConfig.SOURCE_KAFKA_TOPIC).setParallelism(FlowWriteConfig.SOURCE_PARALLELISM)
.process(new TypeMapCompleted()).name("TypeMapCompletedFunction").setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
DataStream<Map<String, Object>> cleaningLog;
switch (FlowWriteConfig.LOG_TRANSFORM_TYPE) {
case 0:
//Process the raw log (completion, transformation, etc.); do not validate field types.
cleaningLog = streamSource.map(new MapCompletedFunction()).name("MapCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
break;
case 1:
//Process the raw log (completion, transformation, etc.); weakly validate field types and cast them according to the schema.
cleaningLog = streamSource.map(new TypeMapCompletedFunction()).name("TypeMapCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
break;
default:
//Process the raw log (completion, transformation, etc.); do not validate field types.
cleaningLog = streamSource.map(new MapCompletedFunction()).name("MapCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
}
//Handle data that carries unstructured logs
SingleOutputStreamOperator<String> process = cleaningLog.process(new DealFileProcessFunction());
SingleOutputStreamOperator<String> resultFileMetaData = process.getSideOutput(DealFileProcessFunction.metaToKafa).filter(new FilterNullFunction()).name("FilterAbnormalTrafficFileMetaData").setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
SingleOutputStreamOperator<String> result = process.filter(new FilterNullFunction()).name("FilterAbnormalData").setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
//Send file metadata to the TRAFFIC-FILE-METADATA topic
resultFileMetaData.addSink(KafkaProducer.getTrafficFileMetaKafkaProducer()).name("toTrafficFileMeta")
.setParallelism(FlowWriteConfig.FILE_DATA_SINK_PARALLELISM);
//Send the completed data to the Percent Kafka cluster
result.addSink(KafkaProducer.getPercentKafkaProducer()).name("toPercentKafka")
.setParallelism(FlowWriteConfig.PERCENT_SINK_PARALLELISM);
}
//Handle data that carries unstructured file fields
SingleOutputStreamOperator<String> dealFileProcessFunction = completedStream.process(new DealFileProcessFunction()).name("DealFileProcessFunction").uid("DealFile-ProcessFunction").setParallelism(FlowWriteConfig.DEAL_FILE_PARALLELISM);
//Send the completed data to the Percent Kafka cluster
dealFileProcessFunction.addSink(KafkaProducer.getPercentKafkaProducer()).name("ToPercentKafka").uid("To-Percent-Kafka").setParallelism(FlowWriteConfig.SINK_PERCENT_PARALLELISM);
//Send file metadata to the TRAFFIC-FILE-METADATA topic
dealFileProcessFunction.getSideOutput(DealFileProcessFunction.metaToKafa).addSink(KafkaProducer.getTrafficFileMetaKafkaProducer()).name("toTrafficFileMeta").uid("to-Traffic-FileMeta").setParallelism(FlowWriteConfig.SINK_FILE_DATA_PARALLELISM);
dealFileProcessFunction.getSideOutput(DealFileProcessFunction.dealFileMetircTag).process(new SendCountProcess()).name("SendCountProcess").uid("Send-Count-Process").setParallelism(1);
try {
environment.execute(args[0]);
} catch (Exception e) {
@@ -71,4 +52,5 @@ public class LogFlowWriteTopology {
}
}
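SendCountProcess, which implements the GAL-409 statistics referenced in the commit message, is defined in another changed file. A minimal sketch of how such a side-output operator could push transfer counters to a Prometheus Pushgateway is shown below; it assumes the io.prometheus simpleclient_pushgateway client and a placeholder gateway address, and is not the project's actual implementation:

import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Counter;
import io.prometheus.client.exporter.PushGateway;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;

public class SendCountSketch extends ProcessFunction<String, String> {
    private transient CollectorRegistry registry;
    private transient Counter records;
    private transient PushGateway gateway;

    @Override
    public void open(Configuration parameters) {
        registry = new CollectorRegistry();
        records = Counter.build()
                .name("log_flow_transferred_records_total")
                .help("Records observed on the statistics side output.")
                .register(registry);
        //Placeholder address; a real job would read it from FlowWriteConfig.
        gateway = new PushGateway("pushgateway:9091");
    }

    @Override
    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
        records.inc();
        //pushAdd keeps series pushed by other jobs on the gateway intact.
        gateway.pushAdd(registry, "log_flow_write");
        out.collect(value);
    }
}

In practice the push would be batched, for example on a processing-time timer, rather than issued per record, since every pushAdd is a separate HTTP request to the gateway.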