Code optimization: use a scheduled thread pool to flush batched writes into ClickHouse
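The commit message describes flushing buffered rows into ClickHouse from a scheduled thread pool, but the sink implementation behind LogService.getLogCKSink is not part of this diff. A minimal sketch of that technique, assuming a plain JDBC connection and an illustrative two-column insert (the class name, SQL, JDBC URL, and column mapping below are hypothetical, not the project's code):

// Hypothetical sketch: a Flink RichSinkFunction that buffers rows and lets a
// ScheduledExecutorService flush them into ClickHouse over JDBC at a fixed interval.
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledBatchClickHouseSink extends RichSinkFunction<Map<String, Object>> {

    private final String insertSql;          // e.g. "INSERT INTO conn_log (src_ip, dst_ip) VALUES (?, ?)"
    private final long flushIntervalSeconds; // how often the scheduled thread flushes
    private final int maxBatchSize;          // flush early once this many rows are buffered

    private transient Connection connection;
    private transient ScheduledExecutorService scheduler;
    private transient List<Map<String, Object>> buffer;

    public ScheduledBatchClickHouseSink(String insertSql, long flushIntervalSeconds, int maxBatchSize) {
        this.insertSql = insertSql;
        this.flushIntervalSeconds = flushIntervalSeconds;
        this.maxBatchSize = maxBatchSize;
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        buffer = new ArrayList<>();
        connection = DriverManager.getConnection("jdbc:clickhouse://localhost:8123/default");
        scheduler = Executors.newSingleThreadScheduledExecutor();
        // The scheduled thread drives time-based flushes independently of record arrival.
        scheduler.scheduleAtFixedRate(this::flush, flushIntervalSeconds, flushIntervalSeconds, TimeUnit.SECONDS);
    }

    @Override
    public synchronized void invoke(Map<String, Object> value, Context context) {
        buffer.add(value);
        if (buffer.size() >= maxBatchSize) {
            flush(); // size-based flush in addition to the timed one
        }
    }

    private synchronized void flush() {
        if (buffer.isEmpty()) {
            return;
        }
        try (PreparedStatement ps = connection.prepareStatement(insertSql)) {
            for (Map<String, Object> row : buffer) {
                ps.setObject(1, row.get("src_ip")); // illustrative column mapping
                ps.setObject(2, row.get("dst_ip"));
                ps.addBatch();
            }
            ps.executeBatch();
            buffer.clear();
        } catch (Exception e) {
            throw new RuntimeException("ClickHouse batch flush failed", e);
        }
    }

    @Override
    public void close() throws Exception {
        scheduler.shutdown();
        flush(); // drain whatever is still buffered
        connection.close();
        super.close();
    }
}

The diff below covers the job-side changes: raw-log ClickHouse inserts become optional behind SINK_CK_RAW_LOG_INSERT_OPEN, the Kafka source filter is expanded, and all aggregation windows move from processing time to event time.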
@@ -7,8 +7,9 @@ import com.zdjizhi.etl.dns.SketchTimeMapFunction;
 import com.zdjizhi.utils.kafka.KafkaConsumer;
 import org.apache.flink.api.common.eventtime.WatermarkStrategy;
 import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
-import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
+import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
 import org.apache.flink.streaming.api.windowing.time.Time;

 import java.time.Duration;
@@ -20,22 +21,23 @@ import static com.zdjizhi.common.FlowWriteConfig.*;

 public class ConnLogService {

-    public static void connLogStream(StreamExecutionEnvironment env) throws Exception{
+    public static void connLogStream(StreamExecutionEnvironment env) throws Exception {
         //connection
         DataStream<Map<String, Object>> connSource = getLogSource(env, SOURCE_KAFKA_TOPIC_CONNECTION);
         //sketch
         DataStream<Map<String, Object>> sketchSource = getLogSource(env, SOURCE_KAFKA_TOPIC_SKETCH);

-        //write to the CK sink, processed in batches
-        LogService.getLogCKSink(connSource, SINK_CK_TABLE_CONNECTION);
-
-        LogService.getLogCKSink(sketchSource, SINK_CK_TABLE_SKETCH);
-
         //transform
         DataStream<Map<String, Object>> connTransformStream = getConnTransformStream(connSource);

-        //write to the CK connection relation table
-        LogService.getLogCKSink(connTransformStream, SINK_CK_TABLE_RELATION_CONNECTION);
+        if (SINK_CK_RAW_LOG_INSERT_OPEN == 1) {
+            //write to the CK sink, processed in batches
+            LogService.getLogCKSink(connSource, SINK_CK_TABLE_CONNECTION);
+            LogService.getLogCKSink(sketchSource, SINK_CK_TABLE_SKETCH);
+            //write to the CK connection relation table
+            LogService.getLogCKSink(connTransformStream, SINK_CK_TABLE_RELATION_CONNECTION);
+        } else {
+            LogService.getLogKafkaSink(connTransformStream, SINK_KAFKA_TOPIC_RELATION_CONNECTION);
+        }

         DataStream<Map<String, Object>> sketchTransformStream = getSketchTransformStream(sketchSource);
@@ -57,14 +59,31 @@ public class ConnLogService {

         String timeFilter = SOURCE_KAFKA_TOPIC_CONNECTION.equals(source) ? "conn_start_time" : "sketch_start_time";

-        DataStream<Map<String, Object>> sourceStream = env.addSource(KafkaConsumer.myDeserializationConsumer(source))
+        SingleOutputStreamOperator<Map<String, Object>> filterStream = env.addSource(KafkaConsumer.myDeserializationConsumer(source))
                 .setParallelism(SOURCE_PARALLELISM)
-                .filter(x -> Objects.nonNull(x) && Convert.toLong(x.get(timeFilter)) > 0)
-                .map(SOURCE_KAFKA_TOPIC_CONNECTION.equals(source) ? new ConnTimeMapFunction() : new SketchTimeMapFunction())
-                .setParallelism(SOURCE_PARALLELISM)
-                .name(source)
-                .setParallelism(SOURCE_PARALLELISM);
+                .filter(x -> {
+                    if (Objects.isNull(x) || Convert.toLong(x.get(timeFilter)) <= 0) {
+                        return false;
+                    }
+                    if (SOURCE_KAFKA_TOPIC_CONNECTION.equals(source)) {
+                        if (String.valueOf(x.get("total_cs_pkts")).length() >= AGGREGATE_MAX_VALUE_LENGTH || String.valueOf(x.get("total_sc_pkts")).length() >= AGGREGATE_MAX_VALUE_LENGTH ||
+                                String.valueOf(x.get("total_cs_bytes")).length() >= AGGREGATE_MAX_VALUE_LENGTH || String.valueOf(x.get("total_sc_bytes")).length() >= AGGREGATE_MAX_VALUE_LENGTH) {
+                            return false;
+                        }
+                        return true;
+                    } else if (SOURCE_KAFKA_TOPIC_SKETCH.equals(source)) {
+                        if (String.valueOf(x.get("sketch_sessions")).length() >= AGGREGATE_MAX_VALUE_LENGTH || String.valueOf(x.get("sketch_packets")).length() >= AGGREGATE_MAX_VALUE_LENGTH ||
+                                String.valueOf(x.get("sketch_bytes")).length() >= AGGREGATE_MAX_VALUE_LENGTH) {
+                            return false;
+                        }
+                        return true;
+                    } else {
+                        return false;
+                    }
+                }).setParallelism(SOURCE_PARALLELISM);
+        DataStream<Map<String, Object>> sourceStream = filterStream.map(SOURCE_KAFKA_TOPIC_CONNECTION.equals(source) ? new ConnTimeMapFunction() : new SketchTimeMapFunction())
+                .setParallelism(SOURCE_PARALLELISM)
+                .name(source);
         return sourceStream;
     }
@@ -77,7 +96,7 @@ public class ConnLogService {
                 }))
                 .setParallelism(TRANSFORM_PARALLELISM)
                 .keyBy(new IpKeysSelector())
-                .window(TumblingProcessingTimeWindows.of(Time.seconds(LOG_AGGREGATE_DURATION)))
+                .window(TumblingEventTimeWindows.of(Time.seconds(LOG_AGGREGATE_DURATION)))
                 .process(new ConnProcessFunction())
                 .setParallelism(TRANSFORM_PARALLELISM);
         return connTransformStream;
@@ -88,7 +107,7 @@ public class ConnLogService {
                 .<Map<String, Object>>forBoundedOutOfOrderness(Duration.ofSeconds(FLINK_WATERMARK_MAX_DELAY_TIME))
                 .withTimestampAssigner((event, timestamp) -> TypeUtils.castToLong(event.get("sketch_start_time")) * 1000))
                 .keyBy(new IpKeysSelector())
-                .window(TumblingProcessingTimeWindows.of(Time.seconds(LOG_AGGREGATE_DURATION)))
+                .window(TumblingEventTimeWindows.of(Time.seconds(LOG_AGGREGATE_DURATION)))
                 .process(new SketchProcessFunction());
         return sketchTransformStream;
     }
@@ -96,7 +115,7 @@ public class ConnLogService {
     private static DataStream<Map<String, Object>> getConnUnion(DataStream<Map<String, Object>> connTransformStream, DataStream<Map<String, Object>> sketchTransformStream) throws Exception {
         DataStream<Map<String, Object>> ip2ipGraph = connTransformStream.union(sketchTransformStream)
                 .keyBy(new IpKeysSelector())
-                .window(TumblingProcessingTimeWindows.of(Time.seconds(LOG_AGGREGATE_DURATION_GRAPH)))
+                .window(TumblingEventTimeWindows.of(Time.seconds(LOG_AGGREGATE_DURATION_GRAPH)))
                 .process(new Ip2IpGraphProcessFunction())
                 .setParallelism(TRANSFORM_PARALLELISM);
         return ip2ipGraph;
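All three aggregation windows above move from TumblingProcessingTimeWindows to TumblingEventTimeWindows. Event-time windows fire only when the watermark passes the end of the window, so each aggregated stream depends on a timestamp and watermark assignment like the forBoundedOutOfOrderness strategy visible in the sketch-stream hunk. A self-contained sketch of that pattern (the key selector, the src_ip field, and the constant values are stand-ins, not the project's code):

// Hypothetical illustration of the event-time pattern used in this commit:
// assign timestamps and bounded-out-of-orderness watermarks, then aggregate
// in event-time tumbling windows.
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

import java.time.Duration;
import java.util.Map;

public class EventTimeWindowSketch {

    public static DataStream<Map<String, Object>> aggregate(DataStream<Map<String, Object>> source) {
        long maxDelaySeconds = 10; // stand-in for FLINK_WATERMARK_MAX_DELAY_TIME
        long windowSeconds = 60;   // stand-in for LOG_AGGREGATE_DURATION

        return source
                // Record timestamps are epoch seconds, so scale to milliseconds for Flink.
                .assignTimestampsAndWatermarks(WatermarkStrategy
                        .<Map<String, Object>>forBoundedOutOfOrderness(Duration.ofSeconds(maxDelaySeconds))
                        .withTimestampAssigner((event, ts) -> ((Number) event.get("sketch_start_time")).longValue() * 1000))
                // Key by a field and aggregate per event-time window; the window fires
                // only once the watermark passes the window end.
                .keyBy(new KeySelector<Map<String, Object>, String>() {
                    @Override
                    public String getKey(Map<String, Object> event) {
                        return String.valueOf(event.get("src_ip"));
                    }
                })
                .window(TumblingEventTimeWindows.of(Time.seconds(windowSeconds)))
                .reduce((a, b) -> a); // placeholder reducer; the real job uses process functions
    }
}

One practical consequence of the switch: if the watermark stalls (for example, on an idle Kafka partition), event-time windows stop firing until it advances, which processing-time windows would not do.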