Submit initial version of the code that connects to Nacos to dynamically fetch the schema. GAL-144

qidaijie
2022-03-22 11:46:34 +08:00
parent 3f6af58d78
commit 5ab76e4335
22 changed files with 733 additions and 339 deletions
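The commit message describes pulling the schema from Nacos at runtime; the Nacos-facing code itself is not in the hunks shown below. As a minimal sketch of such a fetch using the official nacos-client API, assuming a placeholder server address and hypothetical dataId/group names that are not taken from this change:

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;

import java.util.Properties;

public class NacosSchemaFetchSketch {
    public static void main(String[] args) throws NacosException {
        // Placeholder Nacos server address; the real address would come from job configuration.
        Properties props = new Properties();
        props.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848");

        ConfigService configService = NacosFactory.createConfigService(props);

        // dataId and group are hypothetical names for the schema entry in Nacos.
        String schemaText = configService.getConfig("log-schema", "DEFAULT_GROUP", 5000);
        System.out.println("fetched schema: " + schemaText);
    }
}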

LogFlowWriteTopology.java

@@ -10,6 +10,7 @@ import com.zdjizhi.utils.kafka.KafkaConsumer;
 import com.zdjizhi.utils.kafka.KafkaProducer;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.DataStreamSource;
+import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import java.util.Map;
@@ -30,8 +31,9 @@ public class LogFlowWriteTopology {
         environment.setBufferTimeout(FlowWriteConfig.BUFFER_TIMEOUT);
         if (FlowWriteConfig.LOG_NEED_COMPLETE == 1) {
-            DataStreamSource<Map<String, Object>> streamSource = environment.addSource(KafkaConsumer.myDeserializationConsumer())
-                    .setParallelism(FlowWriteConfig.SOURCE_PARALLELISM);
+            SingleOutputStreamOperator<Map<String, Object>> streamSource = environment.addSource(KafkaConsumer.myDeserializationConsumer())
+                    .setParallelism(FlowWriteConfig.SOURCE_PARALLELISM).name(FlowWriteConfig.SOURCE_KAFKA_TOPIC);
             DataStream<String> cleaningLog;
             switch (FlowWriteConfig.LOG_TRANSFORM_TYPE) {
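A side note on the type change in the hunk above: in Flink's DataStream API, name(...) is declared on SingleOutputStreamOperator and returns that type, so once the source operator is named the variable can no longer be declared as DataStreamSource. A small, self-contained illustration, with arbitrary operator names that are not taken from this commit:

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class OperatorNamingSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // fromElements (like addSource) yields a DataStreamSource, and setParallelism keeps that type.
        DataStreamSource<String> raw = env.fromElements("a", "b").setParallelism(1);

        // name(...) returns the wider SingleOutputStreamOperator, hence the changed declaration in the diff.
        SingleOutputStreamOperator<String> named = raw.name("example-source-topic");

        named.print();
        env.execute("operator-naming-sketch");
    }
}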
@@ -56,7 +58,7 @@ public class LogFlowWriteTopology {
                     .setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
             // Send data to Kafka
-            result.addSink(KafkaProducer.getKafkaProducer()).name("LogSinkKafka")
+            result.addSink(KafkaProducer.getKafkaProducer()).name(FlowWriteConfig.SINK_KAFKA_TOPIC)
                     .setParallelism(FlowWriteConfig.SINK_PARALLELISM);
         } else {
             DataStreamSource<String> streamSource = environment.addSource(KafkaConsumer.flinkConsumer())
@@ -67,7 +69,7 @@ public class LogFlowWriteTopology {
                     .setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
             // Send data to Kafka
-            result.addSink(KafkaProducer.getKafkaProducer()).name("LogSinkKafka")
+            result.addSink(KafkaProducer.getKafkaProducer()).name(FlowWriteConfig.SINK_KAFKA_TOPIC)
                     .setParallelism(FlowWriteConfig.SINK_PARALLELISM);
         }
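The hunks in this file only rename operators after the configured Kafka topics; how the schema stays current at runtime is not visible in this diff. One plausible shape, purely as an assumption about the "dynamic" part of the commit message, is an initial getConfig plus a Nacos listener that swaps in updated schema text whenever the config entry changes (dataId, group, and the server address below are hypothetical):

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.AbstractListener;
import com.alibaba.nacos.api.exception.NacosException;

import java.util.Properties;
import java.util.concurrent.atomic.AtomicReference;

public class NacosSchemaListenerSketch {
    // Latest schema text, shared with whatever deserializes incoming log records.
    private static final AtomicReference<String> CURRENT_SCHEMA = new AtomicReference<>();

    public static void main(String[] args) throws NacosException {
        Properties props = new Properties();
        props.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848"); // placeholder address

        ConfigService configService = NacosFactory.createConfigService(props);

        // Initial load, then subscribe for pushed updates; dataId and group are hypothetical.
        CURRENT_SCHEMA.set(configService.getConfig("log-schema", "DEFAULT_GROUP", 5000));
        configService.addListener("log-schema", "DEFAULT_GROUP", new AbstractListener() {
            @Override
            public void receiveConfigInfo(String configInfo) {
                // Swap in the new schema whenever the Nacos entry changes.
                CURRENT_SCHEMA.set(configInfo);
            }
        });
    }
}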