Replace toJSONString with the fastjson utility class

qidaijie
2021-11-11 09:14:09 +03:00
parent 8bf733385f
commit 60e4bcfca0
10 changed files with 67 additions and 45 deletions

View File

@@ -29,6 +29,7 @@ public class FlowWriteConfig {
public static final String MAIL_DEFAULT_CHARSET = FlowWriteConfigurations.getStringProperty(0, "mail.default.charset");
public static final String HBASE_TABLE_NAME = FlowWriteConfigurations.getStringProperty(1, "hbase.table.name");
public static final Integer LOG_TRANSFORM_TYPE = FlowWriteConfigurations.getIntProperty(1, "log.transform.type");
public static final Integer BUFFER_TIMEOUT = FlowWriteConfigurations.getIntProperty(1, "buffer.timeout");
/**
* kafka source config
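
The new buffer.timeout key is read through the same accessor as the existing settings. A minimal sketch of a getIntProperty-style lookup, assuming the first argument is a fallback default (FlowWriteConfigurations' actual semantics may differ):

import java.util.Properties;

public final class ConfigSketch {
    private static final Properties PROPS = new Properties(); //loaded from the flow-write config file

    //Assumption: the int parameter is the value returned when the key is missing.
    public static Integer getIntProperty(int defaultValue, String key) {
        String raw = PROPS.getProperty(key);
        if (raw == null || raw.trim().isEmpty()) {
            return defaultValue;
        }
        return Integer.parseInt(raw.trim());
    }
}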

View File

@@ -28,8 +28,8 @@ public class LogFlowWriteTopology {
//Enable checkpointing; the interval sets how often checkpoints are triggered (in milliseconds)
// environment.enableCheckpointing(5000);
//
environment.setBufferTimeout(5000);
//Maximum time between two output flushes (in milliseconds)
environment.setBufferTimeout(FlowWriteConfig.BUFFER_TIMEOUT);
DataStreamSource<String> streamSource = environment.addSource(Consumer.getKafkaConsumer())
.setParallelism(FlowWriteConfig.SOURCE_PARALLELISM);
@@ -41,26 +41,31 @@ public class LogFlowWriteTopology {
//Process the raw log (completion, transformation, etc.); log field types are not validated.
cleaningLog = streamSource.map(new MapCompletedFunction()).name("MapCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
break;
case 1:
//Process the raw log (completion, transformation, etc.); log field types must match the schema exactly.
cleaningLog = streamSource.map(new ObjectCompletedFunction()).name("ObjectCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
break;
case 2:
//Process the raw log (completion, transformation, etc.); log field types are weakly validated and may be coerced according to the schema.
cleaningLog = streamSource.map(new TypeMapCompletedFunction()).name("TypeMapCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
break;
default:
//Process the raw log (completion, transformation, etc.); log field types are not validated.
cleaningLog = streamSource.map(new MapCompletedFunction()).name("MapCompletedFunction")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
}
//Filter out empty records so they are not sent to Kafka
DataStream<String> result = cleaningLog.filter(new FilterNullFunction()).name("FilterAbnormalData")
.setParallelism(FlowWriteConfig.TRANSFORM_PARALLELISM);
//Send the data to Kafka
result.addSink(Producer.getKafkaProducer()).name("LogSinkKafka")
.setParallelism(FlowWriteConfig.SINK_PARALLELISM);
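
For reference, a minimal stand-alone Flink sketch (class names and element values are placeholders, not from this repo) of what setBufferTimeout controls: the maximum time a partially filled network buffer waits before being flushed downstream, trading latency against throughput. The commit replaces the hard-coded 5000 ms with the buffer.timeout setting.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BufferTimeoutDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //Maximum time (ms) a partially filled network buffer waits before it is
        //flushed downstream; lower values cut latency, higher values raise throughput.
        env.setBufferTimeout(5000);
        env.fromElements("{\"raw\":\"log\"}")
                .map((MapFunction<String, String>) value -> value) //stand-in for the *CompletedFunction mappers
                .print();
        env.execute("buffer-timeout-demo");
    }
}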

View File

@@ -54,11 +54,11 @@ public class TransFormMap {
}
return JsonMapper.toJsonString(jsonMap);
} else {
return "";
return null;
}
} catch (RuntimeException e) {
logger.error("解析补全日志信息过程异常,异常信息:" + e + "\n" + message);
return "";
return null;
}
}

View File

@@ -62,11 +62,11 @@ public class TransFormObject {
}
return JsonMapper.toJsonString(object);
} else {
return "";
return null;
}
} catch (RuntimeException e) {
logger.error("解析补全日志信息过程异常,异常信息:" + e + "\n" + message);
return "";
return null;
}
}
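
Both TransFormMap and TransFormObject now return null instead of "" when a log cannot be parsed, so the downstream filter removes such records before the sink. A minimal sketch of that filter, assuming FilterNullFunction behaves roughly like this (the actual class in this repo may differ):

import org.apache.flink.api.common.functions.FilterFunction;

public class FilterNullFunction implements FilterFunction<String> {
    @Override
    public boolean filter(String value) {
        //Drop records the transform stage marked as invalid (null or empty),
        //so they are never written to the Kafka sink.
        return value != null && !value.isEmpty();
    }
}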

View File

@@ -3,6 +3,8 @@ package com.zdjizhi.utils.general;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.zdjizhi.common.FlowWriteConfig;
import com.zdjizhi.utils.JsonMapper;
import com.zdjizhi.utils.StringUtil;
@@ -12,6 +14,8 @@ import com.zdjizhi.utils.json.JsonTypeUtils;
import java.util.ArrayList;
import java.util.Map;
import static com.alibaba.fastjson.serializer.SerializerFeature.WriteMapNullValue;
/**
* Description: utility class for transforming and completing logs
@@ -53,13 +57,19 @@ public class TransFormTypeMap {
String param = strings[3];
functionSet(function, jsonMap, appendToKeyName, appendToKeyValue, logValue, param);
}
return JsonMapper.toJsonString(jsonMap);
// return JsonMapper.toJsonString(jsonMap);
//fastjson test
return JSONObject.toJSONString(jsonMap,
SerializerFeature.DisableCircularReferenceDetect,
SerializerFeature.WriteNullStringAsEmpty,
SerializerFeature.WriteNullNumberAsZero);
} else {
return "";
return null;
}
} catch (RuntimeException e) {
logger.error("解析补全日志信息过程异常,异常信息:" + e + "\n" + message);
return "";
return null;
}
}
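
For context, a minimal self-contained demo (the LogRecord bean is hypothetical, for illustration only) of how the three SerializerFeature flags used above change fastjson's output compared to its defaults:

import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;

public class FastjsonFeatureDemo {
    //Hypothetical log record with fields left null.
    public static class LogRecord {
        private String message;
        private Integer bytes;
        public String getMessage() { return message; }
        public Integer getBytes() { return bytes; }
    }

    public static void main(String[] args) {
        LogRecord record = new LogRecord();
        //Default behaviour: null fields are omitted, printing {}
        System.out.println(JSONObject.toJSONString(record));
        //With the commit's flags:
        //DisableCircularReferenceDetect suppresses fastjson's $ref placeholders,
        //WriteNullStringAsEmpty renders null Strings as "",
        //WriteNullNumberAsZero renders null numbers as 0,
        //printing {"bytes":0,"message":""}
        System.out.println(JSONObject.toJSONString(record,
                SerializerFeature.DisableCircularReferenceDetect,
                SerializerFeature.WriteNullStringAsEmpty,
                SerializerFeature.WriteNullNumberAsZero));
    }
}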

View File

@@ -5,6 +5,7 @@ import com.zdjizhi.common.FlowWriteConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.config.SslConfigs;
@@ -26,7 +27,6 @@ public class Consumer {
properties.put("max.partition.fetch.bytes", FlowWriteConfig.MAX_PARTITION_FETCH_BYTES);
properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
CertUtils.chooseCert(FlowWriteConfig.KAFKA_SOURCE_PROTOCOL,properties);
return properties;
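
A minimal sketch of how such a consumer is typically assembled (broker, group, and topic names here are placeholders, not this repo's values; the SSL entries added by CertUtils.chooseCert are omitted):

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import java.util.Properties;

public class ConsumerSketch {
    public static FlinkKafkaConsumer<String> build() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "kafka:9092"); //placeholder
        properties.put("group.id", "log-flow-write");      //placeholder
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        //CertUtils.chooseCert(protocol, properties) would add SSL settings here.
        return new FlinkKafkaConsumer<>("log-topic", new SimpleStringSchema(), properties);
    }
}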

View File

@@ -43,6 +43,7 @@ public class Producer {
kafkaProducer.setLogFailuresOnly(false);
// kafkaProducer.setWriteTimestampToKafka(true);
return kafkaProducer;
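
setLogFailuresOnly(false) makes a failed Kafka write fail the job (triggering Flink's recovery) rather than merely logging the error, so records are not silently dropped. A minimal sketch of the producer side, again with placeholder broker and topic names:

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import java.util.Properties;

public class ProducerSketch {
    public static FlinkKafkaProducer<String> build() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "kafka:9092"); //placeholder
        FlinkKafkaProducer<String> kafkaProducer =
                new FlinkKafkaProducer<>("log-topic", new SimpleStringSchema(), properties);
        //false: write failures fail the job instead of only being logged.
        kafkaProducer.setLogFailuresOnly(false);
        return kafkaProducer;
    }
}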