Replace toJSONString with a fastjson utility class

qidaijie
2021-11-11 09:14:09 +03:00
parent 8bf733385f
commit 60e4bcfca0
10 changed files with 67 additions and 45 deletions
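The Java changes themselves are not expanded in this view, but the commit title describes routing direct JSON.toJSONString calls through a shared fastjson helper. A minimal sketch of such a utility class, assuming fastjson 1.x; the name JsonUtil and the WriteMapNullValue feature are illustrative assumptions, not the project's actual code:

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;

/**
 * Hypothetical helper of the kind this commit introduces: call sites use
 * JsonUtil.toJson(obj) instead of JSON.toJSONString(obj), so serializer
 * settings live in one place.
 */
public final class JsonUtil {

    private JsonUtil() {
    }

    // Serialize any object, keeping null fields in the output.
    public static String toJson(Object obj) {
        return JSON.toJSONString(obj, SerializerFeature.WriteMapNullValue);
    }

    // Deserialize a JSON string back into the given type.
    public static <T> T fromJson(String json, Class<T> clazz) {
        return JSON.parseObject(json, clazz);
    }
}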

View File

@@ -1,23 +1,4 @@
#Number of retries for the producer
retries=0
#Maximum time after a Batch is created before it must be sent, whether or not the Batch is full
linger.ms=10
#If no response is received before the timeout, the client will resend the request when necessary
request.timeout.ms=30000
#The producer sends records in batches; batch size, default: 16384
batch.size=262144
#Size of the buffer used by the producer to cache messages
#128M
buffer.memory=134217728
#Maximum size of each request sent to the Kafka server, default 1048576
#10M
max.request.size=10485760
#====================Kafka Consumer====================#
#kafka source connection timeout
session.timeout.ms=60000
@@ -26,24 +7,48 @@ max.poll.records=3000
#kafka source poll bytes
max.partition.fetch.bytes=31457280
#====================Kafka Producer====================#
#Number of retries for the producer
retries=0
#hbase table name
hbase.table.name=subscriber_info
#Maximum time after a Batch is created before it must be sent, whether or not the Batch is full
linger.ms=10
#Mail default charset
mail.default.charset=UTF-8
#If no response is received before the timeout, the client will resend the request when necessary
request.timeout.ms=30000
#0 no validation, 1 strong-type validation, 2 weak-type validation
log.transform.type=0
#The producer sends records in batches; batch size, default: 16384
batch.size=262144
#Size of the buffer used by the producer to cache messages
#128M
buffer.memory=134217728
#Maximum size of each request sent to the Kafka server, default 1048576
#10M
max.request.size=10485760
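The producer keys above are standard Kafka client settings, so they can be loaded straight into a KafkaProducer. A hedged sketch, assuming the Kafka Java client; the file name kafka-producer.properties, the class name ProducerBootstrap, and the String serializers are assumptions for illustration:

import java.io.InputStream;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;

public final class ProducerBootstrap {
    public static KafkaProducer<String, String> create() throws Exception {
        Properties props = new Properties();
        // The file name is an assumption; retries, linger.ms,
        // request.timeout.ms, batch.size, buffer.memory and
        // max.request.size are all picked up from it verbatim.
        try (InputStream in = ProducerBootstrap.class
                .getResourceAsStream("/kafka-producer.properties")) {
            props.load(in);
        }
        props.put("bootstrap.servers", "10.231.12.4:9094");
        props.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(props);
    }
}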
#====================kafka default====================#
#kafka source protocol; SSL or SASL
kafka.source.protocol=SASL
#kafka sink protocol; SSL or SASL
kafka.sink.protocol=SSL
#kafka SASL authentication username
kafka.user=admin
#kafka SASL/SSL authentication password
kafka.pin=galaxy2019
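kafka.user and kafka.pin presumably feed the client's SASL login. A sketch of how they could map onto the stock Kafka client security properties; the property names are standard Kafka ones, but the mapping itself is an assumption, since the wiring code is outside this diff:

import java.util.Properties;

public final class KafkaSecurity {
    // Standard Kafka client security settings; how kafka.user / kafka.pin
    // are injected is an assumption, since the wiring code is outside
    // this diff.
    public static void applySaslSsl(Properties props, String user, String pin) {
        props.put("security.protocol", "SASL_SSL");
        props.put("sasl.mechanism", "PLAIN");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"" + user + "\" password=\"" + pin + "\";");
    }
}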
#====================Topology Default====================#
#hbase table name
hbase.table.name=subscriber_info
#Mail default charset
mail.default.charset=UTF-8
#0 no validation, 1 strong-type validation, 2 weak-type validation
log.transform.type=2
#Maximum time between two outputs (in milliseconds)
buffer.timeout=100000
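buffer.timeout reads like Flink's output buffer flush interval, and the group.id below (flink-test) suggests a Flink job. Under that assumption, the value would typically be applied like this sketch, which is not taken from the project's actual code:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public final class TopologyMain {
    public static void main(String[] args) {
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();
        // buffer.timeout: wait at most 100000 ms before flushing a
        // partially filled output buffer (latency vs. throughput).
        env.setBufferTimeout(100000L);
    }
}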

View File

@@ -1,7 +1,7 @@
#--------------------------------Address configuration------------------------------#
#Kafka source address
source.kafka.servers=10.224.11.14:9094,10.224.11.15:9094,10.224.11.16:9094,10.224.11.17:9094,10.224.11.18:9094,10.224.11.19:9094,10.224.11.20:9094,10.224.11.21:9094,10.224.11.22:9094,10.224.11.23:9094
source.kafka.servers=10.231.12.4:9094
#Kafka sink (output) address
sink.kafka.servers=10.224.11.14:9094,10.224.11.15:9094,10.224.11.16:9094,10.224.11.17:9094,10.224.11.18:9094,10.224.11.19:9094,10.224.11.20:9094,10.224.11.21:9094,10.224.11.22:9094,10.224.11.23:9094
@@ -10,7 +10,7 @@ sink.kafka.servers=10.224.11.14:9094,10.224.11.15:9094,10.224.11.16:9094,10.224.
zookeeper.servers=10.224.11.11:2181,10.224.11.12:2181,10.224.11.13:2181
#HBase zookeeper address, used to connect to HBase
hbase.zookeeper.servers=10.224.11.11:2181,10.224.11.12:2181,10.224.11.13:2181
hbase.zookeeper.servers=10.231.12.4:2181
#--------------------------------HTTP/Location library------------------------------#
#Location library address
@@ -25,13 +25,13 @@ app.id.http=http://10.224.11.244:9999/open-api/appDicList
#--------------------------------Kafka consumer group info------------------------------#
#kafka topic for receiving data
source.kafka.topic=test
source.kafka.topic=SESSION-RECORD
#Output topic for completed (enriched) data
sink.kafka.topic=test-result
#When reading the topic, the consumer offset info for this spout id is stored and can be keyed by this topology name; the stored offset position ensures the next read does not re-consume data
group.id=flink-test
group.id=flink-test-1
#Producer compression mode: none or snappy
producer.kafka.compression.type=none
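Because offsets are stored per group.id, renaming the group from flink-test to flink-test-1 makes the next run start from a fresh offset position instead of resuming. A consumer sketch under that reading; auto.offset.reset and the class name are assumptions, since neither appears in this file:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public final class ConsumerBootstrap {
    public static KafkaConsumer<String, String> create() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "10.231.12.4:9094");
        // Offsets are stored per group.id, so the rename above starts
        // this group from a fresh position.
        props.put("group.id", "flink-test-1");
        // Not set in this file; an assumption for a runnable example.
        props.put("auto.offset.reset", "latest");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("SESSION-RECORD"));
        return consumer;
    }
}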