Modify configuration file names

Change the statistics logic to a two-layer window calculation
qidaijie
2021-11-20 11:30:08 +03:00
parent 49f78a2f49
commit 2a32156c9e
15 changed files with 275 additions and 126 deletions

View File

@@ -1,29 +1,45 @@
#Number of producer retries
#====================Kafka Consumer====================#
#kafka source connection timeout
session.timeout.ms=60000
#kafka source poll
max.poll.records=3000
#kafka source poll bytes
max.partition.fetch.bytes=31457280
#====================Kafka Producer====================#
#Number of producer retries
retries=0
#Maximum time a batch may wait after creation before it must be sent, whether or not it is full
linger.ms=5
#Maximum time a batch may wait after creation before it must be sent, whether or not it is full
linger.ms=10
#If no response is received before the timeout, the client will resend the request when necessary
request.timeout.ms=30000
#The producer sends records in batches; batch size, default: 16384
batch.size=262144
#Size of the buffer the producer uses to cache messages
buffer.memory=67108864
#Maximum size of each request sent to the Kafka server, default 1048576
max.request.size=5242880
#kafka SASL authentication username
kafka.user=admin
#kafka SASL and SSL authentication password
kafka.pin=galaxy2019
#Size of the buffer the producer uses to cache messages
#128M
buffer.memory=134217728
#Maximum size of each request sent to the Kafka server, default 1048576
#10M
max.request.size=10485760
#====================kafka default====================#
#kafka source protocol; SSL or SASL
kafka.source.protocol=SASL
#kafka sink protocol; SSL or SASL
kafka.sink.protocol=SASL
#kafka SASL authentication username
kafka.user=admin
#kafka SASL and SSL authentication password
kafka.pin=galaxy2019
#====================Topology Default====================#
#Maximum time between two outputs (in milliseconds)
buffer.timeout=100
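For reference, below is a minimal sketch of how the producer tuning keys and the kafka.user / kafka.pin credentials in this file could be wired into a KafkaProducer. It assumes plain Java with the standard kafka-clients library; the ProducerFactory class, the configPath and bootstrapServers parameters, and the SASL_PLAINTEXT / PLAIN mechanism are assumptions, not part of this commit.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerFactory {

    // Hypothetical helper: builds a KafkaProducer from the properties file shown above.
    public static KafkaProducer<String, String> create(String configPath, String bootstrapServers) throws IOException {
        Properties cfg = new Properties();
        try (FileInputStream in = new FileInputStream(configPath)) {
            cfg.load(in);
        }

        Properties p = new Properties();
        p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        p.put(ProducerConfig.RETRIES_CONFIG, cfg.getProperty("retries", "0"));
        p.put(ProducerConfig.LINGER_MS_CONFIG, cfg.getProperty("linger.ms", "10"));
        p.put(ProducerConfig.BATCH_SIZE_CONFIG, cfg.getProperty("batch.size", "262144"));
        p.put(ProducerConfig.BUFFER_MEMORY_CONFIG, cfg.getProperty("buffer.memory", "134217728"));
        p.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, cfg.getProperty("max.request.size", "10485760"));
        p.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, cfg.getProperty("request.timeout.ms", "30000"));
        p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // If the sink protocol is SASL, pass the kafka.user / kafka.pin credentials through a
        // SASL/PLAIN JAAS entry (the PLAIN mechanism is an assumption).
        if ("SASL".equalsIgnoreCase(cfg.getProperty("kafka.sink.protocol", "SASL"))) {
            p.put("security.protocol", "SASL_PLAINTEXT");
            p.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
            p.put(SaslConfigs.SASL_JAAS_CONFIG, String.format(
                "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";",
                cfg.getProperty("kafka.user"), cfg.getProperty("kafka.pin")));
        }
        return new KafkaProducer<>(p);
    }
}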

View File

@@ -1,34 +1,33 @@
#--------------------------------Address configuration------------------------------#
#Input Kafka broker address
input.kafka.servers=192.168.44.12:9094
#source.kafka.servers=10.224.11.14:9094,10.224.11.15:9094,10.224.11.16:9094,10.224.11.17:9094,10.224.11.18:9094,10.224.11.19:9094,10.224.11.20:9094,10.224.11.21:9094,10.224.11.22:9094,10.224.11.23:9094
source.kafka.servers=10.221.12.4:9094
#Output Kafka broker address
output.kafka.servers=192.168.44.12:9094
sink.kafka.servers=10.224.11.14:9094,10.224.11.15:9094,10.224.11.16:9094,10.224.11.17:9094,10.224.11.18:9094,10.224.11.19:9094,10.224.11.20:9094,10.224.11.21:9094,10.224.11.22:9094,10.224.11.23:9094
#--------------------------------HTTP------------------------------#
#Kafka certificate path
tools.library=D:\\K18-Phase2\\tsgSpace\\dat\\tsg\\
tools.library=D:\\workerspace\\dat\\
#Schema endpoint on the gateway
#schema.http=http://192.168.44.12:9999/metadata/schema/v1/fields/liveChart_interim
schema.http=http://192.168.44.67:9999/metadata/schema/v1/fields/liveChart_session
schema.http=http://10.224.11.244:9999/metadata/schema/v1/fields/liveChart_session
#Gateway endpoint for fetching APP_ID
app.id.http=http://192.168.44.67:9999/open-api/appDicList
app.id.http=http://10.224.11.244:9999/open-api/appDicList
#--------------------------------Kafka consumer group info------------------------------#
#Kafka topic for incoming data
input.kafka.topic=test
#input.kafka.topic=SESSION-RECORD
#input.kafka.topic=INTERIM-SESSION-RECORD
source.kafka.topic=SESSION-RECORD
#source.kafka.topic=test
#Output topic for the enriched data
output.kafka.topic=test-result
sink.kafka.topic=test-result
#Consumer group for reading the topic; the stored consumer offsets for this spout id are named after the topology, and the stored offset position determines where the next read resumes without duplicating data
group.id=liveCharts-session-test-20210811-1
group.id=mytest-211119-1
#Producer compression mode: none or snappy
producer.kafka.compression.type=none
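A small sketch of how the broker, group, and consumer tuning keys from the two properties files could be mapped onto the standard kafka-clients consumer names. The ConsumerProps class and the idea of merging the two files are assumptions; only the property keys come from this commit.

import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;

public class ConsumerProps {

    // Hypothetical helper: merges the Kafka tuning file (session.timeout.ms, max.poll.records, ...)
    // with the topology file (source.kafka.servers, group.id) into kafka-clients consumer settings.
    public static Properties build(Properties kafkaCfg, Properties topoCfg) {
        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, topoCfg.getProperty("source.kafka.servers"));
        p.put(ConsumerConfig.GROUP_ID_CONFIG, topoCfg.getProperty("group.id"));
        p.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, kafkaCfg.getProperty("session.timeout.ms", "60000"));
        p.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, kafkaCfg.getProperty("max.poll.records", "3000"));
        p.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, kafkaCfg.getProperty("max.partition.fetch.bytes", "31457280"));
        return p;
    }
}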
@@ -39,13 +38,21 @@ producer.ack=1
#--------------------------------Topology configuration------------------------------#
#Consumer parallelism
consumer.parallelism=1
source.parallelism=1
#Map function parallelism
parse.parallelism=1
parse.parallelism=2
#Count function parallelism
first.window.parallelism=2
second.window.parallelism=2
#Producer parallelism
sink.parallelism=1
#app_id refresh interval; if set to 0 the cache is not refreshed
app.tick.tuple.freq.secs=0
#Aggregation window time
count.window.time=15
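Finally, a sketch of the two-layer window calculation named in the commit message, driven by the parallelism and count.window.time settings above. It assumes the job is an Apache Flink DataStream program with the universal Kafka connector (the properties mix spout-style and Flink-style names, so the framework itself is an assumption); the LiveChartsJob class, the key extraction, and the merge logic are placeholders.

import java.io.FileInputStream;
import java.util.Properties;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

public class LiveChartsJob {

    public static void main(String[] args) throws Exception {
        Properties topo = new Properties();
        try (FileInputStream in = new FileInputStream(args[0])) {   // path to the topology properties file
            topo.load(in);
        }
        // Consumer settings built as in the sketch above (Kafka tuning file omitted here).
        Properties consumerProps = ConsumerProps.build(new Properties(), topo);
        int windowSec = Integer.parseInt(topo.getProperty("count.window.time", "15"));

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setBufferTimeout(Long.parseLong(topo.getProperty("buffer.timeout", "100")));

        env.addSource(new FlinkKafkaConsumer<>(topo.getProperty("source.kafka.topic"),
                        new SimpleStringSchema(), consumerProps))
            .setParallelism(Integer.parseInt(topo.getProperty("source.parallelism", "1")))
            // parse step: placeholder key extraction, the real field layout comes from schema.http
            .map(new MapFunction<String, Tuple2<String, Long>>() {
                @Override
                public Tuple2<String, Long> map(String record) {
                    return Tuple2.of(record.split(",", 2)[0], 1L);
                }
            })
            .setParallelism(Integer.parseInt(topo.getProperty("parse.parallelism", "2")))
            // first window layer: pre-aggregate per fine-grained key
            .keyBy(t -> t.f0)
            .window(TumblingProcessingTimeWindows.of(Time.seconds(windowSec)))
            .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1))
            .setParallelism(Integer.parseInt(topo.getProperty("first.window.parallelism", "2")))
            // second window layer: roll the pre-aggregates up to a coarser key (placeholder: key prefix)
            .keyBy(t -> t.f0.split("\\|")[0])
            .window(TumblingProcessingTimeWindows.of(Time.seconds(windowSec)))
            .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1))
            .setParallelism(Integer.parseInt(topo.getProperty("second.window.parallelism", "2")))
            .print();   // a Kafka producer writing to sink.kafka.topic would replace this in the real job

        env.execute("liveCharts-session");
    }
}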