galaxy-tsg-olap-log-complet…/properties/service_flow_config.properties

#-------------------------------- Address configuration ------------------------------#
# Input Kafka broker addresses
input.kafka.servers=192.168.44.11:9092,192.168.44.14:9092,192.168.44.15:9092
# Output Kafka broker addresses
output.kafka.servers=192.168.44.11:9092,192.168.44.14:9092,192.168.44.15:9092
# ZooKeeper addresses, used for log_id configuration
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
# HBase ZooKeeper addresses, used to connect to HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
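
For orientation, a minimal sketch of loading this file and reading the address keys above; the relative path passed to the loader and the class name are illustrative assumptions, not code from this repository.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class ServiceFlowConfig {
    // Loads the properties file from the given path (the path used below is an assumption).
    public static Properties load(String path) throws IOException {
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(path)) {
            props.load(in);
        }
        return props;
    }

    public static void main(String[] args) throws IOException {
        Properties cfg = load("properties/service_flow_config.properties");
        System.out.println("input brokers : " + cfg.getProperty("input.kafka.servers"));
        System.out.println("output brokers: " + cfg.getProperty("output.kafka.servers"));
        System.out.println("zookeeper     : " + cfg.getProperty("zookeeper.servers"));
        System.out.println("hbase zk      : " + cfg.getProperty("hbase.zookeeper.servers"));
    }
}
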
#-------------------------------- HTTP / IP geolocation library ------------------------------#
# Path to the IP geolocation library
ip.library=/home/bigdata/topology/dat/
# Gateway schema endpoint
schema.http=http://192.168.44.67:9999/metadata/schema/v1/fields/connection_record_log
# Gateway APP_ID lookup endpoint
app.id.http=http://192.168.44.67:9999/open-api/appDicList
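
The APP_ID dictionary is fetched from the endpoint above over plain HTTP; a rough sketch of such a fetch follows. The helper name, timeouts, and the absence of authentication are assumptions, only the URL key comes from this file.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class AppIdFetcher {
    // Issues a plain GET against the configured endpoint and returns the raw response body.
    public static String fetch(String endpoint) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) new URL(endpoint).openConnection();
        conn.setRequestMethod("GET");
        conn.setConnectTimeout(5_000);
        conn.setReadTimeout(5_000);
        StringBuilder body = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                body.append(line);
            }
        } finally {
            conn.disconnect();
        }
        return body.toString();
    }
}

A caller would typically pass the configured URL, e.g. AppIdFetcher.fetch(cfg.getProperty("app.id.http")), and parse the returned payload into an app_id dictionary.
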
#-------------------------------- Kafka consumer group settings ------------------------------#
# Kafka topic the job consumes from
input.kafka.topic=CONNECTION-RECORD-LOG
# Output topic for enriched (completed) records
output.kafka.topic=CONNECTION-RECORD-COMPLETED-LOG
# Consumer group id for reading the input topic; offsets are committed under this id (conventionally named after the topology/job), so the next run resumes where it left off instead of re-reading data
group.id=connection-record-flink-20210809
# Producer compression type: none or snappy
producer.kafka.compression.type=none
# Producer acks setting
producer.ack=1
# client-id of the consumer reading from Kafka
consumer.client.id=consumer-connection-record
# client-id of the producer writing back to Kafka
producer.client.id=producer-connection-record
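
Below is a sketch of how the Kafka keys in this and the address section might be wired into Flink's Kafka connector. The use of FlinkKafkaConsumer/FlinkKafkaProducer with SimpleStringSchema is an assumption about the job code, which is not part of this file.

import java.util.Properties;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

public class KafkaWiring {
    // Builds a consumer for the input topic from the keys in this file.
    public static FlinkKafkaConsumer<String> buildConsumer(Properties cfg) {
        Properties consumerProps = new Properties();
        consumerProps.setProperty("bootstrap.servers", cfg.getProperty("input.kafka.servers"));
        consumerProps.setProperty("group.id", cfg.getProperty("group.id"));
        consumerProps.setProperty("client.id", cfg.getProperty("consumer.client.id"));
        return new FlinkKafkaConsumer<>(
                cfg.getProperty("input.kafka.topic"), new SimpleStringSchema(), consumerProps);
    }

    // Builds a producer for the enriched-output topic from the keys in this file.
    public static FlinkKafkaProducer<String> buildProducer(Properties cfg) {
        Properties producerProps = new Properties();
        producerProps.setProperty("bootstrap.servers", cfg.getProperty("output.kafka.servers"));
        producerProps.setProperty("compression.type", cfg.getProperty("producer.kafka.compression.type"));
        producerProps.setProperty("acks", cfg.getProperty("producer.ack"));
        producerProps.setProperty("client.id", cfg.getProperty("producer.client.id"));
        return new FlinkKafkaProducer<>(
                cfg.getProperty("output.kafka.topic"), new SimpleStringSchema(), producerProps);
    }
}
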
#-------------------------------- Topology configuration ------------------------------#
# Consumer parallelism
consumer.parallelism=3
# Map function parallelism
map.parallelism=3
# Producer parallelism
producer.parallelism=3
# Data center id, valid range 0-63
data.center.id.num=0
# HBase cache refresh interval in seconds; 0 disables cache refresh
hbase.tick.tuple.freq.secs=180
# app_id cache refresh interval in seconds; 0 disables cache refresh
app.tick.tuple.freq.secs=0
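
A sketch of applying the parallelism values above when assembling the stream. It reuses the hypothetical KafkaWiring helpers from the previous sketch, and the identity map stands in for the real enrichment operator; how data.center.id.num and the cache-refresh intervals are consumed is not visible in this file.

import java.util.Properties;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class TopologyWiring {
    public static void build(StreamExecutionEnvironment env, Properties cfg) {
        int consumerParallelism = Integer.parseInt(cfg.getProperty("consumer.parallelism"));
        int mapParallelism      = Integer.parseInt(cfg.getProperty("map.parallelism"));
        int producerParallelism = Integer.parseInt(cfg.getProperty("producer.parallelism"));

        // data.center.id.num (0-63) and the *.tick.tuple.freq.secs keys are presumably
        // read inside the enrichment step; they are intentionally not wired here.

        DataStream<String> raw = env
                .addSource(KafkaWiring.buildConsumer(cfg))
                .setParallelism(consumerParallelism);

        raw.map(new MapFunction<String, String>() {
                    @Override
                    public String map(String value) {
                        return value; // identity stand-in for the real enrichment step
                    }
                })
                .setParallelism(mapParallelism)
                .addSink(KafkaWiring.buildProducer(cfg))
                .setParallelism(producerParallelism);
    }
}
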
#-------------------------------- Default values ------------------------------#
# Default mail charset
mail.default.charset=UTF-8
# 0 = no enrichment, emit the log as-is; 1 = enrichment required
log.need.complete=1
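
Finally, a sketch of how the log.need.complete flag could gate enrichment inside a map function. Only the flag semantics (0 = pass through unchanged, 1 = enrich) come from this file; the class name and the enrich placeholder are assumptions.

import java.util.Properties;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

public class CompletionGate extends RichMapFunction<String, String> {
    private final Properties cfg;
    private transient boolean needComplete;

    public CompletionGate(Properties cfg) {
        this.cfg = cfg;
    }

    @Override
    public void open(Configuration parameters) {
        // Defaulting to "1" (enrich) is an assumption; the file ships with the flag set to 1.
        needComplete = "1".equals(cfg.getProperty("log.need.complete", "1"));
    }

    @Override
    public String map(String record) {
        if (!needComplete) {
            return record;     // 0: emit the log unchanged
        }
        return enrich(record); // 1: fill in the missing fields
    }

    private String enrich(String record) {
        // Placeholder for the real enrichment (geo lookup, app_id mapping, HBase fields).
        return record;
    }
}
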