# Storm topology configuration (Java .properties) — Kafka/ZooKeeper/HBase addresses,
# consumer-group settings, topology parallelism, and default values.
#--------------------------------地址配置------------------------------#

#管理kafka地址
input.kafka.servers=192.168.44.12:9092

#管理输出kafka地址
output.kafka.servers=192.168.44.12:9092

#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.12:2181

#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.12:2181
#--------------------------------HTTP/定位库------------------------------#

#定位库地址
ip.library=D:\\K18-Phase2\\tsgSpace\\dat\\
#ip.library=/home/bigdata/topology/dat/

#网关的schema位置
schema.http=http://192.168.44.12:9999/metadata/schema/v1/fields/security_event_log

#网关APP_ID 获取接口
app.id.http=http://192.168.44.67:9999/open-api/appDicList
#--------------------------------Kafka消费组信息------------------------------#

#kafka 接收数据topic
kafka.topic=test

#补全数据 输出 topic
results.output.topic=test-result

#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据;
group.id=connection-record-log-20200818-1-test

#生产者压缩模式 none or snappy
producer.kafka.compression.type=none

#生产者ack
producer.ack=1

#latest/earliest 从当前消费 or 从头消费
auto.offset.reset=latest

#接收自kafka的消费者 client-id
consumer.client.id=consumer-connection-record

#回写给kafka的生产者 client-id
producer.client.id=producer-connection-record
#--------------------------------topology配置------------------------------#

#storm topology workers
topology.workers=2

#spout并行度 建议与kafka分区数相同
spout.parallelism=3

#处理补全操作的bolt并行度-worker的倍数
completion.bolt.parallelism=6

#写入kafka的并行度10
kafka.bolt.parallelism=6

#数据中心(UID)
data.center.id.num=15

#hbase 更新时间
hbase.tick.tuple.freq.secs=60

#app_id 更新时间
app.tick.tuple.freq.secs=60
#--------------------------------默认值配置------------------------------#

#当bolt性能受限时,限制spout接收速度,理论看ack开启才有效
topology.config.max.spout.pending=150000

#hbase table name
hbase.table.name=subscriber_info

#ack设置 1启动ack 0不启动ack
topology.num.acks=0

#kafka批量条数
batch.insert.num=2000

#tick时钟频率
topology.tick.tuple.freq.secs=5

#spout接收睡眠时间
topology.spout.sleep.time=1

#允许发送kafka最大失败数
max.failure.num=20

#邮件默认编码
mail.default.charset=UTF-8

#需不需要补全,不需要则原样日志输出
log.need.complete=yes