#管理kafka地址
#bootstrap.servers=192.168.40.119:9092,192.168.40.122:9092,192.168.40.123:9092
bootstrap.servers=192.168.40.207:9092

#zookeeper 地址
zookeeper.servers=192.168.40.207:2181
#zookeeper.servers=192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181

#hbase zookeeper地址
#hbase.zookeeper.servers=192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181
hbase.zookeeper.servers=192.168.40.203:2181

#hbase tablename
hbase.table.name=subscriber_info

#latest/earliest
auto.offset.reset=latest

#压缩模式 none or snappy
kafka.compression.type=none

#kafka broker下的topic名称
#kafka.topic=SECURITY-EVENT-LOG
kafka.topic=test615
#kafka.topic=CONNECTION-RECORD-LOG

#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据;
group.id=lxk615

#输出topic
results.output.topic=agg_test
#results.output.topic=SECURITY-EVENT-COMPLETED-LOG

#聚合时间,单位秒
agg.time=30

#storm topology workers
topology.workers=1

#spout并行度 建议与kafka分区数相同
spout.parallelism=1

#处理补全操作的bolt并行度-worker的倍数
datacenter.bolt.parallelism=1

#写入kafka的并行度10
kafka.bolt.parallelism=1

#kafka批量条数
batch.insert.num=2000

#网关的schema位置
schema.http=http://192.168.40.224:9999/metadata/schema/v1/fields/connection_record_log

#数据中心(UID)
data.center.id.num=15

#ack设置 1启动ack 0不启动ack
topology.num.acks=0

#spout接收睡眠时间
topology.spout.sleep.time=1

#允许发送kafka最大失败数
max.failure.num=20

#邮件默认编码
mail.default.charset=UTF-8