radius日志补全代码更新

This commit is contained in:
李玺康
2019-11-12 11:29:19 +08:00
parent 7122a40f33
commit 11a389c14f
18 changed files with 2340 additions and 1015 deletions

View File

@@ -31,13 +31,13 @@
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>192.168.40.202,192.168.40.203,192.168.40.206</value>
<value>master,slave1,slave2</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>60010</value>
</property>
<!-- 开启启schema支持 对应hbase的namespace -->
<!-- 开启schema支持 对应hbase的namespace -->
<property>
<name>phoenix.schema.isNamespaceMappingEnabled</name>
<value>true</value>
@@ -46,36 +46,4 @@
<name>phoenix.schema.mapSystemTablesToNamespace</name>
<value>true</value>
</property>
<property>
<name>hbase.client.keyvalue.maxsize</name>
<value>99428800</value>
</property>
<property>
<name>hbase.server.keyvalue.maxsize</name>
<value>99428800</value>
</property>
<property>
<name>hbase.regionserver.wal.codec</name>
<value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
</property>
<property>
<name>phoenix.query.timeoutMs</name>
<value>1800000</value>
</property>
<property>
<name>hbase.regionserver.lease.period</name>
<value>1200000</value>
</property>
<property>
<name>hbase.rpc.timeout</name>
<value>1200000</value>
</property>
<property>
<name>hbase.client.scanner.caching</name>
<value>1000</value>
</property>
<property>
<name>hbase.client.scanner.timeout.period</name>
<value>1200000</value>
</property>
</configuration>

View File

@@ -31,7 +31,7 @@
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>192.168.40.202:9001</value>
<value>master:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
@@ -60,27 +60,27 @@
<!-- nn1的RPC通信地址(nn1所在地址) -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>192.168.40.202:9000</value>
<value>master:9000</value>
</property>
<!-- nn1的http通信地址(外部访问地址) -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>192.168.40.202:50070</value>
<value>master:50070</value>
</property>
<!-- nn2的RPC通信地址(nn2所在地址) -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>192.168.40.203:9000</value>
<value>slave1:9000</value>
</property>
<!-- nn2的http通信地址(外部访问地址) -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>192.168.40.203:50070</value>
<value>slave1:50070</value>
</property>
<!-- 指定NameNode的元数据在JournalNode日志上的存放位置(一般和zookeeper部署在一起) -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://192.168.40.203:8485;192.168.40.206:8485;192.168.40.202:8485/ns1</value>
<value>qjournal://slave1:8485;slave2:8485;master:8485/ns1</value>
</property>
<!-- 指定JournalNode在本地磁盘存放数据的位置 -->
<property>

View File

@@ -1,11 +1,11 @@
#管理kafka地址
bootstrap.servers=192.168.40.119:9092,192.168.40.122:9092,192.168.40.123:9092
bootstrap.servers=192.168.40.152:9092
#zookeeper 地址
zookeeper.servers=192.168.40.119:2181,192.168.40.122:2181,192.168.40.123:2181
zookeeper.servers=192.168.40.152:2181
#hbase zookeeper地址
hbase.zookeeper.servers=192.168.40.203:2186
hbase.zookeeper.servers=192.168.40.203:2181,192.168.40.206:2181
#hbase tablename
hbase.table.name=subcriber_info
@@ -14,29 +14,29 @@ hbase.table.name=subcriber_info
auto.offset.reset=latest
#kafka broker下的topic名称
kafka.topic=SESSION-RECORD-LOG
kafka.topic=SECURITY-POLICY-LOG
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=session-completion-program
#输出topic
results.output.topic=SESSION-RECORD-COMPLETED-LOG
results.output.topic=SECURITY-POLICY-COMPLETED-LOG
#storm topology workers
topology.workers=2
topology.workers=1
#spout并行度 建议与kafka分区数相同
spout.parallelism=3
spout.parallelism=1
#处理补全操作的bolt并行度-worker的倍数
datacenter.bolt.parallelism=3
datacenter.bolt.parallelism=1
#写入kafkad的并行度
kafka.bolt.parallelism=3
#写入kafka的并行度
kafka.bolt.parallelism=1
#定位库地址
ip.library=/home/ceiec/topology/dat/
#ip.library=/home/ceiec/topology/dat/
ip.library=D:/dat/
#kafka批量条数
batch.insert.num=5000