Initial version of flume for the internal application server
10
internal-flume/conf/connection/clean_start_flume.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)
DAE_NAME=`ls ${BASE_DIR} | grep '^dae_' | grep '\.sh$'`


# Clean up logs, checkpoints and historical data
rm -rf ${BASE_DIR}/logs/*

nohup ${BASE_DIR}/${DAE_NAME} $1 $2 >/dev/null 2>&1 &
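For reference, a minimal way to exercise this script (a sketch, assuming the conf/connection layout in this commit; the two positional arguments are passed straight through to the dae_* daemon, and the log path below is the one dae_connection.sh writes):

    cd internal-flume/conf/connection
    ./clean_start_flume.sh                              # wipes logs/* and relaunches the dae_* watchdog in the background
    tail -f restart_log/restart_connection_f2k.log      # assumed location, derived from dae_connection.sh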
40
internal-flume/conf/connection/connection_f2k1.properties
Normal file
@@ -0,0 +1,40 @@
connection.sources = s2
connection.channels = c2
# Specify which channel the source sends its collected data to
connection.sources.s2.channels = c2
#taildir source
connection.sources.s2.type = TAILDIR
connection.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
connection.sources.s2.positionFile = /home/test/1connection_position.json
# The data from the 13 hosts needs to be split into groups
connection.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Grouped by trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
connection.sources.s2.filegroups.f1 = /home/data/192.168.60.101/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f2 = /home/data/192.168.60.102/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f3 = /home/data/192.168.60.103/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f4 = /home/data/192.168.60.104/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f5 = /home/data/192.168.60.105/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f6 = /home/data/192.168.60.106/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f7 = /home/data/192.168.60.107/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f8 = /home/data/192.168.60.108/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f9 = /home/data/192.168.60.109/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f10 = /home/data/192.168.60.110/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f11 = /home/data/192.168.60.111/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f12 = /home/data/192.168.60.112/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat
connection.sources.s2.filegroups.f13 = /home/data/192.168.60.113/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[0-1].dat


connection.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the producer
connection.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
connection.channels.c2.brokerList = 192.168.40.222:9093
connection.channels.c2.topic = CONNECTION-RECORD-LOG
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
connection.channels.c2.parseAsFlumeEvent = false
connection.channels.c2.kafka.producer.acks = 1
connection.channels.c2.producer.type=sync
connection.channels.c2.queue.buffering.max.ms = 5000
connection.channels.c2.queue.buffering.max.messages=20000
connection.channels.c2.batch.num.messages=5000
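The five connection_f2k*.properties configs shard the collected files by the digit right before ".dat": f2k1 takes [0-1], f2k2 [2-3], f2k3 [4-5], f2k4 [6-7] and f2k5 [8-9], so each agent tails a disjoint slice of the 13 hosts' output. A quick way to check which files the first agent would pick up on one host (a sketch; filenames are assumed to end in a single digit before .dat):

    # hypothetical check against the [0-1] group of connection_f2k1.properties
    ls /home/data/192.168.60.101/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/ | grep -E '[0-1]\.dat$'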
40
internal-flume/conf/connection/connection_f2k2.properties
Normal file
@@ -0,0 +1,40 @@
connection.sources = s2
connection.channels = c2
# Specify which channel the source sends its collected data to
connection.sources.s2.channels = c2
#taildir source
connection.sources.s2.type = TAILDIR
connection.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
connection.sources.s2.positionFile = /home/test/2connection_position.json
# The data from the 13 hosts needs to be split into groups
connection.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Grouped by trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
connection.sources.s2.filegroups.f1 = /home/data/192.168.60.101/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f2 = /home/data/192.168.60.102/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f3 = /home/data/192.168.60.103/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f4 = /home/data/192.168.60.104/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f5 = /home/data/192.168.60.105/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f6 = /home/data/192.168.60.106/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f7 = /home/data/192.168.60.107/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f8 = /home/data/192.168.60.108/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f9 = /home/data/192.168.60.109/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f10 = /home/data/192.168.60.110/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f11 = /home/data/192.168.60.111/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f12 = /home/data/192.168.60.112/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat
connection.sources.s2.filegroups.f13 = /home/data/192.168.60.113/CONNECTION-RECORD-LOG/CONNECTION-RECORD-LOG/.*[2-3].dat


connection.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the producer
connection.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
connection.channels.c2.brokerList = 192.168.40.222:9093
connection.channels.c2.topic = CONNECTION-RECORD-LOG
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
connection.channels.c2.parseAsFlumeEvent = false
connection.channels.c2.kafka.producer.acks = 1
connection.channels.c2.producer.type=sync
connection.channels.c2.queue.buffering.max.ms = 5000
connection.channels.c2.queue.buffering.max.messages=20000
connection.channels.c2.batch.num.messages=5000
40
internal-flume/conf/connection/connection_f2k3.properties
Normal file
@@ -0,0 +1,40 @@
connection.sources = s2
connection.channels = c2
# Specify which channel the source sends its collected data to
connection.sources.s2.channels = c2
#taildir source
connection.sources.s2.type = TAILDIR
connection.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
connection.sources.s2.positionFile = /home/test/3connection_position.json
# The data from the 13 hosts needs to be split into groups
connection.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Grouped by trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
connection.sources.s2.filegroups.f1 = /home/data/192.168.60.101/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f2 = /home/data/192.168.60.102/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f3 = /home/data/192.168.60.103/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f4 = /home/data/192.168.60.104/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f5 = /home/data/192.168.60.105/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f6 = /home/data/192.168.60.106/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f7 = /home/data/192.168.60.107/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f8 = /home/data/192.168.60.108/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f9 = /home/data/192.168.60.109/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f10 = /home/data/192.168.60.110/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f11 = /home/data/192.168.60.111/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f12 = /home/data/192.168.60.112/CONNECTION-RECORD-LOG/.*[4-5].dat
connection.sources.s2.filegroups.f13 = /home/data/192.168.60.113/CONNECTION-RECORD-LOG/.*[4-5].dat


connection.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the producer
connection.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
connection.channels.c2.brokerList = 192.168.40.222:9093
connection.channels.c2.topic = CONNECTION-RECORD-LOG
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
connection.channels.c2.parseAsFlumeEvent = false
connection.channels.c2.kafka.producer.acks = 1
connection.channels.c2.producer.type=sync
connection.channels.c2.queue.buffering.max.ms = 5000
connection.channels.c2.queue.buffering.max.messages=20000
connection.channels.c2.batch.num.messages=5000
40
internal-flume/conf/connection/connection_f2k4.properties
Normal file
@@ -0,0 +1,40 @@
connection.sources = s2
connection.channels = c2
# Specify which channel the source sends its collected data to
connection.sources.s2.channels = c2
#taildir source
connection.sources.s2.type = TAILDIR
connection.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
connection.sources.s2.positionFile = /home/test/4connection_position.json
# The data from the 13 hosts needs to be split into groups
connection.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Grouped by trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
connection.sources.s2.filegroups.f1 = /home/data/192.168.60.101/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f2 = /home/data/192.168.60.102/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f3 = /home/data/192.168.60.103/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f4 = /home/data/192.168.60.104/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f5 = /home/data/192.168.60.105/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f6 = /home/data/192.168.60.106/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f7 = /home/data/192.168.60.107/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f8 = /home/data/192.168.60.108/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f9 = /home/data/192.168.60.109/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f10 = /home/data/192.168.60.110/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f11 = /home/data/192.168.60.111/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f12 = /home/data/192.168.60.112/CONNECTION-RECORD-LOG/.*[6-7].dat
connection.sources.s2.filegroups.f13 = /home/data/192.168.60.113/CONNECTION-RECORD-LOG/.*[6-7].dat


connection.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the producer
connection.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
connection.channels.c2.brokerList = 192.168.40.222:9093
connection.channels.c2.topic = CONNECTION-RECORD-LOG
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
connection.channels.c2.parseAsFlumeEvent = false
connection.channels.c2.kafka.producer.acks = 1
connection.channels.c2.producer.type=sync
connection.channels.c2.queue.buffering.max.ms = 5000
connection.channels.c2.queue.buffering.max.messages=20000
connection.channels.c2.batch.num.messages=5000
40
internal-flume/conf/connection/connection_f2k5.properties
Normal file
@@ -0,0 +1,40 @@
connection.sources = s2
connection.channels = c2
# Specify which channel the source sends its collected data to
connection.sources.s2.channels = c2
#taildir source
connection.sources.s2.type = TAILDIR
connection.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
connection.sources.s2.positionFile = /home/test/5connection_position.json
# The data from the 13 hosts needs to be split into groups
connection.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Grouped by trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
connection.sources.s2.filegroups.f1 = /home/data/192.168.60.101/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f2 = /home/data/192.168.60.102/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f3 = /home/data/192.168.60.103/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f4 = /home/data/192.168.60.104/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f5 = /home/data/192.168.60.105/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f6 = /home/data/192.168.60.106/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f7 = /home/data/192.168.60.107/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f8 = /home/data/192.168.60.108/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f9 = /home/data/192.168.60.109/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f10 = /home/data/192.168.60.110/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f11 = /home/data/192.168.60.111/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f12 = /home/data/192.168.60.112/CONNECTION-RECORD-LOG/.*[8-9].dat
connection.sources.s2.filegroups.f13 = /home/data/192.168.60.113/CONNECTION-RECORD-LOG/.*[8-9].dat


connection.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the producer
connection.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
connection.channels.c2.brokerList = 192.168.40.222:9093
connection.channels.c2.topic = CONNECTION-RECORD-LOG
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
connection.channels.c2.parseAsFlumeEvent = false
connection.channels.c2.kafka.producer.acks = 1
connection.channels.c2.producer.type=sync
connection.channels.c2.queue.buffering.max.ms = 5000
connection.channels.c2.queue.buffering.max.messages=20000
connection.channels.c2.batch.num.messages=5000
15
internal-flume/conf/connection/count_flume.sh
Executable file
@@ -0,0 +1,15 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)

JAR_NAME=`ls ${BASE_DIR} | grep 'f2k' | grep '\.properties$'`

NUM1=`ps -ef | grep "${JAR_NAME}" | grep -v grep | wc -l`
pids1=$(ps -ef | grep "${JAR_NAME}" | grep -v grep | awk '{print $2}')
echo 'flume '${JAR_NAME}' total process-->'${NUM1}
if [ "${NUM1}" -ge "5" ];then
    for pid1 in $pids1
    do
        echo 'flume '${JAR_NAME}' process-->'$pid1
    done
fi
39
internal-flume/conf/connection/dae_connection.sh
Executable file
@@ -0,0 +1,39 @@
#!/bin/sh

#JAR_NAME=ktk_ip_asn.properties
PROPERTIES_NAME=connection_f2k
# Flume agent name
FLUME_NAME=connection
# Flume root directory
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
# Current directory
CONF_DIR=$(cd $(dirname $0); pwd)
# Total number of processes
PROCESS_SUM=5

echo "##############################################################" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
id=0 # index appended to the config file name, no special meaning
flag=0 # 0 = initial state, 1 = all processes started successfully, 2 = a process died unexpectedly, so the remaining ones were killed and everything is restarted
while true ; do
    NUM=`ps -ef | grep ${PROPERTIES_NAME} | grep -v grep | grep -v dae | wc -l`
    pids=$(ps -ef | grep ${PROPERTIES_NAME} | grep properties | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    # If the process count deviates from the configured number, kill all processes and restart
    if [[ "${NUM}" -ne ${PROCESS_SUM} && $flag -eq "1" ]];then
        for pid in $pids
        do
            kill -9 $pid
        done
        flag=2
    # If fewer processes are running than defined, start the next one
    elif [ "${NUM}" -lt ${PROCESS_SUM} ];then
        id=$(( ( ($id) % $PROCESS_SUM ) + 1 ))
        nohup ${BASE_DIR}/bin/flume-ng agent -n ${FLUME_NAME} -c ${CONF_DIR} -f ${CONF_DIR}/${PROPERTIES_NAME}$id.properties >/dev/null 2>&1 &
        echo "${time_stamp} ---> the ${PROPERTIES_NAME}_APP restart ---> $id" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
    # If the process count equals the configured number, set flag to 1 and id to 0
    elif [ "${NUM}" -eq ${PROCESS_SUM} ];then
        flag=1
        id=0
    fi
    sleep 10
done
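The loop above amounts to keeping PROCESS_SUM flume-ng agents alive, one per numbered config. For debugging, a single agent can be launched by hand with the same command the watchdog uses (a sketch; paths assumed relative to the internal-flume root):

    cd internal-flume
    nohup bin/flume-ng agent -n connection -c conf/connection \
        -f conf/connection/connection_f2k1.properties >/dev/null 2>&1 &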
35
internal-flume/conf/connection/flume-env.sh
Executable file
@@ -0,0 +1,35 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
# during Flume startup.

# Environment variables can be set here.

export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73

# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
#export JAVA_OPTS="-Xms1024m -Xmx3072m -Dcom.sun.management.jmxremote"
export JAVA_OPTS="-Xms512m -Xmx2048m -Xss256k -Xmn1g -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Dcom.sun.management.jmxremote"

# Let Flume write raw event data and configuration information to its log files for debugging
# purposes. Enabling these flags is not recommended in production,
# as it may result in logging sensitive user information or encryption secrets.
# export JAVA_OPTS="$JAVA_OPTS -Dorg.apache.flume.log.rawdata=true -Dorg.apache.flume.log.printconfig=true "

# Note that the Flume conf directory is always included in the classpath.
#FLUME_CLASSPATH=""
@@ -0,0 +1,22 @@
##############################################################
20200512180513 ---> the connection_f2k_APP restart ---> 1
20200512180523 ---> the connection_f2k_APP restart ---> 2
20200512180533 ---> the connection_f2k_APP restart ---> 3
##############################################################
20200512180536 ---> the connection_f2k_APP restart ---> 1
20200512180546 ---> the connection_f2k_APP restart ---> 2
20200512180556 ---> the connection_f2k_APP restart ---> 3
20200512180606 ---> the connection_f2k_APP restart ---> 4
20200512180616 ---> the connection_f2k_APP restart ---> 5
##############################################################
20200512183204 ---> the connection_f2k_APP restart ---> 1
20200512183214 ---> the connection_f2k_APP restart ---> 2
20200512183224 ---> the connection_f2k_APP restart ---> 3
20200512183234 ---> the connection_f2k_APP restart ---> 4
20200512183245 ---> the connection_f2k_APP restart ---> 5
##############################################################
20200512184503 ---> the connection_f2k_APP restart ---> 1
20200512184513 ---> the connection_f2k_APP restart ---> 2
20200512184523 ---> the connection_f2k_APP restart ---> 3
20200512184533 ---> the connection_f2k_APP restart ---> 4
20200512184543 ---> the connection_f2k_APP restart ---> 5
5
internal-flume/conf/connection/start_flume.sh
Executable file
@@ -0,0 +1,5 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)

nohup ${BASE_DIR}/dae_connection.sh >/dev/null 2>&1 &
31
internal-flume/conf/connection/stop_flume.sh
Executable file
@@ -0,0 +1,31 @@
#!/bin/sh

#DAE_NAME=dae_k2ha.sh
#JAR_NAME=k2ha.properties
BASE_DIR=$(cd $(dirname $0); pwd)

DAE_NAME=`ls ${BASE_DIR} | grep '^dae_' | grep '\.sh$'`
JAR_NAME=`ls ${BASE_DIR} | grep 'f2k' | grep '\.properties$'`

NUM1=`ps -ef | grep ${DAE_NAME} | grep -v grep | wc -l`
pids1=$(ps -ef | grep ${DAE_NAME} | grep -v grep | awk '{print $2}')
if [ "${NUM1}" -ge "1" ];then
    for pid1 in $pids1
    do
        kill -9 $pid1
        echo 'killed '${DAE_NAME}' process-->'$pid1
    done
fi

for jar in $JAR_NAME ; do

    NUM2=`ps -ef | grep $jar | grep -v grep | wc -l`
    pids2=$(ps -ef | grep $jar | grep -v grep | awk '{print $2}')
    if [ "${NUM2}" -ge "1" ];then
        for pid2 in $pids2
        do
            kill -9 $pid2
            echo 'killed '$jar' process-->'$pid2
        done
    fi
done
10
internal-flume/conf/k2f/clean_start_flume.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)
DAE_NAME=`ls ${BASE_DIR} | grep '^dae_' | grep '\.sh$'`


# Clean up logs, checkpoints and historical data
rm -rf ${BASE_DIR}/logs/*

nohup ${BASE_DIR}/${DAE_NAME} $1 $2 >/dev/null 2>&1 &
15
internal-flume/conf/k2f/count_flume.sh
Executable file
@@ -0,0 +1,15 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)

JAR_NAME=`ls ${BASE_DIR} | grep '^k2' | grep '\.properties$'`

NUM1=`ps -ef | grep "${JAR_NAME}" | grep -v grep | wc -l`
pids1=$(ps -ef | grep "${JAR_NAME}" | grep -v grep | awk '{print $2}')
echo 'flume '${JAR_NAME}' total process-->'${NUM1}
if [ "${NUM1}" -ge "1" ];then
    for pid1 in $pids1
    do
        echo 'flume '${JAR_NAME}' process-->'$pid1
    done
fi
39
internal-flume/conf/k2f/dae_f2k.sh
Executable file
@@ -0,0 +1,39 @@
#!/bin/sh

#JAR_NAME=ktk_ip_asn.properties
PROPERTIES_NAME=security_f2k
# Flume agent name
FLUME_NAME=security
# Flume root directory
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
# Current directory
CONF_DIR=$(cd $(dirname $0); pwd)
# Total number of processes
PROCESS_SUM=3

echo "##############################################################" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
id=0 # index appended to the config file name, no special meaning
flag=0 # 0 = initial state, 1 = all processes started successfully, 2 = a process died unexpectedly, so the remaining ones were killed and everything is restarted
while true ; do
    NUM=`ps -ef | grep ${PROPERTIES_NAME} | grep -v grep | grep -v dae | wc -l`
    pids=$(ps -ef | grep ${PROPERTIES_NAME} | grep properties | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    # If the process count deviates from the configured number, kill all processes and restart
    if [[ "${NUM}" -ne ${PROCESS_SUM} && $flag -eq "1" ]];then
        for pid in $pids
        do
            kill -9 $pid
        done
        flag=2
    # If fewer processes are running than defined, start the next one
    elif [ "${NUM}" -lt ${PROCESS_SUM} ];then
        id=$(( ( ($id) % $PROCESS_SUM ) + 1 ))
        nohup ${BASE_DIR}/bin/flume-ng agent -n ${FLUME_NAME} -c ${CONF_DIR} -f ${CONF_DIR}/${PROPERTIES_NAME}$id.properties >/dev/null 2>&1 &
        echo "${time_stamp} ---> the ${PROPERTIES_NAME}_APP restart ---> $id" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
    # If the process count equals the configured number, set flag to 1 and id to 0
    elif [ "${NUM}" -eq ${PROCESS_SUM} ];then
        flag=1
        id=0
    fi
    sleep 10
done
35
internal-flume/conf/k2f/flume-env.sh
Executable file
@@ -0,0 +1,35 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
# during Flume startup.

# Environment variables can be set here.

export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73

# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
#export JAVA_OPTS="-Xms1024m -Xmx3072m -Dcom.sun.management.jmxremote"
export JAVA_OPTS="-Xms512m -Xmx2048m -Xss256k -Xmn1g -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Dcom.sun.management.jmxremote"

# Let Flume write raw event data and configuration information to its log files for debugging
# purposes. Enabling these flags is not recommended in production,
# as it may result in logging sensitive user information or encryption secrets.
# export JAVA_OPTS="$JAVA_OPTS -Dorg.apache.flume.log.rawdata=true -Dorg.apache.flume.log.printconfig=true "

# Note that the Flume conf directory is always included in the classpath.
#FLUME_CLASSPATH=""
22
internal-flume/conf/k2f/k2f.properties
Normal file
@@ -0,0 +1,22 @@
# Name the source, channel and sink
a1.channels = c1
a1.sinks = k1
# Use a Kafka channel; omitting the source makes this more efficient
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = 192.168.40.119:9092
a1.channels.c1.kafka.topic = test
a1.channels.c1.kafka.consumer.group.id = lxk_0512
a1.channels.c1.kafka.consumer.auto.offset.reset = latest
a1.channels.c1.kafka.consumer.enable.auto.commit = true
a1.channels.c1.kafka.consumer.fetch.max.wait.ms = 1000
a1.channels.c1.kafka.consumer.fetch.min.bytes = 10485760
a1.channels.c1.parseAsFlumeEvent = false

# Plain file_roll sink that writes the data to local disk
a1.sinks.k1.type = file_roll
a1.sinks.k1.channel = c1
a1.sinks.k1.sink.pathManager = default
a1.sinks.k1.sink.pathManager.extension = dat
a1.sinks.k1.sink.pathManager.prefix = test-
a1.sinks.k1.sink.rollInterval = 30
a1.sinks.k1.sink.directory = /home/test/log
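Because parseAsFlumeEvent = false, the channel hands the file_roll sink the raw Kafka record value, so the rolled files under /home/test/log are plain text. A quick check of the newest rolled file (a sketch; the test-*.dat naming is assumed from the prefix and extension configured above and the default path manager):

    ls -t /home/test/log/test-*.dat | head -n 1 | xargs tail -n 5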
24
internal-flume/conf/k2f/pro/2f2k.properties
Normal file
@@ -0,0 +1,24 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/2taildir_position.json
a2.sources.s2.filegroups = f2
a2.sources.s2.filegroups.f2 = /home/test/log2/.*dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.222:9092
a2.channels.c2.zookeeperConnect=192.168.40.222:2181/kafka
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
58
internal-flume/conf/k2f/pro/4f2k.properties
Normal file
@@ -0,0 +1,58 @@
a2.sources = s2
a2.channels = c2 c3 c4
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.positionFile = /home/test/4taildir_position.json
a2.sources.s2.filegroups = f3
a2.sources.s2.filegroups.f3 = /home/test/recv/.*dat
#a2.sources.s2.headers.f3.headerKey1 = channel2
#a2.sources.s2.filegroups.f4 = /home/test/log/.*dat
#a2.sources.s2.headers.f4.headerKey1 = channel3
a2.sources.s2.maxBatchCount = 500
#a2.sources.s2.selector.type = multiplexing

#channel selector
#a2.sources.s2.selector.header = headerKey1
#a2.sources.s2.selector.mapping.channel2=c2
#a2.sources.s2.selector.mapping.channel3=c3

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.222:9093
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=200000
a2.channels.c2.batch.num.messages=5000

# Second channel
a2.channels.c3.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c3.brokerList = 192.168.40.222:9093
a2.channels.c3.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c3.parseAsFlumeEvent = false
a2.channels.c3.kafka.producer.acks = 1
a2.channels.c3.producer.type=async
a2.channels.c3.queue.buffering.max.ms = 5000
a2.channels.c3.queue.buffering.max.messages=20000
a2.channels.c3.batch.num.messages=500



# Third channel
a2.channels.c4.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c4.brokerList = 192.168.40.222:9093
a2.channels.c4.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c4.parseAsFlumeEvent = false
a2.channels.c4.kafka.producer.acks = 1
a2.channels.c4.producer.type=async
a2.channels.c4.queue.buffering.max.ms = 5000
a2.channels.c4.queue.buffering.max.messages=20000
a2.channels.c4.batch.num.messages=500
24
internal-flume/conf/k2f/pro/f2k.properties
Normal file
@@ -0,0 +1,24 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/taildir_position.json
a2.sources.s2.filegroups = f0
a2.sources.s2.filegroups.f0 = /home/test/log/.*dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.222:9092
a2.channels.c2.zookeeperConnect=192.168.40.222:2181/kafka
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
23
internal-flume/conf/k2f/pro/f2k1.properties
Normal file
@@ -0,0 +1,23 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/1taildir_position.json
a2.sources.s2.filegroups = f1
a2.sources.s2.filegroups.f1 = /home/test/log/.*[0-2].dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.222:9093
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
23
internal-flume/conf/k2f/pro/f2k2.properties
Normal file
@@ -0,0 +1,23 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/2taildir_position.json
a2.sources.s2.filegroups = f1
a2.sources.s2.filegroups.f1 = /home/test/log/.*[3-4].dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.222:9093
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
23
internal-flume/conf/k2f/pro/f2k3.properties
Normal file
@@ -0,0 +1,23 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/1taildir_position.json
a2.sources.s2.filegroups = f1
a2.sources.s2.filegroups.f1 = /home/test/log/.*[0-2].dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.222:9093
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
29
internal-flume/conf/k2f/pro/file.properties
Normal file
@@ -0,0 +1,29 @@
# Name the source, channel and sink
a1.sources = s1
a1.channels = c1
a1.sinks = k1
# Specify which channel the source sends its collected data to
a1.sources.s1.channels = c1
# Specify the source's data collection strategy
a1.sources.s1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.s1.batchSize = 3000
a1.sources.s1.batchDurationMillis = 100
a1.sources.s1.kafka.bootstrap.servers = 192.168.40.203:9092
a1.sources.s1.kafka.topics = test
a1.sources.s1.kafka.consumer.group.id = lxk_0429

# Use a memory channel, i.e. all data is buffered in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 2000000
a1.channels.c1.transactionCapacity = 30000
a1.channels.c1.byteCapacityBufferPercentage = 40
a1.channels.c1.byteCapacity = 2147483648

# Plain file_roll sink that writes the data to local disk
a1.sinks.k1.type = file_roll
a1.sinks.k1.channel = c1
a1.sinks.k1.sink.pathManager = default
a1.sinks.k1.sink.pathManager.extension = dat
a1.sinks.k1.sink.pathManager.prefix = test-
a1.sinks.k1.sink.rollInterval = 30
a1.sinks.k1.sink.directory = /home/test/log2
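On the memory channel sizing above: byteCapacity = 2147483648 bytes is 2 GiB, and byteCapacityBufferPercentage = 40 reserves part of that for event headers, so (under the usual Flume memory-channel semantics, stated here as an assumption) roughly 60% of it is left for event bodies before puts start to block:

    # assumed semantics: usable bytes ≈ byteCapacity * (100 - byteCapacityBufferPercentage) / 100
    echo $(( 2147483648 * 60 / 100 ))   # 1288490188 bytes, about 1.2 GiB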
29
internal-flume/conf/k2f/pro/file1.properties
Normal file
@@ -0,0 +1,29 @@
# Name the source, channel and sink
a1.sources = s1
a1.channels = c1
a1.sinks = k1
# Specify which channel the source sends its collected data to
a1.sources.s1.channels = c1
# Specify the source's data collection strategy
a1.sources.s1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.s1.batchSize = 3000
a1.sources.s1.batchDurationMillis = 100
a1.sources.s1.kafka.bootstrap.servers = 192.168.40.203:9092
a1.sources.s1.kafka.topics = test
a1.sources.s1.kafka.consumer.group.id = lxk_0429

# Use a memory channel, i.e. all data is buffered in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 2000000
a1.channels.c1.transactionCapacity = 30000
a1.channels.c1.byteCapacityBufferPercentage = 40
a1.channels.c1.byteCapacity = 2147483648

# Plain file_roll sink that writes the data to local disk
a1.sinks.k1.type = file_roll
a1.sinks.k1.channel = c1
a1.sinks.k1.sink.pathManager = default
a1.sinks.k1.sink.pathManager.extension = dat
a1.sinks.k1.sink.pathManager.prefix = test-
a1.sinks.k1.sink.rollInterval = 30
a1.sinks.k1.sink.directory = /home/test/log1
29
internal-flume/conf/k2f/pro/file2.properties
Normal file
@@ -0,0 +1,29 @@
# Name the source, channel and sink
a1.sources = s1
a1.channels = c1
a1.sinks = k1
# Specify which channel the source sends its collected data to
a1.sources.s1.channels = c1
# Specify the source's data collection strategy
a1.sources.s1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.s1.batchSize = 3000
a1.sources.s1.batchDurationMillis = 100
a1.sources.s1.kafka.bootstrap.servers = 192.168.40.203:9092
a1.sources.s1.kafka.topics = test
a1.sources.s1.kafka.consumer.group.id = lxk_0429

# Use a memory channel, i.e. all data is buffered in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 2000000
a1.channels.c1.transactionCapacity = 30000
a1.channels.c1.byteCapacityBufferPercentage = 40
a1.channels.c1.byteCapacity = 2147483648

# Plain file_roll sink that writes the data to local disk
a1.sinks.k1.type = file_roll
a1.sinks.k1.channel = c1
a1.sinks.k1.sink.pathManager = default
a1.sinks.k1.sink.pathManager.extension = dat
a1.sinks.k1.sink.pathManager.prefix = test-
a1.sinks.k1.sink.rollInterval = 30
a1.sinks.k1.sink.directory = /home/test/log2
22
internal-flume/conf/k2f/pro/k2f.properties
Normal file
@@ -0,0 +1,22 @@
# Name the source, channel and sink
a1.channels = c1
a1.sinks = k1
# Use a Kafka channel; omitting the source makes this more efficient
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = 192.168.40.119:9092
a1.channels.c1.kafka.topic = test
a1.channels.c1.kafka.consumer.group.id = lxk_0509
a1.channels.c1.kafka.consumer.auto.offset.reset = latest
a1.channels.c1.kafka.consumer.enable.auto.commit = true
a1.channels.c1.kafka.consumer.fetch.max.wait.ms = 1000
a1.channels.c1.kafka.consumer.fetch.min.bytes = 10485760
a1.channels.c1.parseAsFlumeEvent = false

# Plain file_roll sink that writes the data to local disk
a1.sinks.k1.type = file_roll
a1.sinks.k1.channel = c1
a1.sinks.k1.sink.pathManager = default
a1.sinks.k1.sink.pathManager.extension = dat
a1.sinks.k1.sink.pathManager.prefix = test-
a1.sinks.k1.sink.rollInterval = 60
a1.sinks.k1.sink.directory = /home/test/log
25
internal-flume/conf/k2f/pro/kafka.properties
Normal file
@@ -0,0 +1,25 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/taildir_position.json
a2.sources.s2.filegroups = f1
a2.sources.s2.filegroups.f1 = /home/test/log/.*dat
a2.sources.s2.filegroups.f2 = /home/test/log2/.*dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.203:9092
a2.channels.c2.zookeeperConnect=192.168.40.203:2181/kafka
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
25
internal-flume/conf/k2f/pro/kafka1.properties
Normal file
@@ -0,0 +1,25 @@
a2.sources = s2
a2.channels = c2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2
#taildir source
a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /var/taildir_position.json
a2.sources.s2.filegroups = f1
a2.sources.s2.filegroups.f1 = /home/test/log1/.*dat
a2.sources.s2.filegroups.f2 = /home/test/log2/.*dat
a2.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a2.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.c2.brokerList = 192.168.40.203:9092
a2.channels.c2.zookeeperConnect=192.168.40.203:2181/kafka
a2.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a2.channels.c2.parseAsFlumeEvent = false
a2.channels.c2.kafka.producer.acks = 1
a2.channels.c2.producer.type=async
a2.channels.c2.queue.buffering.max.ms = 5000
a2.channels.c2.queue.buffering.max.messages=20000
a2.channels.c2.batch.num.messages=500
25
internal-flume/conf/k2f/pro/kafka2.properties
Normal file
@@ -0,0 +1,25 @@
a3.sources = s2
a3.channels = c2
# Specify which channel the source sends its collected data to
a3.sources.s2.channels = c2
#taildir source
a3.sources.s2.type = TAILDIR
a3.sources.s2.channels = c2
a3.sources.s2.positionFile = /var/taildir2_position.json
a3.sources.s2.filegroups = f2
a3.sources.s2.filegroups.f1 = /home/test/log1/.*dat
a3.sources.s2.filegroups.f2 = /home/test/log2/.*dat
a3.sources.s2.maxBatchCount = 1000

# The Kafka channel acts as the producer
a3.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a3.channels.c2.brokerList = 192.168.40.203:9092
a3.channels.c2.zookeeperConnect=192.168.40.203:2181/kafka
a3.channels.c2.topic = recv_test
# false writes the records as plain text; true writes them as Flume events, which come out garbled when read as text; the default is true
a3.channels.c2.parseAsFlumeEvent = false
a3.channels.c2.kafka.producer.acks = 1
a3.channels.c2.producer.type=async
a3.channels.c2.queue.buffering.max.ms = 5000
a3.channels.c2.queue.buffering.max.messages=20000
a3.channels.c2.batch.num.messages=500
68
internal-flume/conf/k2f/pro/log4j.properties
Normal file
@@ -0,0 +1,68 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and
# under the License.
#

# Define some default values that can be overridden by system properties.
#
# For testing, it may also be convenient to specify
# -Dflume.root.logger=DEBUG,console when launching flume.

#flume.root.logger=DEBUG,console
flume.root.logger=INFO,LOGFILE
flume.log.dir=./logs
flume.log.file=flume.log

log4j.logger.org.apache.flume.lifecycle = INFO
log4j.logger.org.jboss = WARN
log4j.logger.org.mortbay = INFO
log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
log4j.logger.org.apache.hadoop = INFO
log4j.logger.org.apache.hadoop.hive = ERROR

# Define the root logger to the system property "flume.root.logger".
log4j.rootLogger=${flume.root.logger}


# Stock log4j rolling file appender
# Default log rotation configuration
log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.LOGFILE.MaxFileSize=100MB
log4j.appender.LOGFILE.MaxBackupIndex=10
log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n


# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
# Add "DAILY" to flume.root.logger above if you want to use this
log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n


# console
# Add "console" to flume.root.logger above if you want to use this
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n
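For test runs, the console appender defined above can be enabled from the command line instead of editing this file, as the header comment suggests (a sketch, run from the internal-flume root with the k2f agent config as an example):

    bin/flume-ng agent -n a1 -c conf/k2f/pro -f conf/k2f/pro/k2f.properties \
        -Dflume.root.logger=DEBUG,console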
34
internal-flume/conf/k2f/pro/tail.properties
Normal file
@@ -0,0 +1,34 @@
a2.sources = s2
a2.channels = c2
a2.sinks = k2
# Specify which channel the source sends its collected data to
a2.sources.s2.channels = c2

a2.sources.s2.type = TAILDIR
a2.sources.s2.channels = c2
a2.sources.s2.positionFile = /home/test/taildir_position.json
a2.sources.s2.filegroups = f1 f2
a2.sources.s2.filegroups.f1 = /home/test/log1/.*dat
a2.sources.s2.headers.f1.headerKey1 = value1
a2.sources.s2.filegroups.f2 = /home/test/log2/.*dat
a2.sources.s2.headers.f2.headerKey1 = value2
a2.sources.s2.headers.f2.headerKey2 = value2-2
a2.sources.s2.fileHeader = true
a2.sources.s2.maxBatchCount = 1000


# Use a memory channel, i.e. all data is buffered in memory
a2.channels.c2.type = memory
a2.channels.c2.capacity = 2000000
a2.channels.c2.transactionCapacity = 30000
a2.channels.c2.byteCapacityBufferPercentage = 40
a2.channels.c2.byteCapacity = 2147483648

# Plain file_roll sink that writes the data to local disk
a2.sinks.k2.type = file_roll
a2.sinks.k2.channel = c2
a2.sinks.k2.sink.pathManager = default
a2.sinks.k2.sink.pathManager.extension = dat
a2.sinks.k2.sink.pathManager.prefix = recv-
a2.sinks.k2.sink.rollInterval = 60
a2.sinks.k2.sink.directory = /home/test/recv1
213
internal-flume/conf/k2f/restart_log/restart_f2k.log
Normal file
@@ -0,0 +1,213 @@
##############################################################
20200509174727 ---> the f2k_APP restart ---> 0
20200509174737 ---> the f2k_APP restart ---> 1
##############################################################
##############################################################
##############################################################
##############################################################
##############################################################
20200511183420 ---> the f2k_APP restart ---> 1
20200511183430 ---> the f2k_APP restart ---> 2
##############################################################
20200511183639 ---> the f2k_APP restart ---> 1
20200511183649 ---> the f2k_APP restart ---> 2
20200511183659 ---> the f2k_APP restart ---> 3
20200511183709 ---> the f2k_APP restart ---> 4
20200511183719 ---> the f2k_APP restart ---> 5
20200511183729 ---> the f2k_APP restart ---> 6
20200511183739 ---> the f2k_APP restart ---> 7
20200511183749 ---> the f2k_APP restart ---> 8
20200511183759 ---> the f2k_APP restart ---> 9
20200511183809 ---> the f2k_APP restart ---> 10
20200511183819 ---> the f2k_APP restart ---> 11
20200511183829 ---> the f2k_APP restart ---> 12
20200511183839 ---> the f2k_APP restart ---> 13
20200511183849 ---> the f2k_APP restart ---> 14
20200511183859 ---> the f2k_APP restart ---> 15
20200511183909 ---> the f2k_APP restart ---> 16
20200511183919 ---> the f2k_APP restart ---> 17
20200511183930 ---> the f2k_APP restart ---> 18
20200511183940 ---> the f2k_APP restart ---> 19
20200511183950 ---> the f2k_APP restart ---> 20
20200511184000 ---> the f2k_APP restart ---> 21
20200511184010 ---> the f2k_APP restart ---> 22
20200511184020 ---> the f2k_APP restart ---> 23
20200511184030 ---> the f2k_APP restart ---> 24
20200511184040 ---> the f2k_APP restart ---> 25
20200511184050 ---> the f2k_APP restart ---> 26
20200511184100 ---> the f2k_APP restart ---> 27
20200511184110 ---> the f2k_APP restart ---> 28
20200511184120 ---> the f2k_APP restart ---> 29
20200511184130 ---> the f2k_APP restart ---> 30
20200511184140 ---> the f2k_APP restart ---> 31
20200511184150 ---> the f2k_APP restart ---> 32
20200511184200 ---> the f2k_APP restart ---> 33
20200511184210 ---> the f2k_APP restart ---> 34
20200511184220 ---> the f2k_APP restart ---> 35
20200511184230 ---> the f2k_APP restart ---> 36
20200511184240 ---> the f2k_APP restart ---> 37
20200511184250 ---> the f2k_APP restart ---> 38
20200511184300 ---> the f2k_APP restart ---> 39
20200511184310 ---> the f2k_APP restart ---> 40
20200511184321 ---> the f2k_APP restart ---> 41
20200511184331 ---> the f2k_APP restart ---> 42
20200511184341 ---> the f2k_APP restart ---> 43
20200511184351 ---> the f2k_APP restart ---> 44
20200511184401 ---> the f2k_APP restart ---> 45
20200511184411 ---> the f2k_APP restart ---> 46
20200511184421 ---> the f2k_APP restart ---> 47
20200511184431 ---> the f2k_APP restart ---> 48
20200511184441 ---> the f2k_APP restart ---> 49
20200511184451 ---> the f2k_APP restart ---> 50
20200511184501 ---> the f2k_APP restart ---> 51
20200511184511 ---> the f2k_APP restart ---> 52
20200511184521 ---> the f2k_APP restart ---> 53
20200511184531 ---> the f2k_APP restart ---> 54
20200511184541 ---> the f2k_APP restart ---> 55
20200511184551 ---> the f2k_APP restart ---> 56
20200511184601 ---> the f2k_APP restart ---> 57
20200511184611 ---> the f2k_APP restart ---> 58
20200511184621 ---> the f2k_APP restart ---> 59
20200511184631 ---> the f2k_APP restart ---> 60
20200511184641 ---> the f2k_APP restart ---> 61
20200511184651 ---> the f2k_APP restart ---> 62
20200511184701 ---> the f2k_APP restart ---> 63
20200511184711 ---> the f2k_APP restart ---> 64
20200511184721 ---> the f2k_APP restart ---> 65
20200511184732 ---> the f2k_APP restart ---> 66
20200511184742 ---> the f2k_APP restart ---> 67
20200511184752 ---> the f2k_APP restart ---> 68
20200511184802 ---> the f2k_APP restart ---> 69
20200511184812 ---> the f2k_APP restart ---> 70
20200511184822 ---> the f2k_APP restart ---> 71
##############################################################
20200511185311 ---> the f2k_APP restart ---> 1
##############################################################
20200511185421 ---> the f2k_APP restart ---> 1
##############################################################
20200511185532 ---> the f2k_APP restart ---> 1
20200511185542 ---> the f2k_APP restart ---> 1
20200511185552 ---> the f2k_APP restart ---> 1
##############################################################
20200511185706 ---> the f2k_APP restart ---> 1
20200511185716 ---> the f2k_APP restart ---> 1+1
20200511185726 ---> the f2k_APP restart ---> 2+1
##############################################################
20200511185837 ---> the f2k_APP restart ---> 1
##############################################################
20200511185938 ---> the f2k_APP restart ---> 1
##############################################################
20200511190054 ---> the f2k_APP restart ---> 1
20200511190104 ---> the f2k_APP restart ---> 1+1
##############################################################
20200511190604 ---> the f2k_APP restart ---> 1
20200511190614 ---> the f2k_APP restart ---> 0
20200511190624 ---> the f2k_APP restart ---> 1
20200511190634 ---> the f2k_APP restart ---> 0
##############################################################
20200511190729 ---> the f2k_APP restart ---> 1
20200511190739 ---> the f2k_APP restart ---> 1
20200511190749 ---> the f2k_APP restart ---> 1
##############################################################
20200511190950 ---> the f2k_APP restart ---> 1
20200511191000 ---> the f2k_APP restart ---> 2
20200511191010 ---> the f2k_APP restart ---> 1
##############################################################
20200511191834 ---> the f2k_APP restart ---> 1
20200511191844 ---> the f2k_APP restart ---> 2
##############################################################
20200511192013 ---> the f2k_APP restart ---> 1
##############################################################
20200511192134 ---> the f2k_APP restart ---> 1
20200511192144 ---> the f2k_APP restart ---> 2
20200511192154 ---> the f2k_APP restart ---> 1
20200511192204 ---> the f2k_APP restart ---> 2
##############################################################
##############################################################
##############################################################
##############################################################
20200511193630 ---> the f2k_APP restart ---> 1
20200511193640 ---> the f2k_APP restart ---> 2
##############################################################
20200512101249 ---> the f2k_APP restart ---> 1
20200512101259 ---> the f2k_APP restart ---> 2
20200512101309 ---> the f2k_APP restart ---> 1
##############################################################
20200512101433 ---> the f2k_APP restart ---> 1
##############################################################
20200512101537 ---> the f2k_APP restart ---> 1
20200512101547 ---> the f2k_APP restart ---> 2
20200512101557 ---> the f2k_APP restart ---> 3
20200512101707 ---> the f2k_APP restart ---> 1
##############################################################
20200512102643 ---> the f2k_APP restart ---> 1
20200512102653 ---> the f2k_APP restart ---> 2
##############################################################
20200512102723 ---> the f2k_APP restart ---> 1
20200512102733 ---> the f2k_APP restart ---> 2
20200512102743 ---> the f2k_APP restart ---> 3
##############################################################
20200512102936 ---> the f2k_APP restart ---> 1
20200512102946 ---> the f2k_APP restart ---> 2
20200512102956 ---> the f2k_APP restart ---> 3
##############################################################
20200512103250 ---> the f2k_APP restart ---> 1
20200512103300 ---> the f2k_APP restart ---> 2
20200512103310 ---> the f2k_APP restart ---> 3
##############################################################
20200512113819 ---> the f2k_APP restart ---> 1
20200512113829 ---> the f2k_APP restart ---> 2
20200512113839 ---> the f2k_APP restart ---> 3
##############################################################
##############################################################
20200512114211 ---> the f2k_APP restart ---> 1
20200512114241 ---> the f2k_APP restart ---> 2
##############################################################
20200512114550 ---> the f2k_APP restart ---> 1
20200512114600 ---> the f2k_APP restart ---> 2
20200512114610 ---> the f2k_APP restart ---> 3
##############################################################
##############################################################
20200512115341 ---> the f2k_APP restart ---> 1
20200512115351 ---> the f2k_APP restart ---> 2
20200512115401 ---> the f2k_APP restart ---> 3
20200512115452 ---> the f2k_APP restart ---> 1
|
||||
20200512115502 ---> the f2k_APP restart ---> 2
|
||||
20200512115512 ---> the f2k_APP restart ---> 3
|
||||
20200512115712 ---> the f2k_APP restart ---> 1
|
||||
20200512115722 ---> the f2k_APP restart ---> 2
|
||||
20200512115732 ---> the f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512115933 ---> the f2k_APP restart ---> 1
|
||||
20200512115943 ---> the f2k_APP restart ---> 2
|
||||
20200512115953 ---> the f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512134239 ---> the f2k_APP restart ---> 1
|
||||
20200512134249 ---> the f2k_APP restart ---> 2
|
||||
20200512134259 ---> the f2k_APP restart ---> 3
|
||||
20200512134509 ---> the f2k_APP restart ---> 1
|
||||
20200512134519 ---> the f2k_APP restart ---> 2
|
||||
20200512134530 ---> the f2k_APP restart ---> 3
|
||||
20200512140003 ---> the f2k_APP restart ---> 1
|
||||
20200512140013 ---> the f2k_APP restart ---> 2
|
||||
20200512141056 ---> the f2k_APP restart ---> 1
|
||||
20200512141106 ---> the f2k_APP restart ---> 2
|
||||
20200512141116 ---> the f2k_APP restart ---> 3
|
||||
20200512142500 ---> the f2k_APP restart ---> 1
|
||||
20200512142510 ---> the f2k_APP restart ---> 2
|
||||
20200512142941 ---> the f2k_APP restart ---> 1
|
||||
20200512142951 ---> the f2k_APP restart ---> 2
|
||||
20200512143001 ---> the f2k_APP restart ---> 3
|
||||
20200512143031 ---> the f2k_APP restart ---> 1
|
||||
20200512143041 ---> the f2k_APP restart ---> 2
|
||||
20200512143051 ---> the f2k_APP restart ---> 3
|
||||
20200512144224 ---> the f2k_APP restart ---> 1
|
||||
20200512144234 ---> the f2k_APP restart ---> 2
|
||||
20200512144314 ---> the f2k_APP restart ---> 1
|
||||
20200512144324 ---> the f2k_APP restart ---> 2
|
||||
20200512144355 ---> the f2k_APP restart ---> 1
|
||||
20200512144405 ---> the f2k_APP restart ---> 2
|
||||
20200512144415 ---> the f2k_APP restart ---> 3
|
||||
20200512144635 ---> the f2k_APP restart ---> 1
|
||||
20200512144645 ---> the f2k_APP restart ---> 2
|
||||
20200512144655 ---> the f2k_APP restart ---> 3
|
||||
134
internal-flume/conf/k2f/restart_log/restart_security_f2k.log
Normal file
134
internal-flume/conf/k2f/restart_log/restart_security_f2k.log
Normal file
@@ -0,0 +1,134 @@
|
||||
##############################################################
|
||||
20200512180055 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180105 ---> the security_f2k_APP restart ---> 2
|
||||
##############################################################
|
||||
20200512180108 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180115 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180118 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180125 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180128 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180135 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180138 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180145 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180148 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180155 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180158 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180206 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180208 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180216 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180218 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180226 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180228 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180236 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180238 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180246 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180248 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180256 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180258 ---> the security_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512180513 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180523 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180533 ---> the security_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512180536 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180546 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180556 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180626 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180637 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180707 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180717 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180747 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180757 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180827 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180837 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180907 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180917 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180947 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180957 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181028 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181038 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181108 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181118 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181148 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181158 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181228 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181238 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181308 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181318 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181348 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181358 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181429 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181439 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181449 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181519 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181529 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181539 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181609 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181619 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181649 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181659 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181729 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181739 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181809 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181820 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181850 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181900 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181910 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181920 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181930 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181940 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181950 ---> the security_f2k_APP restart ---> 1
|
||||
20200512182000 ---> the security_f2k_APP restart ---> 2
|
||||
20200512182010 ---> the security_f2k_APP restart ---> 3
|
||||
20200512182020 ---> the security_f2k_APP restart ---> 1
|
||||
20200512182030 ---> the security_f2k_APP restart ---> 2
|
||||
20200512182040 ---> the security_f2k_APP restart ---> 3
|
||||
20200512182050 ---> the security_f2k_APP restart ---> 1
|
||||
20200512182100 ---> the security_f2k_APP restart ---> 2
|
||||
20200512182110 ---> the security_f2k_APP restart ---> 3
|
||||
20200512182120 ---> the security_f2k_APP restart ---> 1
|
||||
20200512182130 ---> the security_f2k_APP restart ---> 2
|
||||
20200512182140 ---> the security_f2k_APP restart ---> 3
|
||||
20200512182150 ---> the security_f2k_APP restart ---> 1
|
||||
20200512182200 ---> the security_f2k_APP restart ---> 2
|
||||
20200512182211 ---> the security_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512183204 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183215 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183225 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183255 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183305 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183335 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183345 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183415 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183425 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183455 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183505 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183535 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183545 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183615 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183626 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183656 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183706 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183716 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183726 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183736 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183746 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183756 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183806 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183836 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183846 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183916 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183926 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183936 ---> the security_f2k_APP restart ---> 3
|
||||
20200512184006 ---> the security_f2k_APP restart ---> 1
|
||||
20200512184016 ---> the security_f2k_APP restart ---> 2
|
||||
20200512184027 ---> the security_f2k_APP restart ---> 3
|
||||
20200512184037 ---> the security_f2k_APP restart ---> 1
|
||||
20200512184047 ---> the security_f2k_APP restart ---> 2
|
||||
20200512184057 ---> the security_f2k_APP restart ---> 3
|
||||
20200512184107 ---> the security_f2k_APP restart ---> 1
|
||||
20200512184117 ---> the security_f2k_APP restart ---> 2
|
||||
20200512184127 ---> the security_f2k_APP restart ---> 3
|
||||
20200512184157 ---> the security_f2k_APP restart ---> 1
|
||||
20200512184207 ---> the security_f2k_APP restart ---> 2
|
||||
5
internal-flume/conf/k2f/start_flume.sh
Executable file
5
internal-flume/conf/k2f/start_flume.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
|
||||
nohup ${BASE_DIR}/dae_f2k.sh >/dev/null 2>&1 &
|
||||
28
internal-flume/conf/k2f/stop_flume.sh
Executable file
28
internal-flume/conf/k2f/stop_flume.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/sh
|
||||
|
||||
#DAE_NAME=dae_k2ha.sh
|
||||
#JAR_NAME=k2ha.properties
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
|
||||
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
|
||||
JAR_NAME=`ls ${BASE_DIR} | grep ^f2* | grep .properties$`
|
||||
|
||||
NUM1=`ps -ef | grep ${DAE_NAME} | grep -v grep | wc -l`
|
||||
pids1=$(ps -ef | grep ${DAE_NAME} | grep -v grep | awk '{print $2}')
|
||||
if [ "${NUM1}" -ge "1" ];then
|
||||
for pid1 in $pids1
|
||||
do
|
||||
kill -9 $pid1
|
||||
echo 'killed '${DAE_NAME}' process-->'$pid1
|
||||
done
|
||||
fi
|
||||
|
||||
NUM2=`ps -ef | grep ${JAR_NAME} | grep -v grep | wc -l`
|
||||
pids2=$(ps -ef | grep ${JAR_NAME} | grep -v grep | awk '{print $2}')
|
||||
if [ "${NUM2}" -ge "1" ];then
|
||||
for pid2 in $pids2
|
||||
do
|
||||
kill -9 $pid2
|
||||
echo 'killed '${JAR_NAME}' process-->'$pid2
|
||||
done
|
||||
fi
|
||||
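stop_flume.sh above goes straight to kill -9, so an agent never gets a chance to flush its Kafka channel. Below is only a sketch of a gentler variant, not part of the committed script; it reuses the JAR_NAME value already computed in stop_flume.sh and simply tries a plain SIGTERM before force-killing.
# Sketch: polite shutdown first, then force-kill whatever is left.
for pid in $(ps -ef | grep ${JAR_NAME} | grep -v grep | awk '{print $2}'); do
    kill $pid                 # ask the agent to shut down cleanly
done
sleep 5                       # give the agents a moment to exit
for pid in $(ps -ef | grep ${JAR_NAME} | grep -v grep | awk '{print $2}'); do
    kill -9 $pid              # force-kill any survivor
    echo 'force killed '${JAR_NAME}' process-->'$pid
done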
10
internal-flume/conf/proxy/clean_start_flume.sh
Executable file
10
internal-flume/conf/proxy/clean_start_flume.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
|
||||
|
||||
|
||||
# Clear logs, checkpoints, and historical data
|
||||
rm -rf ${BASE_DIR}/logs/*
|
||||
|
||||
nohup ${BASE_DIR}/${DAE_NAME} $1 $2 >/dev/null 2>&1 &
|
||||
15
internal-flume/conf/proxy/count_flume.sh
Executable file
15
internal-flume/conf/proxy/count_flume.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
|
||||
JAR_NAME=`ls ${BASE_DIR} | grep f2k* | grep .properties$`
|
||||
|
||||
NUM1=`ps -ef | grep ${JAR_NAME} | grep -v grep | wc -l`
|
||||
pids1=$(ps -ef | grep ${JAR_NAME} | grep -v grep | awk '{print $2}')
|
||||
echo 'flume '${JAR_NAME}' total process-->'${NUM1}
|
||||
if [ "${NUM1}" -ge "5" ];then
|
||||
for pid1 in $pids1
|
||||
do
|
||||
echo 'flume '${JAR_NAME}' process-->'$pid1
|
||||
done
|
||||
fi
|
||||
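In this directory JAR_NAME expands to five filenames (proxy_f2k1.properties through proxy_f2k5.properties), so the single unquoted grep above does not behave as intended: grep takes the first name as the pattern and the rest as files to search. A per-file count, sketched here only as a possible adjustment, avoids that.
# Sketch (not the committed script): count running agents per properties file.
for conf in `ls ${BASE_DIR} | grep f2k | grep .properties$`; do
    n=`ps -ef | grep $conf | grep -v grep | wc -l`
    echo 'flume '$conf' running process count-->'$n
done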
39
internal-flume/conf/proxy/dae_proxy.sh
Executable file
39
internal-flume/conf/proxy/dae_proxy.sh
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/bin/sh
|
||||
|
||||
#JAR_NAME=ktk_ip_asn.properties
|
||||
PROPERTIES_NAME=proxy_f2k
|
||||
# Flume agent name
|
||||
FLUME_NAME=proxy
|
||||
# Flume root directory
|
||||
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
|
||||
# Current directory
|
||||
CONF_DIR=$(cd $(dirname $0); pwd)
|
||||
# Total number of processes
|
||||
PROCESS_SUM=5
|
||||
|
||||
echo "##############################################################" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
|
||||
id=0 # index appended to the properties file name; no special meaning
|
||||
flag=0 # 0 = initializing; 1 = all processes started successfully; 2 = a process died unexpectedly, the remaining ones were killed, and everything is being restarted
|
||||
while true ; do
|
||||
NUM=`ps -ef | grep ${PROPERTIES_NAME} | grep -v grep | grep -v dae |wc -l`
|
||||
pids=$(ps -ef | grep ${PROPERTIES_NAME}\* | grep properties | awk '{print $2}')
|
||||
time_stamp=$(date +%Y%m%d%H%M%S)
|
||||
# If the process count no longer matches the configured total after a full start, kill every process and restart
|
||||
if [[ "${NUM}" -ne ${PROCESS_SUM} && $flag -eq "1" ]];then
|
||||
for pid in $pids
|
||||
do
|
||||
kill -9 $pid
|
||||
done
|
||||
flag=2
|
||||
# If fewer processes are running than defined, start another one
|
||||
elif [ "${NUM}" -lt ${PROCESS_SUM} ];then
|
||||
id=$(( ( ($id) % $PROCESS_SUM ) + 1 ))
|
||||
nohup ${BASE_DIR}/bin/flume-ng agent -n ${FLUME_NAME} -c ${CONF_DIR} -f ${CONF_DIR}/${PROPERTIES_NAME}$id.properties >/dev/null 2>&1 &
|
||||
echo "${time_stamp} ---> the ${PROPERTIES_NAME}_APP restart ---> $id" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
|
||||
# Once the process count equals the configured total, set flag to 1 and reset id to 0
|
||||
elif [ "${NUM}" -eq ${PROCESS_SUM} ];then
|
||||
flag=1
|
||||
id=0
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
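The loop above is a simple watchdog: every 10 seconds it starts one more agent (cycling id from 1 to PROCESS_SUM) until five proxy_f2k agents are running, and if the count later drifts it kills them all and rebuilds. Two quick checks while it runs, sketched here assuming you are in this conf/proxy directory:
ps -ef | grep proxy_f2k | grep properties | grep -v grep | wc -l   # should settle at 5
tail -f restart_log/restart_proxy_f2k.log                          # one line per (re)start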
35
internal-flume/conf/proxy/flume-env.sh
Executable file
35
internal-flume/conf/proxy/flume-env.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
|
||||
# during Flume startup.
|
||||
|
||||
# Environment variables can be set here.
|
||||
|
||||
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
|
||||
|
||||
# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
|
||||
#export JAVA_OPTS="-Xms1024m -Xmx3072m -Dcom.sun.management.jmxremote"
|
||||
export JAVA_OPTS="-Xms512m -Xmx2048m -Xss256k -Xmn1g -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Dcom.sun.management.jmxremote"
|
||||
|
||||
# Let Flume write raw event data and configuration information to its log files for debugging
|
||||
# purposes. Enabling these flags is not recommended in production,
|
||||
# as it may result in logging sensitive user information or encryption secrets.
|
||||
# export JAVA_OPTS="$JAVA_OPTS -Dorg.apache.flume.log.rawdata=true -Dorg.apache.flume.log.printconfig=true "
|
||||
|
||||
# Note that the Flume conf directory is always included in the classpath.
|
||||
#FLUME_CLASSPATH=""
|
||||
|
||||
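The JAVA_OPTS line above fixes the heap at 512m-2048m with CMS and enables JMX without pinning a port. A quick way to confirm a running agent actually picked these flags up is sketched below; it assumes the JDK's jinfo tool from the JAVA_HOME above is on the PATH.
pid=`ps -ef | grep proxy_f2k1.properties | grep -v grep | awk '{print $2}' | head -n 1`
jinfo -flags $pid | grep -E 'MaxHeapSize|UseConcMarkSweepGC'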
40
internal-flume/conf/proxy/proxy_f2k1.properties
Normal file
40
internal-flume/conf/proxy/proxy_f2k1.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
proxy.sources = s2
|
||||
proxy.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
proxy.sources.s2.channels = c2
|
||||
#taildir source
|
||||
proxy.sources.s2.type = TAILDIR
|
||||
proxy.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
proxy.sources.s2.positionFile = /home/test/1proxy_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
proxy.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
proxy.sources.s2.filegroups.f1 = /home/data/192.168.60.101/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f2 = /home/data/192.168.60.102/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f3 = /home/data/192.168.60.103/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f4 = /home/data/192.168.60.104/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f5 = /home/data/192.168.60.105/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f6 = /home/data/192.168.60.106/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f7 = /home/data/192.168.60.107/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f8 = /home/data/192.168.60.108/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f9 = /home/data/192.168.60.109/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f10 = /home/data/192.168.60.110/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f11 = /home/data/192.168.60.111/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f12 = /home/data/192.168.60.112/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
proxy.sources.s2.filegroups.f13 = /home/data/192.168.60.113/PROXY-EVENT-LOG/.*[0-1].dat
|
||||
|
||||
|
||||
proxy.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
proxy.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
proxy.channels.c2.brokerList = 192.168.40.222:9093
|
||||
proxy.channels.c2.topic = PROXY-EVENT-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
proxy.channels.c2.parseAsFlumeEvent = false
|
||||
proxy.channels.c2.kafka.producer.acks = 1
|
||||
proxy.channels.c2.producer.type=sync
|
||||
proxy.channels.c2.queue.buffering.max.ms = 5000
|
||||
proxy.channels.c2.queue.buffering.max.messages=20000
|
||||
proxy.channels.c2.batch.num.messages=5000
|
||||
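With parseAsFlumeEvent=false the Kafka channel publishes the tailed lines as plain text straight into the PROXY-EVENT-LOG topic, so no separate sink is configured. A quick way to confirm records are arriving is sketched below; kafka-console-consumer.sh ships with the Kafka broker, and its exact location is an assumption.
kafka-console-consumer.sh --bootstrap-server 192.168.40.222:9093 \
    --topic PROXY-EVENT-LOG --from-beginning --max-messages 10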
40
internal-flume/conf/proxy/proxy_f2k2.properties
Normal file
40
internal-flume/conf/proxy/proxy_f2k2.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
proxy.sources = s2
|
||||
proxy.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
proxy.sources.s2.channels = c2
|
||||
#taildir source
|
||||
proxy.sources.s2.type = TAILDIR
|
||||
proxy.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
proxy.sources.s2.positionFile = /home/test/2proxy_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
proxy.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
proxy.sources.s2.filegroups.f1 = /home/data/192.168.60.101/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f2 = /home/data/192.168.60.102/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f3 = /home/data/192.168.60.103/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f4 = /home/data/192.168.60.104/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f5 = /home/data/192.168.60.105/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f6 = /home/data/192.168.60.106/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f7 = /home/data/192.168.60.107/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f8 = /home/data/192.168.60.108/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f9 = /home/data/192.168.60.109/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f10 = /home/data/192.168.60.110/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f11 = /home/data/192.168.60.111/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f12 = /home/data/192.168.60.112/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
proxy.sources.s2.filegroups.f13 = /home/data/192.168.60.113/PROXY-EVENT-LOG/.*[2-3].dat
|
||||
|
||||
|
||||
proxy.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
proxy.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
proxy.channels.c2.brokerList = 192.168.40.222:9093
|
||||
proxy.channels.c2.topic = PROXY-EVENT-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
proxy.channels.c2.parseAsFlumeEvent = false
|
||||
proxy.channels.c2.kafka.producer.acks = 1
|
||||
proxy.channels.c2.producer.type=sync
|
||||
proxy.channels.c2.queue.buffering.max.ms = 5000
|
||||
proxy.channels.c2.queue.buffering.max.messages=20000
|
||||
proxy.channels.c2.batch.num.messages=5000
|
||||
40
internal-flume/conf/proxy/proxy_f2k3.properties
Normal file
40
internal-flume/conf/proxy/proxy_f2k3.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
proxy.sources = s2
|
||||
proxy.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
proxy.sources.s2.channels = c2
|
||||
#taildir source
|
||||
proxy.sources.s2.type = TAILDIR
|
||||
proxy.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
proxy.sources.s2.positionFile = /home/test/3proxy_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
proxy.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
proxy.sources.s2.filegroups.f1 = /home/data/192.168.60.101/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f2 = /home/data/192.168.60.102/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f3 = /home/data/192.168.60.103/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f4 = /home/data/192.168.60.104/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f5 = /home/data/192.168.60.105/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f6 = /home/data/192.168.60.106/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f7 = /home/data/192.168.60.107/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f8 = /home/data/192.168.60.108/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f9 = /home/data/192.168.60.109/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f10 = /home/data/192.168.60.110/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f11 = /home/data/192.168.60.111/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f12 = /home/data/192.168.60.112/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
proxy.sources.s2.filegroups.f13 = /home/data/192.168.60.113/PROXY-EVENT-LOG/.*[4-5].dat
|
||||
|
||||
|
||||
proxy.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
proxy.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
proxy.channels.c2.brokerList = 192.168.40.222:9093
|
||||
proxy.channels.c2.topic = PROXY-EVENT-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
proxy.channels.c2.parseAsFlumeEvent = false
|
||||
proxy.channels.c2.kafka.producer.acks = 1
|
||||
proxy.channels.c2.producer.type=sync
|
||||
proxy.channels.c2.queue.buffering.max.ms = 5000
|
||||
proxy.channels.c2.queue.buffering.max.messages=20000
|
||||
proxy.channels.c2.batch.num.messages=5000
|
||||
40
internal-flume/conf/proxy/proxy_f2k4.properties
Normal file
40
internal-flume/conf/proxy/proxy_f2k4.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
proxy.sources = s2
|
||||
proxy.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
proxy.sources.s2.channels = c2
|
||||
#taildir source
|
||||
proxy.sources.s2.type = TAILDIR
|
||||
proxy.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
proxy.sources.s2.positionFile = /home/test/4proxy_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
proxy.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
proxy.sources.s2.filegroups.f1 = /home/data/192.168.60.101/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f2 = /home/data/192.168.60.102/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f3 = /home/data/192.168.60.103/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f4 = /home/data/192.168.60.104/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f5 = /home/data/192.168.60.105/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f6 = /home/data/192.168.60.106/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f7 = /home/data/192.168.60.107/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f8 = /home/data/192.168.60.108/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f9 = /home/data/192.168.60.109/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f10 = /home/data/192.168.60.110/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f11 = /home/data/192.168.60.111/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f12 = /home/data/192.168.60.112/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
proxy.sources.s2.filegroups.f13 = /home/data/192.168.60.113/PROXY-EVENT-LOG/.*[6-7].dat
|
||||
|
||||
|
||||
proxy.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
proxy.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
proxy.channels.c2.brokerList = 192.168.40.222:9093
|
||||
proxy.channels.c2.topic = PROXY-EVENT-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
proxy.channels.c2.parseAsFlumeEvent = false
|
||||
proxy.channels.c2.kafka.producer.acks = 1
|
||||
proxy.channels.c2.producer.type=sync
|
||||
proxy.channels.c2.queue.buffering.max.ms = 5000
|
||||
proxy.channels.c2.queue.buffering.max.messages=20000
|
||||
proxy.channels.c2.batch.num.messages=5000
|
||||
40
internal-flume/conf/proxy/proxy_f2k5.properties
Normal file
40
internal-flume/conf/proxy/proxy_f2k5.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
proxy.sources = s2
|
||||
proxy.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
proxy.sources.s2.channels = c2
|
||||
#taildir source
|
||||
proxy.sources.s2.type = TAILDIR
|
||||
proxy.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
proxy.sources.s2.positionFile = /home/test/5proxy_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
proxy.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
proxy.sources.s2.filegroups.f1 = /home/data/192.168.60.101/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f2 = /home/data/192.168.60.102/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f3 = /home/data/192.168.60.103/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f4 = /home/data/192.168.60.104/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f5 = /home/data/192.168.60.105/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f6 = /home/data/192.168.60.106/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f7 = /home/data/192.168.60.107/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f8 = /home/data/192.168.60.108/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f9 = /home/data/192.168.60.109/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f10 = /home/data/192.168.60.110/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f11 = /home/data/192.168.60.111/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f12 = /home/data/192.168.60.112/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
proxy.sources.s2.filegroups.f13 = /home/data/192.168.60.113/PROXY-EVENT-LOG/.*[8-9].dat
|
||||
|
||||
|
||||
proxy.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
proxy.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
proxy.channels.c2.brokerList = 192.168.40.222:9093
|
||||
proxy.channels.c2.topic = PROXY-EVENT-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
proxy.channels.c2.parseAsFlumeEvent = false
|
||||
proxy.channels.c2.kafka.producer.acks = 1
|
||||
proxy.channels.c2.producer.type=sync
|
||||
proxy.channels.c2.queue.buffering.max.ms = 5000
|
||||
proxy.channels.c2.queue.buffering.max.messages=20000
|
||||
proxy.channels.c2.batch.num.messages=5000
|
||||
34
internal-flume/conf/proxy/restart_log/restart_proxy_f2k.log
Normal file
34
internal-flume/conf/proxy/restart_log/restart_proxy_f2k.log
Normal file
@@ -0,0 +1,34 @@
|
||||
##############################################################
|
||||
20200512174751 ---> the proxy_f2k_APP restart ---> 1
|
||||
20200512174801 ---> the proxy_f2k_APP restart ---> 2
|
||||
20200512174811 ---> the proxy_f2k_APP restart ---> 3
|
||||
20200512174821 ---> the proxy_f2k_APP restart ---> 4
|
||||
20200512174831 ---> the proxy_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512175511 ---> the proxy_f2k_APP restart ---> 1
|
||||
20200512175521 ---> the proxy_f2k_APP restart ---> 2
|
||||
20200512175531 ---> the proxy_f2k_APP restart ---> 3
|
||||
20200512175541 ---> the proxy_f2k_APP restart ---> 4
|
||||
20200512175551 ---> the proxy_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512180513 ---> the proxy_f2k_APP restart ---> 1
|
||||
20200512180523 ---> the proxy_f2k_APP restart ---> 2
|
||||
20200512180533 ---> the proxy_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512180536 ---> the proxy_f2k_APP restart ---> 1
|
||||
20200512180546 ---> the proxy_f2k_APP restart ---> 2
|
||||
20200512180556 ---> the proxy_f2k_APP restart ---> 3
|
||||
20200512180606 ---> the proxy_f2k_APP restart ---> 4
|
||||
20200512180616 ---> the proxy_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512183204 ---> the proxy_f2k_APP restart ---> 1
|
||||
20200512183214 ---> the proxy_f2k_APP restart ---> 2
|
||||
20200512183224 ---> the proxy_f2k_APP restart ---> 3
|
||||
20200512183234 ---> the proxy_f2k_APP restart ---> 4
|
||||
20200512183245 ---> the proxy_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512184503 ---> the proxy_f2k_APP restart ---> 1
|
||||
20200512184513 ---> the proxy_f2k_APP restart ---> 2
|
||||
20200512184523 ---> the proxy_f2k_APP restart ---> 3
|
||||
20200512184533 ---> the proxy_f2k_APP restart ---> 4
|
||||
20200512184543 ---> the proxy_f2k_APP restart ---> 5
|
||||
5
internal-flume/conf/proxy/start_flume.sh
Executable file
5
internal-flume/conf/proxy/start_flume.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
nohup ${BASE_DIR}/${DAE_NAME} >/dev/null 2>&1 &
|
||||
31
internal-flume/conf/proxy/stop_flume.sh
Executable file
31
internal-flume/conf/proxy/stop_flume.sh
Executable file
@@ -0,0 +1,31 @@
|
||||
#!/bin/sh
|
||||
|
||||
#DAE_NAME=dae_k2ha.sh
|
||||
#JAR_NAME=k2ha.properties
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
|
||||
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
|
||||
JAR_NAME=`ls ${BASE_DIR} | grep f2k\* | grep .properties$`
|
||||
|
||||
NUM1=`ps -ef | grep ${DAE_NAME} | grep -v grep | wc -l`
|
||||
pids1=$(ps -ef | grep ${DAE_NAME} | grep -v grep | awk '{print $2}')
|
||||
if [ "${NUM1}" -ge "1" ];then
|
||||
for pid1 in $pids1
|
||||
do
|
||||
kill -9 $pid1
|
||||
echo 'killed '${DAE_NAME}' process-->'$pid1
|
||||
done
|
||||
fi
|
||||
|
||||
for jar in $JAR_NAME ; do
|
||||
|
||||
NUM2=`ps -ef | grep $jar | grep -v grep | wc -l`
|
||||
pids2=$(ps -ef | grep $jar | grep -v grep | awk '{print $2}')
|
||||
if [ "${NUM2}" -ge "1" ];then
|
||||
for pid2 in $pids2
|
||||
do
|
||||
kill -9 $pid2
|
||||
echo 'killed '${JAR_NAME}' process-->'$pid2
|
||||
done
|
||||
fi
|
||||
done
|
||||
10
internal-flume/conf/radius/clean_start_flume.sh
Executable file
10
internal-flume/conf/radius/clean_start_flume.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
|
||||
|
||||
|
||||
# Clear logs, checkpoints, and historical data
|
||||
rm -rf ${BASE_DIR}/logs/*
|
||||
|
||||
nohup ${BASE_DIR}/${DAE_NAME} $1 $2 >/dev/null 2>&1 &
|
||||
15
internal-flume/conf/radius/count_flume.sh
Executable file
15
internal-flume/conf/radius/count_flume.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
|
||||
|
||||
JAR_NAME=`ls ${BASE_DIR} | grep f2k* | grep .properties$`
|
||||
|
||||
NUM1=`ps -ef | grep ${JAR_NAME} | grep -v grep | wc -l`
|
||||
pids1=$(ps -ef | grep ${JAR_NAME} | grep -v grep | awk '{print $2}')
|
||||
echo 'flume '${JAR_NAME}' total process-->'${NUM1}
|
||||
if [ "${NUM1}" -ge "5" ];then
|
||||
for pid1 in $pids1
|
||||
do
|
||||
echo 'flume '${JAR_NAME}' process-->'$pid1
|
||||
done
|
||||
fi
|
||||
39
internal-flume/conf/radius/dae_radius.sh
Executable file
39
internal-flume/conf/radius/dae_radius.sh
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/bin/sh
|
||||
|
||||
#JAR_NAME=ktk_ip_asn.properties
|
||||
PROPERTIES_NAME=radius_f2k
|
||||
# Flume agent name
|
||||
FLUME_NAME=radius
|
||||
# Flume root directory
|
||||
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
|
||||
# Current directory
|
||||
CONF_DIR=$(cd $(dirname $0); pwd)
|
||||
# Total number of processes
|
||||
PROCESS_SUM=5
|
||||
|
||||
echo "##############################################################" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
|
||||
id=0 # index appended to the properties file name; no special meaning
|
||||
flag=0 # 0 = initializing; 1 = all processes started successfully; 2 = a process died unexpectedly, the remaining ones were killed, and everything is being restarted
|
||||
while true ; do
|
||||
NUM=`ps -ef | grep ${PROPERTIES_NAME} | grep -v grep | grep -v dae |wc -l`
|
||||
pids=$(ps -ef | grep ${PROPERTIES_NAME}\* | grep properties | awk '{print $2}')
|
||||
time_stamp=$(date +%Y%m%d%H%M%S)
|
||||
# If the process count no longer matches the configured total after a full start, kill every process and restart
|
||||
if [[ "${NUM}" -ne ${PROCESS_SUM} && $flag -eq "1" ]];then
|
||||
for pid in $pids
|
||||
do
|
||||
kill -9 $pid
|
||||
done
|
||||
flag=2
|
||||
# If fewer processes are running than defined, start another one
|
||||
elif [ "${NUM}" -lt ${PROCESS_SUM} ];then
|
||||
id=$(( ( ($id) % $PROCESS_SUM ) + 1 ))
|
||||
nohup ${BASE_DIR}/bin/flume-ng agent -n ${FLUME_NAME} -c ${CONF_DIR} -f ${CONF_DIR}/${PROPERTIES_NAME}$id.properties >/dev/null 2>&1 &
|
||||
echo "${time_stamp} ---> the ${PROPERTIES_NAME}_APP restart ---> $id" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
|
||||
# Once the process count equals the configured total, set flag to 1 and reset id to 0
|
||||
elif [ "${NUM}" -eq ${PROCESS_SUM} ];then
|
||||
flag=1
|
||||
id=0
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
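PROCESS_SUM=5 in the script above has to stay in step with the number of radius_f2k*.properties files in this directory, otherwise the watchdog keeps killing and restarting agents. One way to derive it instead of hard-coding it, shown only as a sketch and not how the committed script works:
PROCESS_SUM=`ls ${CONF_DIR} | grep ^radius_f2k | grep -c .properties$`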
35
internal-flume/conf/radius/flume-env.sh
Executable file
35
internal-flume/conf/radius/flume-env.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
|
||||
# during Flume startup.
|
||||
|
||||
# Environment variables can be set here.
|
||||
|
||||
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73
|
||||
|
||||
# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
|
||||
#export JAVA_OPTS="-Xms1024m -Xmx3072m -Dcom.sun.management.jmxremote"
|
||||
export JAVA_OPTS="-Xms512m -Xmx2048m -Xss256k -Xmn1g -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Dcom.sun.management.jmxremote"
|
||||
|
||||
# Let Flume write raw event data and configuration information to its log files for debugging
|
||||
# purposes. Enabling these flags is not recommended in production,
|
||||
# as it may result in logging sensitive user information or encryption secrets.
|
||||
# export JAVA_OPTS="$JAVA_OPTS -Dorg.apache.flume.log.rawdata=true -Dorg.apache.flume.log.printconfig=true "
|
||||
|
||||
# Note that the Flume conf directory is always included in the classpath.
|
||||
#FLUME_CLASSPATH=""
|
||||
|
||||
40
internal-flume/conf/radius/radius_f2k1.properties
Normal file
40
internal-flume/conf/radius/radius_f2k1.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
radius.sources = s2
|
||||
radius.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
radius.sources.s2.channels = c2
|
||||
#taildir source
|
||||
radius.sources.s2.type = TAILDIR
|
||||
radius.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
radius.sources.s2.positionFile = /home/test/1radius_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
radius.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
radius.sources.s2.filegroups.f1 = /home/data/192.168.60.101/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f2 = /home/data/192.168.60.102/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f3 = /home/data/192.168.60.103/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f4 = /home/data/192.168.60.104/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f5 = /home/data/192.168.60.105/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f6 = /home/data/192.168.60.106/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f7 = /home/data/192.168.60.107/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f8 = /home/data/192.168.60.108/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f9 = /home/data/192.168.60.109/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f10 = /home/data/192.168.60.110/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f11 = /home/data/192.168.60.111/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f12 = /home/data/192.168.60.112/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
radius.sources.s2.filegroups.f13 = /home/data/192.168.60.113/RADIUS-RECORD-LOG/.*[0-1].dat
|
||||
|
||||
|
||||
radius.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
radius.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
radius.channels.c2.brokerList = 192.168.40.222:9093
|
||||
radius.channels.c2.topic = RADIUS-RECORD-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
radius.channels.c2.parseAsFlumeEvent = false
|
||||
radius.channels.c2.kafka.producer.acks = 1
|
||||
radius.channels.c2.producer.type=sync
|
||||
radius.channels.c2.queue.buffering.max.ms = 5000
|
||||
radius.channels.c2.queue.buffering.max.messages=20000
|
||||
radius.channels.c2.batch.num.messages=5000
|
||||
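The positionFile above records the inode and byte offset of every tailed .dat file, which is why it has to be persisted on the host: after a restart, TAILDIR resumes from those offsets instead of re-shipping whole files. The file is plain JSON and can be inspected directly; the sketch below assumes a python interpreter is installed and uses the path from this config.
python -m json.tool /home/test/1radius_position.json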
40
internal-flume/conf/radius/radius_f2k2.properties
Normal file
40
internal-flume/conf/radius/radius_f2k2.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
radius.sources = s2
|
||||
radius.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
radius.sources.s2.channels = c2
|
||||
#taildir source
|
||||
radius.sources.s2.type = TAILDIR
|
||||
radius.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
radius.sources.s2.positionFile = /home/test/2radius_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
radius.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
radius.sources.s2.filegroups.f1 = /home/data/192.168.60.101/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f2 = /home/data/192.168.60.102/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f3 = /home/data/192.168.60.103/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f4 = /home/data/192.168.60.104/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f5 = /home/data/192.168.60.105/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f6 = /home/data/192.168.60.106/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f7 = /home/data/192.168.60.107/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f8 = /home/data/192.168.60.108/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f9 = /home/data/192.168.60.109/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f10 = /home/data/192.168.60.110/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f11 = /home/data/192.168.60.111/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f12 = /home/data/192.168.60.112/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
radius.sources.s2.filegroups.f13 = /home/data/192.168.60.113/RADIUS-RECORD-LOG/.*[2-3].dat
|
||||
|
||||
|
||||
radius.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
radius.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
radius.channels.c2.brokerList = 192.168.40.222:9093
|
||||
radius.channels.c2.topic = RADIUS-RECORD-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
radius.channels.c2.parseAsFlumeEvent = false
|
||||
radius.channels.c2.kafka.producer.acks = 1
|
||||
radius.channels.c2.producer.type=sync
|
||||
radius.channels.c2.queue.buffering.max.ms = 5000
|
||||
radius.channels.c2.queue.buffering.max.messages=20000
|
||||
radius.channels.c2.batch.num.messages=5000
|
||||
40
internal-flume/conf/radius/radius_f2k3.properties
Normal file
40
internal-flume/conf/radius/radius_f2k3.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
radius.sources = s2
|
||||
radius.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
radius.sources.s2.channels = c2
|
||||
#taildir source
|
||||
radius.sources.s2.type = TAILDIR
|
||||
radius.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
radius.sources.s2.positionFile = /home/test/3radius_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
radius.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
radius.sources.s2.filegroups.f1 = /home/data/192.168.60.101/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f2 = /home/data/192.168.60.102/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f3 = /home/data/192.168.60.103/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f4 = /home/data/192.168.60.104/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f5 = /home/data/192.168.60.105/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f6 = /home/data/192.168.60.106/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f7 = /home/data/192.168.60.107/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f8 = /home/data/192.168.60.108/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f9 = /home/data/192.168.60.109/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f10 = /home/data/192.168.60.110/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f11 = /home/data/192.168.60.111/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f12 = /home/data/192.168.60.112/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
radius.sources.s2.filegroups.f13 = /home/data/192.168.60.113/RADIUS-RECORD-LOG/.*[4-5].dat
|
||||
|
||||
|
||||
radius.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
radius.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
radius.channels.c2.brokerList = 192.168.40.222:9093
|
||||
radius.channels.c2.topic = RADIUS-RECORD-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
radius.channels.c2.parseAsFlumeEvent = false
|
||||
radius.channels.c2.kafka.producer.acks = 1
|
||||
radius.channels.c2.producer.type=sync
|
||||
radius.channels.c2.queue.buffering.max.ms = 5000
|
||||
radius.channels.c2.queue.buffering.max.messages=20000
|
||||
radius.channels.c2.batch.num.messages=5000
|
||||
40
internal-flume/conf/radius/radius_f2k4.properties
Normal file
40
internal-flume/conf/radius/radius_f2k4.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
radius.sources = s2
|
||||
radius.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
radius.sources.s2.channels = c2
|
||||
#taildir source
|
||||
radius.sources.s2.type = TAILDIR
|
||||
radius.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
radius.sources.s2.positionFile = /home/test/4radius_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
radius.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
radius.sources.s2.filegroups.f1 = /home/data/192.168.60.101/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f2 = /home/data/192.168.60.102/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f3 = /home/data/192.168.60.103/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f4 = /home/data/192.168.60.104/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f5 = /home/data/192.168.60.105/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f6 = /home/data/192.168.60.106/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f7 = /home/data/192.168.60.107/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f8 = /home/data/192.168.60.108/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f9 = /home/data/192.168.60.109/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f10 = /home/data/192.168.60.110/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f11 = /home/data/192.168.60.111/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f12 = /home/data/192.168.60.112/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
radius.sources.s2.filegroups.f13 = /home/data/192.168.60.113/RADIUS-RECORD-LOG/.*[6-7].dat
|
||||
|
||||
|
||||
radius.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
radius.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
radius.channels.c2.brokerList = 192.168.40.222:9093
|
||||
radius.channels.c2.topic = RADIUS-RECORD-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
radius.channels.c2.parseAsFlumeEvent = false
|
||||
radius.channels.c2.kafka.producer.acks = 1
|
||||
radius.channels.c2.producer.type=sync
|
||||
radius.channels.c2.queue.buffering.max.ms = 5000
|
||||
radius.channels.c2.queue.buffering.max.messages=20000
|
||||
radius.channels.c2.batch.num.messages=5000
|
||||
40
internal-flume/conf/radius/radius_f2k5.properties
Normal file
40
internal-flume/conf/radius/radius_f2k5.properties
Normal file
@@ -0,0 +1,40 @@
|
||||
radius.sources = s2
|
||||
radius.channels = c2
|
||||
# Specify which channel the data collected by the source is sent to
|
||||
radius.sources.s2.channels = c2
|
||||
#taildir source
|
||||
radius.sources.s2.type = TAILDIR
|
||||
radius.sources.s2.channels = c2
|
||||
# Needs to be mapped to the host machine for persistence
|
||||
radius.sources.s2.positionFile = /home/test/5radius_position.json
|
||||
# The data from the 13 hosts needs to be split into file groups
|
||||
radius.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
|
||||
# Grouped by the trailing digit [0-9]; at most 10 processes are supported for now, which can be increased
|
||||
radius.sources.s2.filegroups.f1 = /home/data/192.168.60.101/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f2 = /home/data/192.168.60.102/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f3 = /home/data/192.168.60.103/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f4 = /home/data/192.168.60.104/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f5 = /home/data/192.168.60.105/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f6 = /home/data/192.168.60.106/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f7 = /home/data/192.168.60.107/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f8 = /home/data/192.168.60.108/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f9 = /home/data/192.168.60.109/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f10 = /home/data/192.168.60.110/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f11 = /home/data/192.168.60.111/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f12 = /home/data/192.168.60.112/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
radius.sources.s2.filegroups.f13 = /home/data/192.168.60.113/RADIUS-RECORD-LOG/.*[8-9].dat
|
||||
|
||||
|
||||
radius.sources.s2.maxBatchCount = 10000
|
||||
|
||||
# The Kafka channel acts as the producer
|
||||
radius.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
|
||||
radius.channels.c2.brokerList = 192.168.40.222:9093
|
||||
radius.channels.c2.topic = RADIUS-RECORD-LOG
|
||||
# false writes records in as plain text; true writes them in as Flume events, which come out garbled; the default is true
|
||||
radius.channels.c2.parseAsFlumeEvent = false
|
||||
radius.channels.c2.kafka.producer.acks = 1
|
||||
radius.channels.c2.producer.type=sync
|
||||
radius.channels.c2.queue.buffering.max.ms = 5000
|
||||
radius.channels.c2.queue.buffering.max.messages=20000
|
||||
radius.channels.c2.batch.num.messages=5000
|
||||
@@ -0,0 +1,28 @@
|
||||
##############################################################
|
||||
20200512173126 ---> the radius_f2k_APP restart ---> 1
|
||||
20200512173136 ---> the radius_f2k_APP restart ---> 2
|
||||
20200512173146 ---> the radius_f2k_APP restart ---> 3
|
||||
20200512173156 ---> the radius_f2k_APP restart ---> 4
|
||||
20200512173206 ---> the radius_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512180513 ---> the radius_f2k_APP restart ---> 1
|
||||
20200512180523 ---> the radius_f2k_APP restart ---> 2
|
||||
20200512180533 ---> the radius_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512180536 ---> the radius_f2k_APP restart ---> 1
|
||||
20200512180546 ---> the radius_f2k_APP restart ---> 2
|
||||
20200512180556 ---> the radius_f2k_APP restart ---> 3
|
||||
20200512180606 ---> the radius_f2k_APP restart ---> 4
|
||||
20200512180616 ---> the radius_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512183204 ---> the radius_f2k_APP restart ---> 1
|
||||
20200512183214 ---> the radius_f2k_APP restart ---> 2
|
||||
20200512183224 ---> the radius_f2k_APP restart ---> 3
|
||||
20200512183235 ---> the radius_f2k_APP restart ---> 4
|
||||
20200512183245 ---> the radius_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512184503 ---> the radius_f2k_APP restart ---> 1
|
||||
20200512184513 ---> the radius_f2k_APP restart ---> 2
|
||||
20200512184523 ---> the radius_f2k_APP restart ---> 3
|
||||
20200512184533 ---> the radius_f2k_APP restart ---> 4
|
||||
20200512184543 ---> the radius_f2k_APP restart ---> 5
|
||||
5
internal-flume/conf/radius/start_flume.sh
Executable file
5
internal-flume/conf/radius/start_flume.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
BASE_DIR=$(cd $(dirname $0); pwd)
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
nohup ${BASE_DIR}/${DAE_NAME} >/dev/null 2>&1 &
|
||||
31
internal-flume/conf/radius/stop_flume.sh
Executable file
31
internal-flume/conf/radius/stop_flume.sh
Executable file
@@ -0,0 +1,31 @@
#!/bin/sh

#DAE_NAME=dae_k2ha.sh
#JAR_NAME=k2ha.properties
BASE_DIR=$(cd $(dirname $0); pwd)

DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
JAR_NAME=`ls ${BASE_DIR} | grep f2k\* | grep .properties$`

NUM1=`ps -ef | grep ${DAE_NAME} | grep -v grep | wc -l`
pids1=$(ps -ef | grep ${DAE_NAME} | grep -v grep | awk '{print $2}')
if [ "${NUM1}" -ge "1" ];then
    for pid1 in $pids1
    do
        kill -9 $pid1
        echo 'killed '${DAE_NAME}' process-->'$pid1
    done
fi

for jar in $JAR_NAME ; do

    NUM2=`ps -ef | grep $jar | grep -v grep | wc -l`
    pids2=$(ps -ef | grep $jar | grep -v grep | awk '{print $2}')
    if [ "${NUM2}" -ge "1" ];then
        for pid2 in $pids2
        do
            kill -9 $pid2
            echo 'killed '$jar' process-->'$pid2
        done
    fi
done
10
internal-flume/conf/security/clean_start_flume.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)
DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`


# Clear the logs, checkpoint, and historical data
rm -rf ${BASE_DIR}/logs/*

nohup ${BASE_DIR}/${DAE_NAME} $1 $2 >/dev/null 2>&1 &
15
internal-flume/conf/security/count_flume.sh
Executable file
@@ -0,0 +1,15 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)

JAR_NAME=`ls ${BASE_DIR} | grep f2k* | grep .properties$`

NUM1=`ps -ef | grep ${JAR_NAME} | grep -v grep | wc -l`
pids1=$(ps -ef | grep ${JAR_NAME} | grep -v grep | awk '{print $2}')
echo 'flume '${JAR_NAME}' total process-->'${NUM1}
if [ "${NUM1}" -ge "5" ];then
    for pid1 in $pids1
    do
        echo 'flume '${JAR_NAME}' process-->'$pid1
    done
fi
39
internal-flume/conf/security/dae_security.sh
Executable file
@@ -0,0 +1,39 @@
#!/bin/sh

#JAR_NAME=ktk_ip_asn.properties
PROPERTIES_NAME=security_f2k
# Flume agent name
FLUME_NAME=security
# Flume installation root
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
# Current conf directory
CONF_DIR=$(cd $(dirname $0); pwd)
# Total number of agent processes
PROCESS_SUM=5

echo "##############################################################" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
id=0    # sequence number appended to the properties file name; nothing else depends on it
flag=0  # 0 = initial state, 1 = all processes started successfully, 2 = a process died unexpectedly, the remaining ones were killed and everything is being restarted
while true ; do
    NUM=`ps -ef | grep ${PROPERTIES_NAME} | grep -v grep | grep -v dae | wc -l`
    pids=$(ps -ef | grep ${PROPERTIES_NAME}\* | grep properties | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    # If the process count no longer matches the configured number, kill all processes and restart
    if [[ "${NUM}" -ne ${PROCESS_SUM} && $flag -eq "1" ]];then
        for pid in $pids
        do
            kill -9 $pid
        done
        flag=2
    # If fewer processes are running than defined, start the next one
    elif [ "${NUM}" -lt ${PROCESS_SUM} ];then
        id=$(( ( ($id) % $PROCESS_SUM ) + 1 ))
        nohup ${BASE_DIR}/bin/flume-ng agent -n ${FLUME_NAME} -c ${CONF_DIR} -f ${CONF_DIR}/${PROPERTIES_NAME}$id.properties >/dev/null 2>&1 &
        echo "${time_stamp} ---> the ${PROPERTIES_NAME}_APP restart ---> $id" >> ${CONF_DIR}/restart_log/restart_${PROPERTIES_NAME}.log
    # If the process count equals the target, set flag to 1 and reset id to 0
    elif [ "${NUM}" -eq ${PROCESS_SUM} ];then
        flag=1
        id=0
    fi
    sleep 10
done
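The id arithmetic above is what spreads the restarts across the five properties files: each pass through the loop starts the next numbered config until all PROCESS_SUM agents are up. A standalone sketch of how the counter cycles (the values are illustrative, the expression is copied from the script):

PROCESS_SUM=5
id=0
for i in 1 2 3 4 5 6 7 ; do
    id=$(( ( ($id) % $PROCESS_SUM ) + 1 ))
    echo "pass $i would launch security_f2k$id.properties"
done
# prints 1 2 3 4 5 1 2; once all five agents are running, the script resets id back to 0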
35
internal-flume/conf/security/flume-env.sh
Executable file
@@ -0,0 +1,35 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
# during Flume startup.

# Environment variables can be set here.

export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_73

# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
#export JAVA_OPTS="-Xms1024m -Xmx3072m -Dcom.sun.management.jmxremote"
export JAVA_OPTS="-Xms512m -Xmx2048m -Xss256k -Xmn1g -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Dcom.sun.management.jmxremote"

# Let Flume write raw event data and configuration information to its log files for debugging
# purposes. Enabling these flags is not recommended in production,
# as it may result in logging sensitive user information or encryption secrets.
# export JAVA_OPTS="$JAVA_OPTS -Dorg.apache.flume.log.rawdata=true -Dorg.apache.flume.log.printconfig=true "

# Note that the Flume conf directory is always included in the classpath.
#FLUME_CLASSPATH=""
@@ -0,0 +1,151 @@
|
||||
##############################################################
|
||||
20200512174120 ---> the security_f2k_APP restart ---> 1
|
||||
20200512174131 ---> the security_f2k_APP restart ---> 2
|
||||
20200512174141 ---> the security_f2k_APP restart ---> 3
|
||||
20200512174151 ---> the security_f2k_APP restart ---> 4
|
||||
20200512174201 ---> the security_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512180513 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180523 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180533 ---> the security_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512180536 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180546 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180556 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180606 ---> the security_f2k_APP restart ---> 4
|
||||
20200512180616 ---> the security_f2k_APP restart ---> 5
|
||||
20200512180626 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180636 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180647 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180657 ---> the security_f2k_APP restart ---> 4
|
||||
20200512180707 ---> the security_f2k_APP restart ---> 5
|
||||
20200512180717 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180727 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180737 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180747 ---> the security_f2k_APP restart ---> 4
|
||||
20200512180757 ---> the security_f2k_APP restart ---> 5
|
||||
20200512180807 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180817 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180827 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180837 ---> the security_f2k_APP restart ---> 4
|
||||
20200512180847 ---> the security_f2k_APP restart ---> 5
|
||||
20200512180857 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180907 ---> the security_f2k_APP restart ---> 2
|
||||
20200512180917 ---> the security_f2k_APP restart ---> 3
|
||||
20200512180927 ---> the security_f2k_APP restart ---> 4
|
||||
20200512180937 ---> the security_f2k_APP restart ---> 5
|
||||
20200512180947 ---> the security_f2k_APP restart ---> 1
|
||||
20200512180957 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181007 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181017 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181027 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181037 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181048 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181058 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181108 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181118 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181128 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181138 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181148 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181158 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181208 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181218 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181228 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181238 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181248 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181258 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181308 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181318 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181328 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181338 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181348 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181358 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181408 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181418 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181429 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181439 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181449 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181459 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181509 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181519 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181529 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181539 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181549 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181559 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181609 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181619 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181629 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181639 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181649 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181659 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181709 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181719 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181729 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181739 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181749 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181759 ---> the security_f2k_APP restart ---> 5
|
||||
20200512181809 ---> the security_f2k_APP restart ---> 1
|
||||
20200512181820 ---> the security_f2k_APP restart ---> 2
|
||||
20200512181830 ---> the security_f2k_APP restart ---> 3
|
||||
20200512181840 ---> the security_f2k_APP restart ---> 4
|
||||
20200512181850 ---> the security_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512183204 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183214 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183225 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183235 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183245 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183255 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183305 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183315 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183325 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183335 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183345 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183355 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183405 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183415 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183425 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183435 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183445 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183455 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183505 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183515 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183525 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183535 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183545 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183555 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183605 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183615 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183626 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183636 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183646 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183656 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183706 ---> the security_f2k_APP restart ---> 1
|
||||
##############################################################
|
||||
20200512183753 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183803 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183813 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183823 ---> the security_f2k_APP restart ---> 4
|
||||
20200512183833 ---> the security_f2k_APP restart ---> 5
|
||||
20200512183843 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183853 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183903 ---> the security_f2k_APP restart ---> 3
|
||||
##############################################################
|
||||
20200512183921 ---> the security_f2k_APP restart ---> 1
|
||||
20200512183931 ---> the security_f2k_APP restart ---> 2
|
||||
20200512183941 ---> the security_f2k_APP restart ---> 3
|
||||
20200512183951 ---> the security_f2k_APP restart ---> 4
|
||||
20200512184002 ---> the security_f2k_APP restart ---> 5
|
||||
##############################################################
|
||||
20200512184121 ---> the security_f2k_APP restart ---> 1
|
||||
20200512184131 ---> the security_f2k_APP restart ---> 2
|
||||
20200512184141 ---> the security_f2k_APP restart ---> 3
|
||||
20200512184151 ---> the security_f2k_APP restart ---> 4
|
||||
20200512184201 ---> the security_f2k_APP restart ---> 5
|
||||
20200512184211 ---> the security_f2k_APP restart ---> 1
|
||||
##############################################################
|
||||
20200512184503 ---> the security_f2k_APP restart ---> 1
|
||||
20200512184513 ---> the security_f2k_APP restart ---> 2
|
||||
20200512184523 ---> the security_f2k_APP restart ---> 3
|
||||
20200512184533 ---> the security_f2k_APP restart ---> 4
|
||||
20200512184543 ---> the security_f2k_APP restart ---> 5
|
||||
40
internal-flume/conf/security/security_f2k1.properties
Normal file
@@ -0,0 +1,40 @@
security.sources = s2
security.channels = c2
# Channel that the data collected by the source is sent to
security.sources.s2.channels = c2
# taildir source
security.sources.s2.type = TAILDIR
security.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
security.sources.s2.positionFile = /home/test/1security_position.json
# Data from 13 hosts is split into file groups
security.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Files are grouped by trailing sequence digit [0-9]; at most 10 processes are supported for now, and this can be increased
security.sources.s2.filegroups.f1 = /home/data/192.168.60.101/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f2 = /home/data/192.168.60.102/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f3 = /home/data/192.168.60.103/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f4 = /home/data/192.168.60.104/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f5 = /home/data/192.168.60.105/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f6 = /home/data/192.168.60.106/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f7 = /home/data/192.168.60.107/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f8 = /home/data/192.168.60.108/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f9 = /home/data/192.168.60.109/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f10 = /home/data/192.168.60.110/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f11 = /home/data/192.168.60.111/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f12 = /home/data/192.168.60.112/SECURITY-EVENT-LOG/.*[0-1].dat
security.sources.s2.filegroups.f13 = /home/data/192.168.60.113/SECURITY-EVENT-LOG/.*[0-1].dat


security.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the Kafka producer
security.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
security.channels.c2.brokerList = 192.168.40.222:9093
security.channels.c2.topic = SECURITY-EVENT-LOG
# false writes records into Kafka as plain text; true wraps them as Flume events, which come out garbled on the consumer side; the default is true
security.channels.c2.parseAsFlumeEvent = false
security.channels.c2.kafka.producer.acks = 1
security.channels.c2.producer.type=sync
security.channels.c2.queue.buffering.max.ms = 5000
security.channels.c2.queue.buffering.max.messages=20000
security.channels.c2.batch.num.messages=5000
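dae_security.sh launches each of these numbered files as a separate agent; run by hand, the equivalent command would look roughly like the sketch below. The install prefix /home/apache-flume-1.9.0-bin is taken from the cron entry in flumeMonitor/readMe.txt; treat the exact paths as assumptions.

cd /home/apache-flume-1.9.0-bin/conf/security
nohup ../../bin/flume-ng agent -n security -c . -f security_f2k1.properties >/dev/null 2>&1 &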
40
internal-flume/conf/security/security_f2k2.properties
Normal file
@@ -0,0 +1,40 @@
security.sources = s2
security.channels = c2
# Channel that the data collected by the source is sent to
security.sources.s2.channels = c2
# taildir source
security.sources.s2.type = TAILDIR
security.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
security.sources.s2.positionFile = /home/test/2security_position.json
# Data from 13 hosts is split into file groups
security.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Files are grouped by trailing sequence digit [0-9]; at most 10 processes are supported for now, and this can be increased
security.sources.s2.filegroups.f1 = /home/data/192.168.60.101/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f2 = /home/data/192.168.60.102/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f3 = /home/data/192.168.60.103/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f4 = /home/data/192.168.60.104/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f5 = /home/data/192.168.60.105/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f6 = /home/data/192.168.60.106/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f7 = /home/data/192.168.60.107/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f8 = /home/data/192.168.60.108/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f9 = /home/data/192.168.60.109/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f10 = /home/data/192.168.60.110/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f11 = /home/data/192.168.60.111/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f12 = /home/data/192.168.60.112/SECURITY-EVENT-LOG/.*[2-3].dat
security.sources.s2.filegroups.f13 = /home/data/192.168.60.113/SECURITY-EVENT-LOG/.*[2-3].dat


security.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the Kafka producer
security.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
security.channels.c2.brokerList = 192.168.40.222:9093
security.channels.c2.topic = SECURITY-EVENT-LOG
# false writes records into Kafka as plain text; true wraps them as Flume events, which come out garbled on the consumer side; the default is true
security.channels.c2.parseAsFlumeEvent = false
security.channels.c2.kafka.producer.acks = 1
security.channels.c2.producer.type=sync
security.channels.c2.queue.buffering.max.ms = 5000
security.channels.c2.queue.buffering.max.messages=20000
security.channels.c2.batch.num.messages=5000
40
internal-flume/conf/security/security_f2k3.properties
Normal file
@@ -0,0 +1,40 @@
security.sources = s2
security.channels = c2
# Channel that the data collected by the source is sent to
security.sources.s2.channels = c2
# taildir source
security.sources.s2.type = TAILDIR
security.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
security.sources.s2.positionFile = /home/test/3security_position.json
# Data from 13 hosts is split into file groups
security.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Files are grouped by trailing sequence digit [0-9]; at most 10 processes are supported for now, and this can be increased
security.sources.s2.filegroups.f1 = /home/data/192.168.60.101/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f2 = /home/data/192.168.60.102/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f3 = /home/data/192.168.60.103/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f4 = /home/data/192.168.60.104/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f5 = /home/data/192.168.60.105/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f6 = /home/data/192.168.60.106/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f7 = /home/data/192.168.60.107/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f8 = /home/data/192.168.60.108/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f9 = /home/data/192.168.60.109/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f10 = /home/data/192.168.60.110/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f11 = /home/data/192.168.60.111/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f12 = /home/data/192.168.60.112/SECURITY-EVENT-LOG/.*[4-5].dat
security.sources.s2.filegroups.f13 = /home/data/192.168.60.113/SECURITY-EVENT-LOG/.*[4-5].dat


security.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the Kafka producer
security.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
security.channels.c2.brokerList = 192.168.40.222:9093
security.channels.c2.topic = SECURITY-EVENT-LOG
# false writes records into Kafka as plain text; true wraps them as Flume events, which come out garbled on the consumer side; the default is true
security.channels.c2.parseAsFlumeEvent = false
security.channels.c2.kafka.producer.acks = 1
security.channels.c2.producer.type=sync
security.channels.c2.queue.buffering.max.ms = 5000
security.channels.c2.queue.buffering.max.messages=20000
security.channels.c2.batch.num.messages=5000
40
internal-flume/conf/security/security_f2k4.properties
Normal file
@@ -0,0 +1,40 @@
security.sources = s2
security.channels = c2
# Channel that the data collected by the source is sent to
security.sources.s2.channels = c2
# taildir source
security.sources.s2.type = TAILDIR
security.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
security.sources.s2.positionFile = /home/test/4security_position.json
# Data from 13 hosts is split into file groups
security.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Files are grouped by trailing sequence digit [0-9]; at most 10 processes are supported for now, and this can be increased
security.sources.s2.filegroups.f1 = /home/data/192.168.60.101/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f2 = /home/data/192.168.60.102/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f3 = /home/data/192.168.60.103/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f4 = /home/data/192.168.60.104/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f5 = /home/data/192.168.60.105/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f6 = /home/data/192.168.60.106/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f7 = /home/data/192.168.60.107/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f8 = /home/data/192.168.60.108/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f9 = /home/data/192.168.60.109/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f10 = /home/data/192.168.60.110/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f11 = /home/data/192.168.60.111/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f12 = /home/data/192.168.60.112/SECURITY-EVENT-LOG/.*[6-7].dat
security.sources.s2.filegroups.f13 = /home/data/192.168.60.113/SECURITY-EVENT-LOG/.*[6-7].dat


security.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the Kafka producer
security.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
security.channels.c2.brokerList = 192.168.40.222:9093
security.channels.c2.topic = SECURITY-EVENT-LOG
# false writes records into Kafka as plain text; true wraps them as Flume events, which come out garbled on the consumer side; the default is true
security.channels.c2.parseAsFlumeEvent = false
security.channels.c2.kafka.producer.acks = 1
security.channels.c2.producer.type=sync
security.channels.c2.queue.buffering.max.ms = 5000
security.channels.c2.queue.buffering.max.messages=20000
security.channels.c2.batch.num.messages=5000
40
internal-flume/conf/security/security_f2k5.properties
Normal file
@@ -0,0 +1,40 @@
security.sources = s2
security.channels = c2
# Channel that the data collected by the source is sent to
security.sources.s2.channels = c2
# taildir source
security.sources.s2.type = TAILDIR
security.sources.s2.channels = c2
# Needs to be mapped to the host machine for persistence
security.sources.s2.positionFile = /home/test/5security_position.json
# Data from 13 hosts is split into file groups
security.sources.s2.filegroups = f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13
# Files are grouped by trailing sequence digit [0-9]; at most 10 processes are supported for now, and this can be increased
security.sources.s2.filegroups.f1 = /home/data/192.168.60.101/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f2 = /home/data/192.168.60.102/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f3 = /home/data/192.168.60.103/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f4 = /home/data/192.168.60.104/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f5 = /home/data/192.168.60.105/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f6 = /home/data/192.168.60.106/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f7 = /home/data/192.168.60.107/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f8 = /home/data/192.168.60.108/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f9 = /home/data/192.168.60.109/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f10 = /home/data/192.168.60.110/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f11 = /home/data/192.168.60.111/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f12 = /home/data/192.168.60.112/SECURITY-EVENT-LOG/.*[8-9].dat
security.sources.s2.filegroups.f13 = /home/data/192.168.60.113/SECURITY-EVENT-LOG/.*[8-9].dat


security.sources.s2.maxBatchCount = 10000

# The Kafka channel acts as the Kafka producer
security.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
security.channels.c2.brokerList = 192.168.40.222:9093
security.channels.c2.topic = SECURITY-EVENT-LOG
# false writes records into Kafka as plain text; true wraps them as Flume events, which come out garbled on the consumer side; the default is true
security.channels.c2.parseAsFlumeEvent = false
security.channels.c2.kafka.producer.acks = 1
security.channels.c2.producer.type=sync
security.channels.c2.queue.buffering.max.ms = 5000
security.channels.c2.queue.buffering.max.messages=20000
security.channels.c2.batch.num.messages=5000
5
internal-flume/conf/security/start_flume.sh
Executable file
@@ -0,0 +1,5 @@
#!/bin/sh

BASE_DIR=$(cd $(dirname $0); pwd)

nohup ${BASE_DIR}/dae_f2k.sh >/dev/null 2>&1 &
31
internal-flume/conf/security/stop_flume.sh
Executable file
@@ -0,0 +1,31 @@
#!/bin/sh

#DAE_NAME=dae_k2ha.sh
#JAR_NAME=k2ha.properties
BASE_DIR=$(cd $(dirname $0); pwd)

DAE_NAME=`ls ${BASE_DIR} | grep ^dae_* | grep .sh$`
JAR_NAME=`ls ${BASE_DIR} | grep f2k\* | grep .properties$`

NUM1=`ps -ef | grep ${DAE_NAME} | grep -v grep | wc -l`
pids1=$(ps -ef | grep ${DAE_NAME} | grep -v grep | awk '{print $2}')
if [ "${NUM1}" -ge "1" ];then
    for pid1 in $pids1
    do
        kill -9 $pid1
        echo 'killed '${DAE_NAME}' process-->'$pid1
    done
fi

for jar in $JAR_NAME ; do

    NUM2=`ps -ef | grep $jar | grep -v grep | wc -l`
    pids2=$(ps -ef | grep $jar | grep -v grep | awk '{print $2}')
    if [ "${NUM2}" -ge "1" ];then
        for pid2 in $pids2
        do
            kill -9 $pid2
            echo 'killed '$jar' process-->'$pid2
        done
    fi
done
BIN
internal-flume/dat/Kazakhstan.mmdb
Normal file
Binary file not shown.
BIN
internal-flume/dat/asn_v4.mmdb
Normal file
Binary file not shown.
BIN
internal-flume/dat/asn_v6.mmdb
Normal file
Binary file not shown.
7
internal-flume/flumeController/bootstart.log
Normal file
@@ -0,0 +1,7 @@
---------------------------Cap-Flume-Started-20191129175126---------------------------------
---------------------------Cap-Flume-Started-20191202163810---------------------------------
---------------------------Cap-Flume-Started-20191204141708---------------------------------
---------------------------Cap-Flume-Started-20191204141943---------------------------------
---------------------------Cap-Flume-Started-20191220145125---------------------------------
---------------------------Cap-Flume-Started-20191220145338---------------------------------
---------------------------Cap-Flume-Started-20191220145342---------------------------------
18
internal-flume/flumeController/change_log_lv.sh
Executable file
@@ -0,0 +1,18 @@
#! /bin/bash

# Path of the flume conf directory; note there is no trailing /
BASE_DIR=$(cd $(dirname $0); cd ../conf/; pwd)

from_lv=INFO,LOGFILE
to_lv=WARN,LOGFILE

#from_lv=WARN,LOGFILE
#to_lv=INFO,LOGFILE

for conf_name in `ls ${BASE_DIR}`
do

    # sed -i 's/flume.root.logger=INFO,LOGFILE/flume.root.logger=WARN,LOGFILE/' ${BASE_DIR}/${conf_name}/log4j.properties
    sed -i 's/flume.root.logger='${from_lv}'/flume.root.logger='${to_lv}'/' ${BASE_DIR}/${conf_name}/log4j.properties

done
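After running the script, the switch can be verified by grepping the root-logger line in each agent's log4j.properties; the install prefix below follows the layout used in flumeMonitor/readMe.txt and the log4j key follows Flume's default template, so treat both as assumptions:

grep -H 'flume.root.logger' /home/apache-flume-1.9.0-bin/conf/*/log4j.properties
# expected after the switch: flume.root.logger=WARN,LOGFILE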
18
internal-flume/flumeController/clean_log_all.sh
Executable file
@@ -0,0 +1,18 @@
#! /bin/bash

# Path of the flume conf directory; note there is no trailing /
BASE_DIR=$(cd $(dirname $0); cd ../conf/; pwd)

for conf_name in `ls ${BASE_DIR}`
do

    # rm -rf ${BASE_DIR}/${conf_name}/logs-*/*.log*
    rm -rf ${BASE_DIR}/${conf_name}/logs/*.log*

    rm -rf ${BASE_DIR}/${conf_name}/restart_log/*.log*

    rm -rf ${BASE_DIR}/${conf_name}/checkpoint/*

    rm -rf ${BASE_DIR}/${conf_name}/data/*

done
16
internal-flume/flumeController/clean_start_all.sh
Executable file
@@ -0,0 +1,16 @@
#! /bin/bash

# Path of the flume conf directory; note there is no trailing /
# when run from the repository root
#BASE_DIR=$(cd $(dirname $0); cd conf/; pwd)
# when run from the flumeController directory
BASE_DIR=$(cd $(dirname $0); cd ../conf/; pwd)

for conf_name in `ls ${BASE_DIR}`
do
    cd ${BASE_DIR}/${conf_name} && ./clean_start_flume.sh

done

# Keeps the script in the foreground so the docker container does not exit; not needed for a local run, where this line should be commented out
tail -f /dev/null
10
internal-flume/flumeController/count_all.sh
Executable file
@@ -0,0 +1,10 @@
#! /bin/bash

# Path of the flume conf directory; note there is no trailing /
BASE_DIR=$(cd $(dirname $0); cd ../conf/; pwd)

for conf_name in `ls ${BASE_DIR}`
do
    ${BASE_DIR}/${conf_name}/count_flume.sh

done
14
internal-flume/flumeController/start_all.sh
Executable file
@@ -0,0 +1,14 @@
#! /bin/bash

# Path of the flume conf directory; note there is no trailing /
# when run from the repository root
#BASE_DIR=$(cd $(dirname $0); cd conf/; pwd)
# when run from the flumeController directory
BASE_DIR=$(cd $(dirname $0); cd ../conf/; pwd)


for conf_name in `ls ${BASE_DIR}`
do
    cd ${BASE_DIR}/${conf_name} && ./start_flume.sh

done
13
internal-flume/flumeController/stop_all.sh
Executable file
@@ -0,0 +1,13 @@
#! /bin/bash

# Path of the flume conf directory; note there is no trailing /
# when run from the repository root
#BASE_DIR=$(cd $(dirname $0); cd conf/; pwd)
# when run from the flumeController directory
BASE_DIR=$(cd $(dirname $0); cd ../conf/; pwd)

for conf_name in `ls ${BASE_DIR}`
do
    ${BASE_DIR}/${conf_name}/stop_flume.sh

done
0
internal-flume/flumeMonitor/file-monitor-flume.log
Normal file
2
internal-flume/flumeMonitor/flumeConfList
Normal file
@@ -0,0 +1,2 @@
ntcCollHttpDocConf
ntcCollMailConf
1
internal-flume/flumeMonitor/readMe.txt
Normal file
@@ -0,0 +1 @@
*/1 * * * * /home/apache-flume-1.9.0-bin/flumeMonitor/taskFlumeMinSum.sh >> /home/apache-flume-1.9.0-bin/flumeMonitor/file-monitor-flume.log
36
internal-flume/flumeMonitor/taskFlumeMinSum.sh
Executable file
@@ -0,0 +1,36 @@
#! /bin/bash

# Monitoring-related paths; no trailing /
BASE_DIR=$(cd $(dirname $0); pwd)
CONFNAMES=$(cd $(dirname $0); cd ../conf/; pwd)
lastMinTime=$(date +"20%y%m%d%H%M" -d "-2 min")

echo "###############################$lastMinTime######################################"
echo $BASE_DIR
echo $CONFNAMES

for conf_name in `cat $BASE_DIR/flumeConfList`
do
    echo $conf_name
    echo $lastMinTime
    min_msgSuccessSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $14}' | awk '{sum+=$1}END{print sum}'`
    echo min_msgSuccessSum=$min_msgSuccessSum
    min_msgFailedSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $16}' | awk '{sum+=$1}END{print sum}'`
    echo min_msgFailedSum=$min_msgFailedSum
    min_msgReadyPostSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $18}' | awk '{sum+=$1}END{print sum}'`
    echo min_msgReadyPostSum=$min_msgReadyPostSum
    min_msgTotalSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $20}' | awk '{sum+=$1}END{print sum}'`
    echo min_msgTotalSum=$min_msgTotalSum
    #min_fileSuccessSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $20}' | awk '{sum+=$1}END{print sum}'`
    #echo min_fileSuccessSum=$min_fileSuccessSum
    #min_fileReadyPostSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $22}' | awk '{sum+=$1}END{print sum}'`
    #echo min_fileReadyPostSum=$min_fileReadyPostSum
    #min_fileBytesSuccessSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $24}' | awk '{sum+=$1}END{print sum}'`
    #echo min_fileBytesSuccessSum=$min_fileBytesSuccessSum
    #min_fileBytesReadyPostSum=`cat $CONFNAMES/$conf_name/logs/flume.log | grep 'last min' | grep "$lastMinTime" | awk '{print $26}' | awk '{sum+=$1}END{print sum}'`
    #echo min_fileBytesReadyPostSum=$min_fileBytesReadyPostSum

    #curl -s -XPOST 'http://127.0.0.1:8086/write?db=telegraf&u=admin&p=telegraf' --data-binary 'Cap_File_Monitor,flumeTask='$conf_name',statType=OneMinMonitor,LogStatTime='$lastMinTime' MsgSuccessSum='$min_msgSuccessSum',MsgFailedSum='$min_msgFailedSum',MsgReadyPostSum='$min_msgReadyPostSum',FileSuccessSum='$min_fileSuccessSum',FileReadyPostSum='$min_fileReadyPostSum',FileBytesSuccessSum='$min_fileBytesSuccessSum',FileBytesReadyPostSum='$min_fileBytesReadyPostSum''
    curl -s -XPOST 'http://127.0.0.1:8086/write?db=telegraf&u=admin&p=telegraf' --data-binary 'Cap_File_Monitor,flumeTask='$conf_name',statType=OneMinMonitor,LogStatTime='$lastMinTime' MsgSuccessSum='$min_msgSuccessSum',MsgFailedSum='$min_msgFailedSum',MsgReadyPostSum='$min_msgReadyPostSum',MsgTotalSum='$min_msgTotalSum''
    echo $conf_name---$lastMinTime---
done
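The closing curl writes one InfluxDB line-protocol point per flume task. With sample numbers filled in (the measurement, tag, and field names come from the script, the task name comes from flumeConfList, and the values are made up), the posted body looks like this:

Cap_File_Monitor,flumeTask=ntcCollHttpDocConf,statType=OneMinMonitor,LogStatTime=202005121830 MsgSuccessSum=1200,MsgFailedSum=0,MsgReadyPostSum=35,MsgTotalSum=1235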