modify ddl and configuration template directory
This commit is contained in:
@@ -0,0 +1,16 @@
|
||||
-- Add WHOIS-owner and IDC-renter enrichment columns to the CN session record
-- tables. Each column is added in three steps: on the local storage table
-- (ck_cluster), then on the distributed table on the ingest cluster
-- (ck_cluster), then on the distributed table on the query cluster (ck_query).
-- IF NOT EXISTS keeps the migration idempotent on re-run.

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_whois_owner String AFTER client_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_whois_owner String AFTER client_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS client_whois_owner String AFTER client_isp;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_idc_renter String AFTER client_whois_owner;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_idc_renter String AFTER client_whois_owner;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS client_idc_renter String AFTER client_whois_owner;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_whois_owner String AFTER server_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_whois_owner String AFTER server_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS server_whois_owner String AFTER server_isp;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_idc_renter String AFTER server_whois_owner;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_idc_renter String AFTER server_whois_owner;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS server_idc_renter String AFTER server_whois_owner;
@@ -0,0 +1,39 @@
-- Add the 2022-09 column set to the CN session record tables: session
-- start/end timestamps, stratum (crypto-mining) protocol fields, extra
-- SSL/JA3S fingerprint fields, and client/server ASN. Pattern per column:
-- local storage table first, then the distributed table on the ingest
-- (ck_cluster) and query (ck_query) clusters. IF NOT EXISTS keeps the
-- migration idempotent.
--
-- NOTE(review): common_end_time is added before common_start_time and both
-- use AFTER common_stream_dir, so the resulting order is
-- common_stream_dir, common_start_time, common_end_time. The three stratum_*
-- columns share AFTER ssh_hassh the same way (last added ends up first).
-- This looks intentional (reverse-add ordering) — confirm before reordering.

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS common_end_time Int64 AFTER common_stream_dir;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS common_end_time Int64 AFTER common_stream_dir;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS common_end_time Int64 AFTER common_stream_dir;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS common_start_time Int64 AFTER common_stream_dir;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS common_start_time Int64 AFTER common_stream_dir;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS common_start_time Int64 AFTER common_stream_dir;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS stratum_mining_program String AFTER ssh_hassh;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS stratum_mining_program String AFTER ssh_hassh;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS stratum_mining_program String AFTER ssh_hassh;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS stratum_mining_pools String AFTER ssh_hassh;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS stratum_mining_pools String AFTER ssh_hassh;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS stratum_mining_pools String AFTER ssh_hassh;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS stratum_cryptocurrency String AFTER ssh_hassh;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS stratum_cryptocurrency String AFTER ssh_hassh;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS stratum_cryptocurrency String AFTER ssh_hassh;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_version String AFTER ssl_sni;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_version String AFTER ssl_sni;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS ssl_version String AFTER ssl_sni;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_ja3s_fingerprint String AFTER ssl_san;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_ja3s_fingerprint String AFTER ssl_san;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS ssl_ja3s_fingerprint String AFTER ssl_san;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_ja3s_hash String AFTER ssl_ja3_hash;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_ja3s_hash String AFTER ssl_ja3_hash;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS ssl_ja3s_hash String AFTER ssl_ja3_hash;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_asn String AFTER client_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_asn String AFTER client_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS client_asn String AFTER client_isp;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_asn String AFTER server_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_asn String AFTER server_isp;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS server_asn String AFTER server_isp;
1051
cyber_narrator/upgrade/2022/CN-22.09/clickhouse/update-09-ck.sql
Normal file
1051
cyber_narrator/upgrade/2022/CN-22.09/clickhouse/update-09-ck.sql
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_cn_record pipeline: keeps PROCESS_SUM
# gohangout processes alive, restarting them when they die and backing off
# for 30s when restarts keep failing (typically kafka/clickhouse down).
# Usage: start_gohangout_k2ck_cn_record.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_cn_record
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=1
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_application_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_application_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_application_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_asn_cn pipeline: keeps PROCESS_SUM
# gohangout processes alive, restarting them when they die and backing off
# 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_asn_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_asn_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_domain_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_domain_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_domain_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_http_host_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_http_host_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_http_host_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_idc_renter_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_idc_renter_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_idc_renter_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_ip_cn pipeline: keeps PROCESS_SUM
# gohangout processes alive, restarting them when they die and backing off
# 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_ip_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_ip_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_protocol_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_protocol_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_protocol_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_region_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_region_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_region_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,51 @@
#!/bin/sh
# gohangout watchdog for the k2ck_metric_ssl_sni_cn pipeline: keeps
# PROCESS_SUM gohangout processes alive, restarting them when they die and
# backing off 30s when restarts keep failing (kafka/clickhouse down).
# Usage: start_gohangout_k2ck_metric_ssl_sni_cn.sh <process-count>

# Config file name; ${YML_NAME}.yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_ssl_sni_cn
# Install root holding bin/, conf/ and logs/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker has its own filter/output and memory.
THREAD_SUM=3
# Target process count; default to 1 so a missing argument no longer breaks
# every numeric test below (original used bare $1 unchecked).
PROCESS_SUM=${1:-1}

# Create BOTH log directories up front: the original only created logs/,
# while every append to bin/start_log/ assumed that directory existed.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        # Below target: launch one more instance in the background.
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "${LOG_LV}" --worker "${THREAD_SUM}" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: the original ((logid++)) is a bashism that fails
        # under dash/ash despite the /bin/sh shebang.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            # More consecutive restarts than the target count: assume kafka
            # or clickhouse is down, kill everything and back off.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # Above target: kill every instance and let the loop respawn them.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the SESSION-RECORD-CN Kafka topic as JSON and
# bulk-insert into the ClickHouse local table session_record_cn_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            SESSION-RECORD-CN: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: cn-record-2
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.session_record_cn_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-APPLICATION Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_application_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-APPLICATION: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_application_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_application_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-ASN Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_asn_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-ASN: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_asn_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_asn_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-DOMAIN Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_domain_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-DOMAIN: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_domain_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_domain_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-HTTP-HOST Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_http_host_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-HTTP-HOST: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_http_host_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_http_host_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-IDC-RENTER Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_idc_renter_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-IDC-RENTER: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_idc_renter_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_idc_renter_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-IP Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_ip_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-IP: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_ip_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_ip_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-PROTOCOL Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_protocol_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-PROTOCOL: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_protocol_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_protocol_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-REGION Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_region_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-REGION: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_region_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_region_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
@@ -0,0 +1,28 @@
# gohangout pipeline: consume the METRIC-SSL-SNI Kafka topic as JSON and
# bulk-insert into the ClickHouse local table metric_ssl_sni_local.
# NOTE(review): indentation restored to canonical gohangout structure — the
# committed copy had lost its whitespace; verify against a working deployment.
inputs:
    - Kafka:
        topic:
            # topic name -> number of consumer threads
            METRIC-SSL-SNI: 1
        #assign:
        #    weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_ssl_sni_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.metric_ssl_sni_local'
        username: 'default'
        # NOTE(review): plaintext credential in a template file — replace per deployment.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # flush after 100000 buffered rows or 30 seconds, whichever comes first
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
14
cyber_narrator/upgrade/2022/CN-22.09/gohangout/start_all.sh
Normal file
14
cyber_narrator/upgrade/2022/CN-22.09/gohangout/start_all.sh
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
# Launch all gohangout kafka->clickhouse watchdog scripts.
# $1 is the desired process count, forwarded unchanged to every start script.

STARTDIR=$(cd $(dirname $0); pwd)

# All but the last watchdog are detached into the background.
for name in cn_record \
            metric_ip_cn \
            metric_region_cn \
            metric_asn_cn \
            metric_idc_renter_cn \
            metric_application_cn \
            metric_domain_cn \
            metric_http_host_cn \
            metric_ssl_sni_cn; do
    nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_${name}.sh $1 > /dev/null 2>&1 &
done

# metric_protocol runs in the foreground so the caller blocks on it,
# matching the original script's behavior.
$STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_protocol_cn.sh $1
|
||||
397
cyber_narrator/upgrade/2022/CN-22.10/clickhouse/update-10-ck.sql
Normal file
397
cyber_narrator/upgrade/2022/CN-22.10/clickhouse/update-10-ck.sql
Normal file
@@ -0,0 +1,397 @@
|
||||
-- Per-link traffic metrics: one local shard table plus Distributed views on
-- both the storage cluster (ck_cluster) and the query cluster (ck_query).
-- stat_time appears to be a unix timestamp (Int64) — daily partitions via
-- toYYYYMMDD(toDate(stat_time)); TODO confirm epoch-seconds semantics.

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_link_local ON CLUSTER ck_cluster (
    client_country String,
    client_province String,
    client_region String,
    server_country String,
    server_province String,
    server_region String,
    common_egress_link_id Int64,
    common_ingress_link_id Int64,
    egress_link_direction String,
    ingress_link_direction String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Nullable(Float64),
    s2c_tcp_lostlen_ratio Nullable(Float64),
    tcp_lostlen_ratio Nullable(Float64),
    c2s_tcp_unorder_num_ratio Nullable(Float64),
    s2c_tcp_unorder_num_ratio Nullable(Float64),
    tcp_unorder_num_ratio Nullable(Float64),
    c2s_byte_retrans_ratio Nullable(Float64),
    s2c_byte_retrans_ratio Nullable(Float64),
    byte_retrans_ratio Nullable(Float64),
    c2s_pkt_retrans_ratio Nullable(Float64),
    s2c_pkt_retrans_ratio Nullable(Float64),
    pkt_retrans_ratio Nullable(Float64),
    avg_establish_latency_ms Nullable(Float64),
    avg_http_response_latency_ms Nullable(Float64),
    avg_ssl_con_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time) SETTINGS index_granularity = 8192;

-- Distributed view over the local shards, registered on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_link ON CLUSTER ck_cluster (
    client_country String,
    client_province String,
    client_region String,
    server_country String,
    server_province String,
    server_region String,
    common_egress_link_id Int64,
    common_ingress_link_id Int64,
    egress_link_direction String,
    ingress_link_direction String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Nullable(Float64),
    s2c_tcp_lostlen_ratio Nullable(Float64),
    tcp_lostlen_ratio Nullable(Float64),
    c2s_tcp_unorder_num_ratio Nullable(Float64),
    s2c_tcp_unorder_num_ratio Nullable(Float64),
    tcp_unorder_num_ratio Nullable(Float64),
    c2s_byte_retrans_ratio Nullable(Float64),
    s2c_byte_retrans_ratio Nullable(Float64),
    byte_retrans_ratio Nullable(Float64),
    c2s_pkt_retrans_ratio Nullable(Float64),
    s2c_pkt_retrans_ratio Nullable(Float64),
    pkt_retrans_ratio Nullable(Float64),
    avg_establish_latency_ms Nullable(Float64),
    avg_http_response_latency_ms Nullable(Float64),
    avg_ssl_con_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_link_local', rand());

-- Same Distributed view, registered on the dedicated query cluster;
-- it still reads the shards that live on ck_cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_link ON CLUSTER ck_query (
    client_country String,
    client_province String,
    client_region String,
    server_country String,
    server_province String,
    server_region String,
    common_egress_link_id Int64,
    common_ingress_link_id Int64,
    egress_link_direction String,
    ingress_link_direction String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Nullable(Float64),
    s2c_tcp_lostlen_ratio Nullable(Float64),
    tcp_lostlen_ratio Nullable(Float64),
    c2s_tcp_unorder_num_ratio Nullable(Float64),
    s2c_tcp_unorder_num_ratio Nullable(Float64),
    tcp_unorder_num_ratio Nullable(Float64),
    c2s_byte_retrans_ratio Nullable(Float64),
    s2c_byte_retrans_ratio Nullable(Float64),
    byte_retrans_ratio Nullable(Float64),
    c2s_pkt_retrans_ratio Nullable(Float64),
    s2c_pkt_retrans_ratio Nullable(Float64),
    pkt_retrans_ratio Nullable(Float64),
    avg_establish_latency_ms Nullable(Float64),
    avg_http_response_latency_ms Nullable(Float64),
    avg_ssl_con_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_link_local', rand());
|
||||
|
||||
|
||||
-- DNS metric tables. Each family follows the same pattern:
--   *_local            MergeTree shard on ck_cluster
--   <name> ON ck_cluster  Distributed view over the shards
--   <name> ON ck_query    same Distributed view for the query cluster
-- All share the metric columns (query_num, traffic_*, internal/external
-- query counts, avg_response_latency_ms) and differ only in the dimension.

-- Per server-IP DNS metrics.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_server_ip_local ON CLUSTER ck_cluster (
    server_ip String,
    server_country String,
    server_city String,
    server_isp String,
    server_org String,
    server_role Array(String),
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,server_ip) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_cluster (
    server_ip String,
    server_country String,
    server_city String,
    server_isp String,
    server_org String,
    server_role Array(String),
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_server_ip_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_query (
    server_ip String,
    server_country String,
    server_city String,
    server_isp String,
    server_org String,
    server_role Array(String),
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_server_ip_local', rand());


-- Per query-name DNS metrics (full name plus second/top level domain).
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qname_local ON CLUSTER ck_cluster (
    qname String,
    qname_sld String,
    qname_tld String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,qname) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qname ON CLUSTER ck_cluster (
    qname String,
    qname_sld String,
    qname_tld String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qname_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qname ON CLUSTER ck_query (
    qname String,
    qname_sld String,
    qname_tld String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qname_local', rand());


-- Per query-type (QTYPE numeric code) DNS metrics.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qtype_local ON CLUSTER ck_cluster (
    qtype Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,qtype) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qtype ON CLUSTER ck_cluster (
    qtype Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qtype_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qtype ON CLUSTER ck_query (
    qtype Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qtype_local', rand());


-- Per response-code (RCODE numeric code) DNS metrics.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rcode_local ON CLUSTER ck_cluster (
    rcode Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rcode) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rcode ON CLUSTER ck_cluster (
    rcode Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rcode_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rcode ON CLUSTER ck_query (
    rcode Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rcode_local', rand());


-- Per A-record answer DNS metrics.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_a_local ON CLUSTER ck_cluster (
    rr_a String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rr_a) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_a ON CLUSTER ck_cluster (
    rr_a String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_a_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_a ON CLUSTER ck_query (
    rr_a String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_a_local', rand());


-- Per AAAA-record answer DNS metrics.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_aaaa_local ON CLUSTER ck_cluster (
    rr_aaaa String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rr_aaaa) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_aaaa ON CLUSTER ck_cluster (
    rr_aaaa String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_aaaa_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_aaaa ON CLUSTER ck_query (
    rr_aaaa String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_aaaa_local', rand());


-- Per CNAME-record answer DNS metrics.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_cname_local ON CLUSTER ck_cluster (
    rr_cname String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rr_cname) SETTINGS index_granularity = 8192;

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_cname ON CLUSTER ck_cluster (
    rr_cname String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_cname_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_cname ON CLUSTER ck_query (
    rr_cname String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Nullable(Float64)
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_cname_local', rand());
|
||||
|
||||
|
||||
-- Add the zone dimensions to all three metric_link tables (idempotent via
-- IF NOT EXISTS; the column must be added to the local shards and to both
-- Distributed views so the schemas stay aligned).
ALTER TABLE cyber_narrator_galaxy.metric_link_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_zone String AFTER client_region;
ALTER TABLE cyber_narrator_galaxy.metric_link ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_zone String AFTER client_region;
ALTER TABLE cyber_narrator_galaxy.metric_link ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS client_zone String AFTER client_region;

ALTER TABLE cyber_narrator_galaxy.metric_link_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_zone String AFTER server_region;
ALTER TABLE cyber_narrator_galaxy.metric_link ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_zone String AFTER server_region;
ALTER TABLE cyber_narrator_galaxy.metric_link ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS server_zone String AFTER server_region;

-- Cap the system query log retention at 30 days on every storage node.
ALTER TABLE system.query_log ON CLUSTER ck_cluster MODIFY TTL event_date + INTERVAL 30 DAY;
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: original used bash-only ((var++)) under #!/bin/sh, which breaks on
# POSIX shells (dash); arithmetic is now POSIX $((...)). Also fails fast on
# a missing process count and pre-creates the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_qname_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process; each worker has its own filter/output.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

# Ensure both log targets exist before writing to them.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down,
        # kill everything and back off before retrying.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_qtype_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_rcode_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_rr_a_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_rr_aaaa_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_rr_cname_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_dns_server_ip_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the pipeline alive.
# Usage: <script> <process-count>
# Fix: bash-only ((var++)) under #!/bin/sh replaced with POSIX $((...));
# fail fast on a missing process count; pre-create the start_log directory.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_link_cn
# Root directory holding bin/ and conf/ (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker (thread) count per process.
THREAD_SUM=3
# Desired number of gohangout processes.
PROCESS_SUM=$1

if [ -z "$PROCESS_SUM" ]; then
    echo "usage: $0 <process count>" >&2
    exit 1
fi

mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true ; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        $BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume kafka/clickhouse is down.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the loop restart.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-QNAME -> ClickHouse metric_dns_qname_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-QNAME: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        # NOTE(review): every sibling config suffixes group.id with _cn
        # (e.g. metric_dns_qtype_cn); confirm whether this should be
        # metric_dns_qname_cn before changing -- renaming resets offsets.
        group.id: metric_dns_qname
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_qname_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-QTYPE -> ClickHouse metric_dns_qtype_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-QTYPE: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_qtype_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_qtype_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RCODE -> ClickHouse metric_dns_rcode_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RCODE: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rcode_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rcode_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RR-A -> ClickHouse metric_dns_rr_a_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RR-A: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rr_a_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rr_a_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RR-AAAA -> ClickHouse metric_dns_rr_aaaa_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RR-AAAA: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rr_aaaa_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rr_aaaa_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RR-CNAME -> ClickHouse metric_dns_rr_cname_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RR-CNAME: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rr_cname_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rr_cname_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-SERVER-IP -> ClickHouse metric_dns_server_ip_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-DNS-SERVER-IP: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_server_ip_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_server_ip_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-LINK -> ClickHouse metric_link_local.
# Indentation reconstructed -- the committed dump had flattened the YAML structure.
inputs:
  - Kafka:
      topic:
        METRIC-LINK: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_link_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
        # from.beginning: 'true'
        # sasl.mechanism: PLAIN
        # sasl.user: admin
        # sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_link_local'
      username: 'default'
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
22
cyber_narrator/upgrade/2022/CN-22.10/gohangout/start_all.sh
Normal file
22
cyber_narrator/upgrade/2022/CN-22.10/gohangout/start_all.sh
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/bin/sh

# Launch every per-topic gohangout supervisor script.
# Usage: start_all.sh <process_count>   ($1 is forwarded to each starter)

STARTDIR=$(cd "$(dirname "$0")" && pwd)

# All supervisors except the last run detached in the background.
# (Replaces 17 duplicated nohup lines; list order matches the original.)
for name in \
    k2ck_cn_record \
    k2ck_metric_ip_cn \
    k2ck_metric_region_cn \
    k2ck_metric_asn_cn \
    k2ck_metric_idc_renter_cn \
    k2ck_metric_application_cn \
    k2ck_metric_domain_cn \
    k2ck_metric_http_host_cn \
    k2ck_metric_ssl_sni_cn \
    k2ck_metric_protocol_cn \
    k2ck_metric_link_cn \
    k2ck_metric_dns_qname_cn \
    k2ck_metric_dns_server_ip_cn \
    k2ck_metric_dns_qtype_cn \
    k2ck_metric_dns_rcode_cn \
    k2ck_metric_dns_rr_a_cn \
    k2ck_metric_dns_rr_aaaa_cn
do
    nohup "$STARTDIR/bin/ghoStart/start_gohangout_${name}.sh" "$1" > /dev/null 2>&1 &
done

# NOTE(review): the original runs the last supervisor in the foreground
# without nohup -- preserved here; presumably it keeps the caller attached.
"$STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh" "$1"
|
||||
0
cyber_narrator/upgrade/2023/CN-23.02/.gitkeep
Normal file
0
cyber_narrator/upgrade/2023/CN-23.02/.gitkeep
Normal file
@@ -0,0 +1,68 @@
|
||||
|
||||
|
||||
|
||||
-- Add tag-array columns to session_record_cn on every variant of the table:
-- the local storage table plus the distributed front tables on both clusters.
-- All statements are idempotent (IF NOT EXISTS).

-- Domain / host / SNI tags, chained after domain_whois_org.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS domain_tags Array(String) AFTER domain_whois_org;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS domain_tags Array(String) AFTER domain_whois_org;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS domain_tags Array(String) AFTER domain_whois_org;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS http_host_tags Array(String) AFTER domain_tags;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS http_host_tags Array(String) AFTER domain_tags;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS http_host_tags Array(String) AFTER domain_tags;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_sni_tags Array(String) AFTER http_host_tags;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS ssl_sni_tags Array(String) AFTER http_host_tags;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS ssl_sni_tags Array(String) AFTER http_host_tags;

-- Per-endpoint IP tags, placed after the idc_renter columns.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_ip_tags Array(String) AFTER client_idc_renter;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS client_ip_tags Array(String) AFTER client_idc_renter;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS client_ip_tags Array(String) AFTER client_idc_renter;

ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_ip_tags Array(String) AFTER server_idc_renter;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS server_ip_tags Array(String) AFTER server_idc_renter;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS server_ip_tags Array(String) AFTER server_idc_renter;

-- Application tags, placed after app_risk.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS app_tags Array(String) AFTER app_risk;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS app_tags Array(String) AFTER app_risk;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS app_tags Array(String) AFTER app_risk;
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
0
cyber_narrator/upgrade/2023/CN-23.06/.gitkeep
Normal file
0
cyber_narrator/upgrade/2023/CN-23.06/.gitkeep
Normal file
@@ -0,0 +1,146 @@
|
||||
-- Entity-relation fact table: one row per (ip, fqdn, app_name) triple.
-- The second TTL clause rolls duplicates up 1s after their last update
-- (earliest create_time, latest update_time, last-seen geo/asn fields);
-- the first expires rows 2592000s (30 days) after their last update.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation_local ON CLUSTER ck_cluster
(
    app_name    String,
    fqdn        String,
    ip          String,
    country     String,
    province    String,
    region      String,
    asn         String,
    create_time Int64,
    update_time Int64
)
ENGINE = MergeTree
ORDER BY (ip, fqdn, app_name)
TTL toDateTime(update_time) + toIntervalSecond(2592000),
    toDateTime(update_time) + toIntervalSecond(1)
        GROUP BY ip, fqdn, app_name
        SET create_time = min(create_time),
            update_time = max(update_time),
            country = anyLast(country),
            province = anyLast(province),
            region = anyLast(region),
            asn = anyLast(asn);

-- Distributed front table on the query cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation ON CLUSTER ck_query
(
    app_name    String,
    fqdn        String,
    ip          String,
    country     String,
    province    String,
    region      String,
    asn         String,
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_entity_relation_local', rand());

-- Same distributed front table on the storage cluster itself.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation ON CLUSTER ck_cluster
(
    app_name    String,
    fqdn        String,
    ip          String,
    country     String,
    province    String,
    region      String,
    asn         String,
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_entity_relation_local', rand());
|
||||
|
||||
|
||||
-- Dynamic service-info table: one row per (ip, port, l7_protocol).
-- TTL GROUP BY collapses duplicates 1s after their last update; the
-- DELETE clause expires rows 2592000s (30 days) after their last update.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation_local ON CLUSTER ck_cluster
(
    ip          String,
    l7_protocol String,
    port        Int64,
    create_time Int64,
    update_time Int64
)
ENGINE = MergeTree
ORDER BY (ip, port, l7_protocol)
TTL toDateTime(update_time) + toIntervalSecond(2592000) DELETE,
    toDateTime(update_time) + toIntervalSecond(1)
        GROUP BY ip, port, l7_protocol
        SET create_time = min(create_time),
            update_time = max(update_time);

-- Distributed front table on the query cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation ON CLUSTER ck_query
(
    ip          String,
    l7_protocol String,
    port        Int64,
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_dynamic_info_relation_local', rand());

-- Same distributed front table on the storage cluster itself.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation ON CLUSTER ck_cluster
(
    ip          String,
    l7_protocol String,
    port        Int64,
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_dynamic_info_relation_local', rand());
|
||||
|
||||
|
||||
-- Feed cn_entity_relation_local from the raw session table. The view
-- aggregates per (ip, app_name, fqdn); fqdn here is sourced from ssl_sni.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation_view ON CLUSTER ck_cluster
TO cyber_narrator_galaxy.cn_entity_relation_local
(
    app_name    String,
    fqdn        String,
    ip          String,
    country     String,
    province    String,
    region      String,
    asn         String,
    create_time Int64,
    update_time Int64
) AS
SELECT
    common_app_label AS app_name,
    ssl_sni AS fqdn,
    common_server_ip AS ip,
    anyLast(server_country) AS country,
    anyLast(server_province) AS province,
    anyLast(server_region) AS region,
    anyLast(server_asn) AS asn,
    min(c1.common_recv_time) AS create_time,
    max(c1.common_recv_time) AS update_time
FROM cyber_narrator_galaxy.session_record_cn_local c1
-- Keep TCP sessions plus any traffic to ports 53/443 regardless of L4 protocol.
WHERE common_l4_protocol = 'IPv4_TCP' OR common_server_port IN (53, 443)
GROUP BY ip, app_name, fqdn;


-- Feed cn_dynamic_info_relation_local: per (ip, l7_protocol, port) lifetimes.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation_view ON CLUSTER ck_cluster
TO cyber_narrator_galaxy.cn_dynamic_info_relation_local
(
    ip          String,
    l7_protocol String,
    port        Int64,
    create_time Int64,
    update_time Int64
) AS
SELECT
    common_server_ip AS ip,
    common_l7_protocol AS l7_protocol,
    common_server_port AS port,
    min(c1.common_recv_time) AS create_time,
    max(c1.common_recv_time) AS update_time
FROM cyber_narrator_galaxy.session_record_cn_local c1
WHERE common_l4_protocol = 'IPv4_TCP' OR common_server_port IN (53, 443)
GROUP BY ip, l7_protocol, port;
|
||||
@@ -0,0 +1,32 @@
|
||||
-- Add the resolved server FQDN to session_record_cn on every table variant.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS common_server_fqdn String AFTER common_server_port;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster ADD COLUMN IF NOT EXISTS common_server_fqdn String AFTER common_server_port;
ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query ADD COLUMN IF NOT EXISTS common_server_fqdn String AFTER common_server_port;

-- Rebuild the entity-relation view so that fqdn now comes from the new
-- common_server_fqdn column instead of ssl_sni.
DROP VIEW IF EXISTS cyber_narrator_galaxy.cn_entity_relation_view ON CLUSTER ck_cluster;

CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation_view ON CLUSTER ck_cluster
TO cyber_narrator_galaxy.cn_entity_relation_local
(
    app_name    String,
    fqdn        String,
    ip          String,
    country     String,
    province    String,
    region      String,
    asn         String,
    create_time Int64,
    update_time Int64
) AS
SELECT
    common_app_label AS app_name,
    common_server_fqdn AS fqdn,
    common_server_ip AS ip,
    anyLast(server_country) AS country,
    anyLast(server_province) AS province,
    anyLast(server_region) AS region,
    anyLast(server_asn) AS asn,
    min(c1.common_recv_time) AS create_time,
    max(c1.common_recv_time) AS update_time
FROM cyber_narrator_galaxy.session_record_cn_local c1
WHERE common_l4_protocol = 'IPv4_TCP' OR common_server_port IN (53, 443)
GROUP BY ip, app_name, fqdn;
|
||||
@@ -0,0 +1,515 @@
|
||||
-- Allow up to 180 s for each ON CLUSTER DDL below to complete on every
-- replica (session-scoped setting).
SET distributed_ddl_task_timeout = 180;

-- Drop metric tables that this migration retires or recreates with a new
-- schema.  Each family has a shard-local table plus distributed tables on
-- the storage cluster (ck_cluster) and the query cluster (ck_query).
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_idc_renter_local ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_idc_renter ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_idc_renter ON CLUSTER ck_query;

DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_ssl_sni_local ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_ssl_sni ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_ssl_sni ON CLUSTER ck_query;

DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_http_host_local ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_http_host ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_http_host ON CLUSTER ck_query;

DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_region_local ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_region ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.metric_region ON CLUSTER ck_query;

DROP TABLE IF EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation_local ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation ON CLUSTER ck_query;

DROP TABLE IF EXISTS cyber_narrator_galaxy.cn_entity_relation_local ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.cn_entity_relation ON CLUSTER ck_cluster;
DROP TABLE IF EXISTS cyber_narrator_galaxy.cn_entity_relation ON CLUSTER ck_query;

-- Views must go too, before their target tables are recreated below.
DROP VIEW IF EXISTS cyber_narrator_galaxy.cn_dynamic_info_relation_view ON CLUSTER ck_cluster;
DROP VIEW IF EXISTS cyber_narrator_galaxy.cn_entity_relation_view ON CLUSTER ck_cluster;
|
||||
|
||||
|
||||
|
||||
-- session_record_cn schema update, applied identically to the local table
-- and both distributed tables.
-- Note: both common_flags and common_log_id are added AFTER common_recv_time;
-- because the second ADD is inserted at the same anchor, the resulting order
-- is common_recv_time, common_log_id, common_flags.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS common_flags UInt64 AFTER common_recv_time,
    ADD COLUMN IF NOT EXISTS common_log_id UInt64 AFTER common_recv_time,
    ADD COLUMN IF NOT EXISTS common_app_full_path String AFTER common_app_label,
    ADD COLUMN IF NOT EXISTS domain_sld String AFTER domain;

ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS common_flags UInt64 AFTER common_recv_time,
    ADD COLUMN IF NOT EXISTS common_log_id UInt64 AFTER common_recv_time,
    ADD COLUMN IF NOT EXISTS common_app_full_path String AFTER common_app_label,
    ADD COLUMN IF NOT EXISTS domain_sld String AFTER domain;

ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query
    ADD COLUMN IF NOT EXISTS common_flags UInt64 AFTER common_recv_time,
    ADD COLUMN IF NOT EXISTS common_log_id UInt64 AFTER common_recv_time,
    ADD COLUMN IF NOT EXISTS common_app_full_path String AFTER common_app_label,
    ADD COLUMN IF NOT EXISTS domain_sld String AFTER domain;

-- Remove columns that are no longer produced by the pipeline.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster
    DROP COLUMN IF EXISTS common_direction,
    DROP COLUMN IF EXISTS common_stream_dir,
    DROP COLUMN IF EXISTS common_server_fqdn,
    DROP COLUMN IF EXISTS common_app_id,
    DROP COLUMN IF EXISTS common_isp,
    DROP COLUMN IF EXISTS ssl_ja3_fingerprint,
    DROP COLUMN IF EXISTS ssl_ja3s_fingerprint,
    DROP COLUMN IF EXISTS domain_reputation_score,
    DROP COLUMN IF EXISTS http_host_tags,
    DROP COLUMN IF EXISTS ssl_sni_tags,
    DROP COLUMN IF EXISTS client_whois_owner,
    DROP COLUMN IF EXISTS client_idc_renter,
    DROP COLUMN IF EXISTS server_whois_owner,
    DROP COLUMN IF EXISTS server_idc_renter,
    DROP COLUMN IF EXISTS app_is_protocol,
    DROP COLUMN IF EXISTS app_risk,
    DROP COLUMN IF EXISTS dns_server_role,
    DROP COLUMN IF EXISTS dns_server_org,
    DROP COLUMN IF EXISTS dns_server_os,
    DROP COLUMN IF EXISTS dns_server_software,
    DROP COLUMN IF EXISTS dns_protocol;

ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster
    DROP COLUMN IF EXISTS common_direction,
    DROP COLUMN IF EXISTS common_stream_dir,
    DROP COLUMN IF EXISTS common_server_fqdn,
    DROP COLUMN IF EXISTS common_app_id,
    DROP COLUMN IF EXISTS common_isp,
    DROP COLUMN IF EXISTS ssl_ja3_fingerprint,
    DROP COLUMN IF EXISTS ssl_ja3s_fingerprint,
    DROP COLUMN IF EXISTS domain_reputation_score,
    DROP COLUMN IF EXISTS http_host_tags,
    DROP COLUMN IF EXISTS ssl_sni_tags,
    DROP COLUMN IF EXISTS client_whois_owner,
    DROP COLUMN IF EXISTS client_idc_renter,
    DROP COLUMN IF EXISTS server_whois_owner,
    DROP COLUMN IF EXISTS server_idc_renter,
    DROP COLUMN IF EXISTS app_is_protocol,
    DROP COLUMN IF EXISTS app_risk,
    DROP COLUMN IF EXISTS dns_server_role,
    DROP COLUMN IF EXISTS dns_server_org,
    DROP COLUMN IF EXISTS dns_server_os,
    DROP COLUMN IF EXISTS dns_server_software,
    DROP COLUMN IF EXISTS dns_protocol;

ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query
    DROP COLUMN IF EXISTS common_direction,
    DROP COLUMN IF EXISTS common_stream_dir,
    DROP COLUMN IF EXISTS common_server_fqdn,
    DROP COLUMN IF EXISTS common_app_id,
    DROP COLUMN IF EXISTS common_isp,
    DROP COLUMN IF EXISTS ssl_ja3_fingerprint,
    DROP COLUMN IF EXISTS ssl_ja3s_fingerprint,
    DROP COLUMN IF EXISTS domain_reputation_score,
    DROP COLUMN IF EXISTS http_host_tags,
    DROP COLUMN IF EXISTS ssl_sni_tags,
    DROP COLUMN IF EXISTS client_whois_owner,
    DROP COLUMN IF EXISTS client_idc_renter,
    DROP COLUMN IF EXISTS server_whois_owner,
    DROP COLUMN IF EXISTS server_idc_renter,
    DROP COLUMN IF EXISTS app_is_protocol,
    DROP COLUMN IF EXISTS app_risk,
    DROP COLUMN IF EXISTS dns_server_role,
    DROP COLUMN IF EXISTS dns_server_org,
    DROP COLUMN IF EXISTS dns_server_os,
    DROP COLUMN IF EXISTS dns_server_software,
    DROP COLUMN IF EXISTS dns_protocol;
|
||||
|
||||
|
||||
-- Rename link-direction and geo columns to the new naming scheme
-- (country -> country_region, province -> super_admin_area,
--  region -> admin_area) on session_record_cn and metric_link.
ALTER TABLE cyber_narrator_galaxy.session_record_cn_local ON CLUSTER ck_cluster
    RENAME COLUMN IF EXISTS common_ingress_link_id TO common_in_link_id,
    RENAME COLUMN IF EXISTS common_egress_link_id TO common_out_link_id,
    RENAME COLUMN IF EXISTS egress_link_direction TO out_link_direction,
    RENAME COLUMN IF EXISTS ingress_link_direction TO in_link_direction,
    RENAME COLUMN IF EXISTS client_country TO client_country_region,
    RENAME COLUMN IF EXISTS client_province TO client_super_admin_area,
    RENAME COLUMN IF EXISTS client_region TO client_admin_area,
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_province TO server_super_admin_area,
    RENAME COLUMN IF EXISTS server_region TO server_admin_area;

ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_cluster
    RENAME COLUMN IF EXISTS common_ingress_link_id TO common_in_link_id,
    RENAME COLUMN IF EXISTS common_egress_link_id TO common_out_link_id,
    RENAME COLUMN IF EXISTS egress_link_direction TO out_link_direction,
    RENAME COLUMN IF EXISTS ingress_link_direction TO in_link_direction,
    RENAME COLUMN IF EXISTS client_country TO client_country_region,
    RENAME COLUMN IF EXISTS client_province TO client_super_admin_area,
    RENAME COLUMN IF EXISTS client_region TO client_admin_area,
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_province TO server_super_admin_area,
    RENAME COLUMN IF EXISTS server_region TO server_admin_area;

ALTER TABLE cyber_narrator_galaxy.session_record_cn ON CLUSTER ck_query
    RENAME COLUMN IF EXISTS common_ingress_link_id TO common_in_link_id,
    RENAME COLUMN IF EXISTS common_egress_link_id TO common_out_link_id,
    RENAME COLUMN IF EXISTS egress_link_direction TO out_link_direction,
    RENAME COLUMN IF EXISTS ingress_link_direction TO in_link_direction,
    RENAME COLUMN IF EXISTS client_country TO client_country_region,
    RENAME COLUMN IF EXISTS client_province TO client_super_admin_area,
    RENAME COLUMN IF EXISTS client_region TO client_admin_area,
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_province TO server_super_admin_area,
    RENAME COLUMN IF EXISTS server_region TO server_admin_area;

-- Same rename set for metric_link (original statement lists the geo columns
-- first; the order of RENAME actions is preserved).
ALTER TABLE cyber_narrator_galaxy.metric_link_local ON CLUSTER ck_cluster
    RENAME COLUMN IF EXISTS client_country TO client_country_region,
    RENAME COLUMN IF EXISTS client_province TO client_super_admin_area,
    RENAME COLUMN IF EXISTS client_region TO client_admin_area,
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_province TO server_super_admin_area,
    RENAME COLUMN IF EXISTS server_region TO server_admin_area,
    RENAME COLUMN IF EXISTS common_ingress_link_id TO common_in_link_id,
    RENAME COLUMN IF EXISTS common_egress_link_id TO common_out_link_id,
    RENAME COLUMN IF EXISTS egress_link_direction TO out_link_direction,
    RENAME COLUMN IF EXISTS ingress_link_direction TO in_link_direction;

ALTER TABLE cyber_narrator_galaxy.metric_link ON CLUSTER ck_cluster
    RENAME COLUMN IF EXISTS client_country TO client_country_region,
    RENAME COLUMN IF EXISTS client_province TO client_super_admin_area,
    RENAME COLUMN IF EXISTS client_region TO client_admin_area,
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_province TO server_super_admin_area,
    RENAME COLUMN IF EXISTS server_region TO server_admin_area,
    RENAME COLUMN IF EXISTS common_ingress_link_id TO common_in_link_id,
    RENAME COLUMN IF EXISTS common_egress_link_id TO common_out_link_id,
    RENAME COLUMN IF EXISTS egress_link_direction TO out_link_direction,
    RENAME COLUMN IF EXISTS ingress_link_direction TO in_link_direction;

ALTER TABLE cyber_narrator_galaxy.metric_link ON CLUSTER ck_query
    RENAME COLUMN IF EXISTS client_country TO client_country_region,
    RENAME COLUMN IF EXISTS client_province TO client_super_admin_area,
    RENAME COLUMN IF EXISTS client_region TO client_admin_area,
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_province TO server_super_admin_area,
    RENAME COLUMN IF EXISTS server_region TO server_admin_area,
    RENAME COLUMN IF EXISTS common_ingress_link_id TO common_in_link_id,
    RENAME COLUMN IF EXISTS common_egress_link_id TO common_out_link_id,
    RENAME COLUMN IF EXISTS egress_link_direction TO out_link_direction,
    RENAME COLUMN IF EXISTS ingress_link_direction TO in_link_direction;
|
||||
|
||||
|
||||
-- metric_dns_server_ip: insert the new mid-level geo column, then bring the
-- remaining geo columns onto the new naming scheme, then drop server_org.
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip_local ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS server_super_admin_area String AFTER server_country;
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS server_super_admin_area String AFTER server_country;
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_query
    ADD COLUMN IF NOT EXISTS server_super_admin_area String AFTER server_country;

ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip_local ON CLUSTER ck_cluster
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_city TO server_admin_area;
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_cluster
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_city TO server_admin_area;
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_query
    RENAME COLUMN IF EXISTS server_country TO server_country_region,
    RENAME COLUMN IF EXISTS server_city TO server_admin_area;

ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip_local ON CLUSTER ck_cluster
    DROP COLUMN IF EXISTS server_org;
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_cluster
    DROP COLUMN IF EXISTS server_org;
ALTER TABLE cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_query
    DROP COLUMN IF EXISTS server_org;

-- metric_domain: add the second-level-domain column next to domain.
ALTER TABLE cyber_narrator_galaxy.metric_domain_local ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS domain_sld String AFTER domain;
ALTER TABLE cyber_narrator_galaxy.metric_domain ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS domain_sld String AFTER domain;
ALTER TABLE cyber_narrator_galaxy.metric_domain ON CLUSTER ck_query
    ADD COLUMN IF NOT EXISTS domain_sld String AFTER domain;
|
||||
|
||||
|
||||
|
||||
-- metric_region: per-region traffic/quality rollups.
-- One shard-local MergeTree plus identical Distributed front tables on the
-- storage cluster (ck_cluster) and the query cluster (ck_query).
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_region_local ON CLUSTER ck_cluster
(
    country_region String,
    super_admin_area String,
    admin_area String,
    side String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Nullable(Float64),
    s2c_tcp_lostlen_ratio Nullable(Float64),
    tcp_lostlen_ratio Nullable(Float64),
    c2s_tcp_unorder_num_ratio Nullable(Float64),
    s2c_tcp_unorder_num_ratio Nullable(Float64),
    tcp_unorder_num_ratio Nullable(Float64),
    c2s_byte_retrans_ratio Nullable(Float64),
    s2c_byte_retrans_ratio Nullable(Float64),
    byte_retrans_ratio Nullable(Float64),
    c2s_pkt_retrans_ratio Nullable(Float64),
    s2c_pkt_retrans_ratio Nullable(Float64),
    pkt_retrans_ratio Nullable(Float64),
    avg_establish_latency_ms Nullable(Float64),
    avg_http_response_latency_ms Nullable(Float64),
    avg_ssl_con_latency_ms Nullable(Float64)
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(toDate(stat_time))
ORDER BY (stat_time, country_region, super_admin_area, admin_area)
SETTINGS index_granularity = 8192;

-- Distributed front table on the storage cluster; schema mirrors the local
-- table column for column.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_region ON CLUSTER ck_cluster
(
    country_region String,
    super_admin_area String,
    admin_area String,
    side String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Nullable(Float64),
    s2c_tcp_lostlen_ratio Nullable(Float64),
    tcp_lostlen_ratio Nullable(Float64),
    c2s_tcp_unorder_num_ratio Nullable(Float64),
    s2c_tcp_unorder_num_ratio Nullable(Float64),
    tcp_unorder_num_ratio Nullable(Float64),
    c2s_byte_retrans_ratio Nullable(Float64),
    s2c_byte_retrans_ratio Nullable(Float64),
    byte_retrans_ratio Nullable(Float64),
    c2s_pkt_retrans_ratio Nullable(Float64),
    s2c_pkt_retrans_ratio Nullable(Float64),
    pkt_retrans_ratio Nullable(Float64),
    avg_establish_latency_ms Nullable(Float64),
    avg_http_response_latency_ms Nullable(Float64),
    avg_ssl_con_latency_ms Nullable(Float64)
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_region_local', rand());

-- Distributed front table on the query cluster; still routes to the storage
-- cluster's local tables.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_region ON CLUSTER ck_query
(
    country_region String,
    super_admin_area String,
    admin_area String,
    side String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Nullable(Float64),
    s2c_tcp_lostlen_ratio Nullable(Float64),
    tcp_lostlen_ratio Nullable(Float64),
    c2s_tcp_unorder_num_ratio Nullable(Float64),
    s2c_tcp_unorder_num_ratio Nullable(Float64),
    tcp_unorder_num_ratio Nullable(Float64),
    c2s_byte_retrans_ratio Nullable(Float64),
    s2c_byte_retrans_ratio Nullable(Float64),
    byte_retrans_ratio Nullable(Float64),
    c2s_pkt_retrans_ratio Nullable(Float64),
    s2c_pkt_retrans_ratio Nullable(Float64),
    pkt_retrans_ratio Nullable(Float64),
    avg_establish_latency_ms Nullable(Float64),
    avg_http_response_latency_ms Nullable(Float64),
    avg_ssl_con_latency_ms Nullable(Float64)
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_region_local', rand());
|
||||
|
||||
-- metric_relation: landing table for (app, domain, ip) relation records fed
-- from Kafka; consumed by the cn_entity_relation_view_metric MV.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_relation_local ON CLUSTER ck_cluster
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    stat_time Int64
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(toDate(stat_time))
ORDER BY (stat_time)
SETTINGS index_granularity = 8192;

-- Distributed front table on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_relation ON CLUSTER ck_cluster
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    stat_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_relation_local', rand());

-- Distributed front table on the query cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_relation ON CLUSTER ck_query
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    stat_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_relation_local', rand());
|
||||
|
||||
|
||||
|
||||
-- metric_ip_dynamic_attribute: landing table for (ip, l7_protocol, port)
-- observations fed from Kafka; consumed by cn_ip_dynamic_attribute_view_metric.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_ip_dynamic_attribute_local ON CLUSTER ck_cluster
(
    ip String,
    l7_protocol String,
    port Int64,
    stat_time Int64
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(toDate(stat_time))
ORDER BY (stat_time)
SETTINGS index_granularity = 8192;

-- Distributed front table on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_ip_dynamic_attribute ON CLUSTER ck_cluster
(
    ip String,
    l7_protocol String,
    port Int64,
    stat_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_ip_dynamic_attribute_local', rand());

-- Distributed front table on the query cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_ip_dynamic_attribute ON CLUSTER ck_query
(
    ip String,
    l7_protocol String,
    port Int64,
    stat_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_ip_dynamic_attribute_local', rand());
|
||||
|
||||
|
||||
|
||||
-- cn_entity_relation: deduplicated (ip, domain, app_name) entity catalogue.
-- Two TTL expressions:
--   * update_time + 2592000 s (30 days): rows not refreshed for a month are
--     deleted;
--   * update_time + 1 s with GROUP BY: rows older than one second are rolled
--     up per (ip, domain, app_name), keeping min(create_time),
--     max(update_time), the last-seen attribute values, and the union of
--     entity_tags.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation_local ON CLUSTER ck_cluster
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    create_time Int64,
    update_time Int64
)
ENGINE = MergeTree
ORDER BY (ip, domain, app_name)
TTL toDateTime(update_time) + toIntervalSecond(2592000),
    toDateTime(update_time) + toIntervalSecond(1)
        GROUP BY ip, domain, app_name
        SET create_time = min(create_time),
            update_time = max(update_time),
            ip_country_region = anyLast(ip_country_region),
            ip_super_admin_area = anyLast(ip_super_admin_area),
            ip_admin_area = anyLast(ip_admin_area),
            ip_asn = anyLast(ip_asn),
            ip_isp = anyLast(ip_isp),
            domain_category_name = anyLast(domain_category_name),
            domain_category_group = anyLast(domain_category_group),
            app_category = anyLast(app_category),
            app_subcategory = anyLast(app_subcategory),
            entity_tags = groupUniqArrayArray(entity_tags);

-- Distributed front table on the query cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation ON CLUSTER ck_query
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_entity_relation_local', rand());

-- Distributed front table on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation ON CLUSTER ck_cluster
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_entity_relation_local', rand());
|
||||
|
||||
|
||||
-- Feed cn_entity_relation_local from raw session records: one aggregated row
-- per (ip, app_name, domain) insert batch, taking the last-seen attributes
-- and unioning server/domain/app tags into entity_tags.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation_view
    ON CLUSTER ck_cluster
    TO cyber_narrator_galaxy.cn_entity_relation_local
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    create_time Int64,
    update_time Int64
) AS
SELECT
    common_app_label AS app_name,
    domain AS domain,
    common_server_ip AS ip,
    anyLast(server_country_region) AS ip_country_region,
    anyLast(server_super_admin_area) AS ip_super_admin_area,
    anyLast(server_admin_area) AS ip_admin_area,
    anyLast(server_asn) AS ip_asn,
    anyLast(server_isp) AS ip_isp,
    anyLast(domain_category_name) AS domain_category_name,
    anyLast(domain_category_group) AS domain_category_group,
    anyLast(app_category) AS app_category,
    anyLast(app_subcategory) AS app_subcategory,
    groupUniqArrayArray(arrayConcat(server_ip_tags, domain_tags, app_tags)) AS entity_tags,
    min(c1.common_recv_time) AS create_time,
    max(c1.common_recv_time) AS update_time
FROM cyber_narrator_galaxy.session_record_cn_local AS c1
WHERE (common_l4_protocol = 'IPv4_TCP') OR (common_server_port IN (53, 443))
GROUP BY
    ip,
    app_name,
    domain;

-- Second feed into the same target: relation records imported from Kafka
-- land in metric_relation_local and are folded in with stat_time as the
-- create/update timestamps.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_entity_relation_view_metric
    ON CLUSTER ck_cluster
    TO cyber_narrator_galaxy.cn_entity_relation_local
(
    app_name String,
    domain String,
    ip String,
    ip_country_region String,
    ip_super_admin_area String,
    ip_admin_area String,
    ip_asn String,
    ip_isp String,
    domain_category_name String,
    domain_category_group String,
    app_category String,
    app_subcategory String,
    entity_tags Array(String),
    create_time Int64,
    update_time Int64
) AS
SELECT
    app_name AS app_name,
    domain AS domain,
    ip AS ip,
    anyLast(ip_country_region) AS ip_country_region,
    anyLast(ip_super_admin_area) AS ip_super_admin_area,
    anyLast(ip_admin_area) AS ip_admin_area,
    anyLast(ip_asn) AS ip_asn,
    anyLast(ip_isp) AS ip_isp,
    anyLast(domain_category_name) AS domain_category_name,
    anyLast(domain_category_group) AS domain_category_group,
    anyLast(app_category) AS app_category,
    anyLast(app_subcategory) AS app_subcategory,
    groupUniqArrayArray(entity_tags) AS entity_tags,
    min(c1.stat_time) AS create_time,
    max(c1.stat_time) AS update_time
FROM cyber_narrator_galaxy.metric_relation_local AS c1
GROUP BY
    ip,
    app_name,
    domain;
|
||||
|
||||
|
||||
-- cn_ip_dynamic_attribute: deduplicated (ip, port, l7_protocol) catalogue.
-- Same TTL scheme as cn_entity_relation_local: delete after 30 days without
-- refresh, roll duplicates up after 1 s keeping min(create_time) and
-- max(update_time).
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute_local ON CLUSTER ck_cluster
(
    ip String,
    l7_protocol String,
    port Int64,
    create_time Int64,
    update_time Int64
)
ENGINE = MergeTree
ORDER BY (ip, port, l7_protocol)
TTL toDateTime(update_time) + toIntervalSecond(2592000),
    toDateTime(update_time) + toIntervalSecond(1)
        GROUP BY ip, port, l7_protocol
        SET create_time = min(create_time),
            update_time = max(update_time);

-- Distributed front table on the query cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute ON CLUSTER ck_query
(
    ip String,
    l7_protocol String,
    port Int64,
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_ip_dynamic_attribute_local', rand());

-- Distributed front table on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute ON CLUSTER ck_cluster
(
    ip String,
    l7_protocol String,
    port Int64,
    create_time Int64,
    update_time Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'cn_ip_dynamic_attribute_local', rand());
|
||||
|
||||
|
||||
-- Feed cn_ip_dynamic_attribute_local from raw session records, one row per
-- (ip, l7_protocol, port) seen in each insert batch.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute_view
    ON CLUSTER ck_cluster
    TO cyber_narrator_galaxy.cn_ip_dynamic_attribute_local
(
    ip String,
    l7_protocol String,
    port Int64,
    create_time Int64,
    update_time Int64
) AS
SELECT
    common_server_ip AS ip,
    common_l7_protocol AS l7_protocol,
    common_server_port AS port,
    min(c1.common_recv_time) AS create_time,
    max(c1.common_recv_time) AS update_time
FROM cyber_narrator_galaxy.session_record_cn_local AS c1
WHERE (common_l4_protocol = 'IPv4_TCP') OR (common_server_port IN (53, 443))
GROUP BY
    ip,
    l7_protocol,
    port;

-- Second feed into the same target, from the Kafka-imported
-- metric_ip_dynamic_attribute_local table (stat_time used as the timestamp).
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute_view_metric
    ON CLUSTER ck_cluster
    TO cyber_narrator_galaxy.cn_ip_dynamic_attribute_local
(
    ip String,
    l7_protocol String,
    port Int64,
    create_time Int64,
    update_time Int64
) AS
SELECT
    ip AS ip,
    l7_protocol AS l7_protocol,
    port AS port,
    min(c1.stat_time) AS create_time,
    max(c1.stat_time) AS update_time
FROM cyber_narrator_galaxy.metric_ip_dynamic_attribute_local AS c1
GROUP BY
    ip,
    l7_protocol,
    port;
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout launcher / watchdog.
#
# Keeps exactly $1 gohangout processes running against
# conf/${YML_NAME}.yml; if fewer are alive a new one is started every
# second, and after PROCESS_SUM consecutive restarts all processes are
# killed and the loop backs off for 30 s (assumed to mean Kafka/ClickHouse
# is down).  If more than $1 are alive, all are killed and respawned.
#
# Fixes over the previous revision:
#   * ((var++)) is a bash-ism and fails under /bin/sh (e.g. dash); replaced
#     with POSIX $((...)) arithmetic.
#   * $BASE_DIR/bin/start_log was written to but never created; it is now
#     created alongside $BASE_DIR/logs.
#   * A missing/non-numeric process-count argument used to make every
#     numeric test error once per second forever; now it fails fast.
#   * Path expansions are quoted.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_ip_dynamic_attribute_cn
# Root of the gohangout installation (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker owns its own filter/output.
THREAD_SUM=3
# Desired number of gohangout processes (first positional argument).
PROCESS_SUM=$1

# Fail fast when the process count is missing or not a positive integer.
case "$PROCESS_SUM" in
    ''|*[!0-9]*)
        echo "usage: $0 <process_count>" >&2
        exit 1
        ;;
esac

# Both log directories must exist before anything is appended to them.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume the sink/source is down,
        # kill everything and back off before trying again.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the next
    # iteration respawn the right number.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout launcher / watchdog for the metric_relation pipeline.
#
# Keeps exactly $1 gohangout processes running against
# conf/${YML_NAME}.yml; if fewer are alive a new one is started every
# second, and after PROCESS_SUM consecutive restarts all processes are
# killed and the loop backs off for 30 s (assumed to mean Kafka/ClickHouse
# is down).  If more than $1 are alive, all are killed and respawned.
#
# Fixes over the previous revision:
#   * ((var++)) is a bash-ism and fails under /bin/sh (e.g. dash); replaced
#     with POSIX $((...)) arithmetic.
#   * $BASE_DIR/bin/start_log was written to but never created; it is now
#     created alongside $BASE_DIR/logs.
#   * A missing/non-numeric process-count argument used to make every
#     numeric test error once per second forever; now it fails fast.
#   * Path expansions are quoted.

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_metric_relation_cn
# Root of the gohangout installation (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker owns its own filter/output.
THREAD_SUM=3
# Desired number of gohangout processes (first positional argument).
PROCESS_SUM=$1

# Fail fast when the process count is missing or not a positive integer.
case "$PROCESS_SUM" in
    ''|*[!0-9]*)
        echo "usage: $0 <process_count>" >&2
        exit 1
        ;;
esac

# Both log directories must exist before anything is appended to them.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many restarts in a row: assume the sink/source is down,
        # kill everything and back off before trying again.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and let the next
    # iteration respawn the right number.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-ATTRIBUTE ->
# ClickHouse table metric_ip_dynamic_attribute_local.
# NOTE(review): indentation reconstructed from a mangled extraction — verify
# against a working gohangout config before deploying.
inputs:
    - Kafka:
        topic:
            # topic name: number of consumer threads
            METRIC-ATTRIBUTE: 1
        #assign:
        #  weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-metric_ip_dynamic_attribute_cn
            # 10 MiB max fetch per partition
            max.partition.fetch.bytes: '10485760'
            # commit offsets every 5000 ms
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        # Writes straight into the shard-local table, not the Distributed one.
        table: 'cyber_narrator_galaxy.metric_ip_dynamic_attribute_local'
        username: 'default'
        # SECURITY: plaintext credential committed to the repo — consider
        # moving it to an environment variable or secret store.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # rows buffered per bulk insert
        bulk_actions: 100000
        # presumably seconds between forced flushes — TODO confirm units
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
inputs:
|
||||
- Kafka:
|
||||
topic:
|
||||
METRIC-RELATION: 1
|
||||
#assign:
|
||||
# weblog: [0,9]
|
||||
codec: json
|
||||
consumer_settings:
|
||||
bootstrap.servers: "192.168.44.55:9092"
|
||||
group.id: tsg-metric_relation_cn
|
||||
max.partition.fetch.bytes: '10485760'
|
||||
auto.commit.interval.ms: '5000'
|
||||
# from.beginning: 'true'
|
||||
# sasl.mechanism: PLAIN
|
||||
# sasl.user: admin
|
||||
# sasl.password: admin-secret
|
||||
|
||||
outputs:
|
||||
- Clickhouse:
|
||||
table: 'cyber_narrator_galaxy.metric_relation_local'
|
||||
username: 'default'
|
||||
password: 'ceiec2019'
|
||||
hosts:
|
||||
- 'tcp://192.168.44.55:9001'
|
||||
bulk_actions: 100000
|
||||
flush_interval: 30
|
||||
concurrent: 2
|
||||
conn_max_life_time: 60
|
||||
21
cyber_narrator/upgrade/2023/CN-23.09/gohangout/start_all.sh
Normal file
21
cyber_narrator/upgrade/2023/CN-23.09/gohangout/start_all.sh
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/bin/sh
|
||||
|
||||
STARTDIR=$(cd $(dirname $0); pwd)
|
||||
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_region_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_asn_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_application_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_domain_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_protocol_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_link_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_server_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qtype_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rcode_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_a_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_aaaa_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_relation_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_dynamic_attribute_cn.sh $1 > /dev/null 2>&1 &
|
||||
$STARTDIR/bin/ghoStart/start_gohangout_k2ck_cn_record.sh $1
|
||||
@@ -0,0 +1,143 @@
|
||||
set distributed_ddl_task_timeout = 180;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS asymmetric_sessions Int64 after avg_ssl_con_latency_ms;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS asymmetric_sessions Int64 after avg_ssl_con_latency_ms;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS asymmetric_sessions Int64 after avg_ssl_con_latency_ms;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS bulky_sessions Int64 after asymmetric_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS bulky_sessions Int64 after asymmetric_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS bulky_sessions Int64 after asymmetric_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS cbr_streaming_sessions Int64 after bulky_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS cbr_streaming_sessions Int64 after bulky_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS cbr_streaming_sessions Int64 after bulky_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS download_sessions Int64 after cbr_streaming_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS download_sessions Int64 after cbr_streaming_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS download_sessions Int64 after cbr_streaming_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS interactive_sessions Int64 after download_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS interactive_sessions Int64 after download_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS interactive_sessions Int64 after download_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS pseudo_unidirectional_sessions Int64 after interactive_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS pseudo_unidirectional_sessions Int64 after interactive_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS pseudo_unidirectional_sessions Int64 after interactive_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS streaming_sessions Int64 after pseudo_unidirectional_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS streaming_sessions Int64 after pseudo_unidirectional_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS streaming_sessions Int64 after pseudo_unidirectional_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS unidirectional_sessions Int64 after streaming_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS unidirectional_sessions Int64 after streaming_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS unidirectional_sessions Int64 after streaming_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS random_looking_sessions Int64 after unidirectional_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS random_looking_sessions Int64 after unidirectional_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS random_looking_sessions Int64 after unidirectional_sessions;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.metric_ip_local on cluster ck_cluster add column IF NOT EXISTS bidirectional_sessions Int64 after random_looking_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_cluster add column IF NOT EXISTS bidirectional_sessions Int64 after random_looking_sessions;
|
||||
ALTER table cyber_narrator_galaxy.metric_ip on cluster ck_query add column IF NOT EXISTS bidirectional_sessions Int64 after random_looking_sessions;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_security_event_local ON CLUSTER ck_cluster
|
||||
(
|
||||
|
||||
status Int64,
|
||||
is_builtin Int64,
|
||||
rule_type String,
|
||||
victim_ip String,
|
||||
offender_ip String,
|
||||
event_info String,
|
||||
event_key String,
|
||||
severity String,
|
||||
event_type String,
|
||||
duration_s Int64,
|
||||
event_name String,
|
||||
app String,
|
||||
domain String,
|
||||
event_id Int64,
|
||||
rule_id Int64,
|
||||
start_time Int64,
|
||||
end_time Int64,
|
||||
match_times Int64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY event_id
|
||||
TTL toDateTime(end_time) + toIntervalSecond(2592000),
|
||||
toDateTime(end_time) + toIntervalSecond(1) GROUP BY event_id
|
||||
SET
|
||||
status = anyLast(status),
|
||||
is_builtin = anyLast(is_builtin),
|
||||
rule_type = anyLast(rule_type),
|
||||
victim_ip = anyLast(victim_ip),
|
||||
offender_ip = anyLast(offender_ip),
|
||||
event_info = anyLast(event_info),
|
||||
event_key = anyLast(event_key),
|
||||
severity = anyLast(severity),
|
||||
event_type = anyLast(event_type),
|
||||
duration_s = anyLast(duration_s),
|
||||
event_name = anyLast(event_name),
|
||||
app = anyLast(app),
|
||||
domain = anyLast(domain),
|
||||
rule_id = anyLast(rule_id),
|
||||
start_time = anyLast(start_time),
|
||||
end_time = max(end_time),
|
||||
match_times = anyLast(match_times);
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_security_event ON CLUSTER ck_cluster
|
||||
(
|
||||
|
||||
status Int64,
|
||||
is_builtin Int64,
|
||||
rule_type String,
|
||||
victim_ip String,
|
||||
offender_ip String,
|
||||
event_info String,
|
||||
event_key String,
|
||||
severity String,
|
||||
event_type String,
|
||||
duration_s Int64,
|
||||
event_name String,
|
||||
app String,
|
||||
domain String,
|
||||
event_id Int64,
|
||||
rule_id Int64,
|
||||
start_time Int64,
|
||||
end_time Int64,
|
||||
match_times Int64
|
||||
)
|
||||
ENGINE = Distributed('ck_cluster',
|
||||
'cyber_narrator_galaxy',
|
||||
'cn_security_event_local',
|
||||
rand());
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.cn_security_event ON CLUSTER ck_query
|
||||
(
|
||||
|
||||
status Int64,
|
||||
is_builtin Int64,
|
||||
rule_type String,
|
||||
victim_ip String,
|
||||
offender_ip String,
|
||||
event_info String,
|
||||
event_key String,
|
||||
severity String,
|
||||
event_type String,
|
||||
duration_s Int64,
|
||||
event_name String,
|
||||
app String,
|
||||
domain String,
|
||||
event_id Int64,
|
||||
rule_id Int64,
|
||||
start_time Int64,
|
||||
end_time Int64,
|
||||
match_times Int64
|
||||
)
|
||||
ENGINE = Distributed('ck_cluster',
|
||||
'cyber_narrator_galaxy',
|
||||
'cn_security_event_local',
|
||||
rand());
|
||||
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
|
||||
#gohangout启动文件
|
||||
|
||||
#配置文件名称,注意配置文件需要放在$BASE_DIR/conf文件夹下
|
||||
YML_NAME=k2ck_security_event_cn
|
||||
#gohangout的二进制启动文件路径
|
||||
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
|
||||
#日志级别,1,5,10,数字越大日志越详细
|
||||
LOG_LV=5
|
||||
#线程总数,即开启worker数量,每个线程拥有自己的filter, output,占用多份内存,默认1个线程
|
||||
THREAD_SUM=3
|
||||
#进程总数
|
||||
PROCESS_SUM=$1
|
||||
|
||||
if [ ! -d "$BASE_DIR/logs" ]; then
|
||||
mkdir -p $BASE_DIR/logs
|
||||
fi
|
||||
|
||||
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
|
||||
id=0
|
||||
logid=0
|
||||
while true ; do
|
||||
NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
|
||||
pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
|
||||
time_stamp=$(date +%Y%m%d%H%M%S)
|
||||
if [ "${NUM}" -lt ${PROCESS_SUM} ];then
|
||||
$BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
|
||||
echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
|
||||
((logid++))
|
||||
((id++))
|
||||
if [ ${logid} -gt ${PROCESS_SUM} ];then
|
||||
logid=0
|
||||
pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
|
||||
for pid in $pids
|
||||
do
|
||||
kill -9 $pid
|
||||
done
|
||||
echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
|
||||
sleep 30
|
||||
fi
|
||||
#大于设置进程数,杀掉所有进程,重启
|
||||
elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
|
||||
for pid in $pids
|
||||
do
|
||||
kill -9 $pid
|
||||
done
|
||||
id=0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
@@ -0,0 +1,28 @@
|
||||
inputs:
|
||||
- Kafka:
|
||||
topic:
|
||||
SECURITY-EVENT-CN: 1
|
||||
#assign:
|
||||
# weblog: [0,9]
|
||||
codec: json
|
||||
consumer_settings:
|
||||
bootstrap.servers: "192.168.44.55:9092"
|
||||
group.id: security_event_cn_1
|
||||
max.partition.fetch.bytes: '10485760'
|
||||
auto.commit.interval.ms: '5000'
|
||||
# from.beginning: 'true'
|
||||
# sasl.mechanism: PLAIN
|
||||
# sasl.user: admin
|
||||
# sasl.password: admin-secret
|
||||
|
||||
outputs:
|
||||
- Clickhouse:
|
||||
table: 'cyber_narrator_galaxy.cn_security_event_local'
|
||||
username: 'default'
|
||||
password: 'ceiec2019'
|
||||
hosts:
|
||||
- 'tcp://192.168.44.55:9001'
|
||||
bulk_actions: 100000
|
||||
flush_interval: 30
|
||||
concurrent: 2
|
||||
conn_max_life_time: 60
|
||||
21
cyber_narrator/upgrade/2023/CN-23.10/gohangout/start_all.sh
Normal file
21
cyber_narrator/upgrade/2023/CN-23.10/gohangout/start_all.sh
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/bin/sh
|
||||
|
||||
STARTDIR=$(cd $(dirname $0); pwd)
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_security_event_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_region_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_asn_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_application_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_domain_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_protocol_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_link_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_server_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qtype_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rcode_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_a_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_aaaa_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_relation_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_dynamic_attribute_cn.sh $1 > /dev/null 2>&1 &
|
||||
$STARTDIR/bin/ghoStart/start_gohangout_k2ck_cn_record.sh $1
|
||||
1004
cyber_narrator/upgrade/2023/CN-23.12/clickhouse/update-23.12-ck.sql
Normal file
1004
cyber_narrator/upgrade/2023/CN-23.12/clickhouse/update-23.12-ck.sql
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
|
||||
#gohangout启动文件
|
||||
|
||||
#配置文件名称,注意配置文件需要放在$BASE_DIR/conf文件夹下
|
||||
YML_NAME=k2ck_metric_subscriber_app_cn
|
||||
#gohangout的二进制启动文件路径
|
||||
BASE_DIR=$(cd $(dirname $0); cd ../../; pwd)
|
||||
#日志级别,1,5,10,数字越大日志越详细
|
||||
LOG_LV=5
|
||||
#线程总数,即开启worker数量,每个线程拥有自己的filter, output,占用多份内存,默认1个线程
|
||||
THREAD_SUM=3
|
||||
#进程总数
|
||||
PROCESS_SUM=$1
|
||||
|
||||
if [ ! -d "$BASE_DIR/logs" ]; then
|
||||
mkdir -p $BASE_DIR/logs
|
||||
fi
|
||||
|
||||
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
|
||||
id=0
|
||||
logid=0
|
||||
while true ; do
|
||||
NUM=`ps -ef | grep -w ${YML_NAME}.yml | grep -v grep |wc -l`
|
||||
pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
|
||||
time_stamp=$(date +%Y%m%d%H%M%S)
|
||||
if [ "${NUM}" -lt ${PROCESS_SUM} ];then
|
||||
$BASE_DIR/bin/gohangout --config $BASE_DIR/conf/${YML_NAME}.yml --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> $BASE_DIR/logs/${YML_NAME}.log 2>&1 &
|
||||
echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
|
||||
((logid++))
|
||||
((id++))
|
||||
if [ ${logid} -gt ${PROCESS_SUM} ];then
|
||||
logid=0
|
||||
pids=$(ps -ef | grep -w ${YML_NAME}.yml | grep -v grep | awk '{print $2}')
|
||||
for pid in $pids
|
||||
do
|
||||
kill -9 $pid
|
||||
done
|
||||
echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> $BASE_DIR/bin/start_log/${YML_NAME}_restart.log
|
||||
sleep 30
|
||||
fi
|
||||
#大于设置进程数,杀掉所有进程,重启
|
||||
elif [ "${NUM}" -gt ${PROCESS_SUM} ];then
|
||||
for pid in $pids
|
||||
do
|
||||
kill -9 $pid
|
||||
done
|
||||
id=0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
@@ -0,0 +1,28 @@
|
||||
inputs:
|
||||
- Kafka:
|
||||
topic:
|
||||
METRIC-SUBSCRIBER-APP: 1
|
||||
#assign:
|
||||
# weblog: [0,9]
|
||||
codec: json
|
||||
consumer_settings:
|
||||
bootstrap.servers: "192.168.44.55:9092"
|
||||
group.id: metric_subscriber_app_1
|
||||
max.partition.fetch.bytes: '10485760'
|
||||
auto.commit.interval.ms: '5000'
|
||||
# from.beginning: 'true'
|
||||
# sasl.mechanism: PLAIN
|
||||
# sasl.user: admin
|
||||
# sasl.password: admin-secret
|
||||
|
||||
outputs:
|
||||
- Clickhouse:
|
||||
table: 'cyber_narrator_galaxy.metric_subscriber_app_local'
|
||||
username: 'default'
|
||||
password: 'ceiec2019'
|
||||
hosts:
|
||||
- 'tcp://192.168.44.55:9001'
|
||||
bulk_actions: 100000
|
||||
flush_interval: 30
|
||||
concurrent: 2
|
||||
conn_max_life_time: 60
|
||||
22
cyber_narrator/upgrade/2023/CN-23.12/gohangout/start_all.sh
Normal file
22
cyber_narrator/upgrade/2023/CN-23.12/gohangout/start_all.sh
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/bin/sh
|
||||
|
||||
STARTDIR=$(cd $(dirname $0); pwd)
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_subscriber_app_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_security_event_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_region_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_asn_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_application_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_domain_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_protocol_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_link_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_server_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qtype_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rcode_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_a_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_aaaa_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_relation_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_dynamic_attribute_cn.sh $1 > /dev/null 2>&1 &
|
||||
$STARTDIR/bin/ghoStart/start_gohangout_k2ck_cn_record.sh $1
|
||||
@@ -0,0 +1,6 @@
|
||||
v1.2.4 (2024-04-08)
|
||||
https://git.mesalab.cn/galaxy/platform/groot-stream/-/releases/v1.2.4
|
||||
|
||||
etl_session_record_kafka_to_cn_kafka 适用于单机场景(接的SESSION-RECORD)
|
||||
etl_session_record_processed_kafka_to_cn_kafka 适用于分中心->国家中心的场景(接的SESSION-RECORD-PROCESSED)
|
||||
|
||||
@@ -0,0 +1,404 @@
|
||||
sources:
|
||||
kafka_source:
|
||||
type: kafka
|
||||
# fields: # [array of object] Field List, if not set, all fields(Map<String, Object>) will be output.
|
||||
properties: # [object] Source Properties
|
||||
topic: SESSION-RECORD
|
||||
kafka.bootstrap.servers: {{ tsg_olap_kafka_servers }}
|
||||
kafka.session.timeout.ms: 60000
|
||||
kafka.max.poll.records: 3000
|
||||
kafka.max.partition.fetch.bytes: 31457280
|
||||
kafka.security.protocol: SASL_PLAINTEXT
|
||||
kafka.sasl.mechanism: PLAIN
|
||||
kafka.sasl.jaas.config: 454f65ea6eef1256e3067104f82730e737b68959560966b811e7ff364116b03124917eb2b0f3596f14733aa29ebad9352644ce1a5c85991c6f01ba8a5e8f177a7ff0b2d3889a424249967b3870b50993d9644f239f0de82cdb13bdb502959e16afadffa49ef1e1d2b9c9b5113e619817
|
||||
kafka.group.id: etl_processed_session_record_kafka_to_cn_kafka
|
||||
kafka.auto.offset.reset: latest
|
||||
format: json
|
||||
|
||||
processing_pipelines:
|
||||
session_record_processor: # [object] Processing Pipeline
|
||||
type: com.geedgenetworks.core.processor.projection.ProjectionProcessorImpl
|
||||
remove_fields:
|
||||
output_fields:
|
||||
functions: # [array of object] Function List
|
||||
- function: SNOWFLAKE_ID
|
||||
lookup_fields: [ '' ]
|
||||
output_fields: [ log_id ]
|
||||
parameters:
|
||||
data_center_id_num: 1
|
||||
|
||||
- function: UNIX_TIMESTAMP_CONVERTER
|
||||
lookup_fields: [ __timestamp ]
|
||||
output_fields: [ recv_time ]
|
||||
parameters:
|
||||
precision: seconds
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ domain ]
|
||||
parameters:
|
||||
value_expression: server_fqdn
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ domain_sld ]
|
||||
parameters:
|
||||
value_expression: server_domain
|
||||
|
||||
- function: CN_L7_PROTOCOL_AND_APP_EXTRACT
|
||||
parameters:
|
||||
decoded_path_field_name: decoded_path
|
||||
app_transition_field_name: app_transition
|
||||
l7_protocol_field_name: l7_protocol
|
||||
app_field_name: app
|
||||
l7_protocol: DHCP,DNS,FTP,GRE,GTP,HTTP,HTTPS,ICMP,IMAP,IMAPS,IPSEC,ISAKMP,XMPP,L2TP,LDAP,MMS,NETBIOS,NETFLOW,NTP,POP3,POP3S,RDP,PPTP,RADIUS,RTCP,RTP,RTSP,SIP,SMB,SMTP,SMTPS,SNMP,SSDP,SSH,SSL,STUN,TELNET,TFTP,OPENVPN,RTMP,TEREDO,FTPS,DTLS,SPDY,BJNP,QUIC,MDNS,Unknown TCP,Unknown UDP,Unknown Other,IKE,MAIL,SOCKS,DoH,SLP,SSL with ESNI,ISATAP,Stratum,SSL with ECH
|
||||
|
||||
- function: GEOIP_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ ]
|
||||
parameters:
|
||||
kb_name: cn_ip_location
|
||||
option: IP_TO_OBJECT
|
||||
geolocation_field_mapping:
|
||||
COUNTRY: client_country_region
|
||||
PROVINCE: client_super_admin_area
|
||||
CITY: client_admin_area
|
||||
LONGITUDE: client_longitude
|
||||
LATITUDE: client_latitude
|
||||
ISP: client_isp
|
||||
|
||||
- function: GEOIP_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ ]
|
||||
parameters:
|
||||
kb_name: cn_ip_location
|
||||
option: IP_TO_OBJECT
|
||||
geolocation_field_mapping:
|
||||
COUNTRY: server_country_region
|
||||
PROVINCE: server_super_admin_area
|
||||
CITY: server_admin_area
|
||||
LONGITUDE: server_longitude
|
||||
LATITUDE: server_latitude
|
||||
ISP: server_isp
|
||||
|
||||
- function: ASN_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_asn ]
|
||||
parameters:
|
||||
option: IP_TO_ASN
|
||||
kb_name: cn_ip_asn
|
||||
|
||||
- function: ASN_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_asn ]
|
||||
parameters:
|
||||
option: IP_TO_ASN
|
||||
kb_name: cn_ip_asn
|
||||
|
||||
- function: CN_IDC_RENTER_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_idc_renter ]
|
||||
parameters:
|
||||
kb_name: cn_idc_renter
|
||||
|
||||
- function: CN_IDC_RENTER_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_idc_renter ]
|
||||
parameters:
|
||||
kb_name: cn_idc_renter
|
||||
|
||||
- function: CN_LINK_DIRECTION_LOOKUP
|
||||
lookup_fields: [ in_link_id ]
|
||||
output_fields: [ in_link_direction ]
|
||||
parameters:
|
||||
kb_name: cn_link_direction
|
||||
|
||||
- function: CN_LINK_DIRECTION_LOOKUP
|
||||
lookup_fields: [ out_link_id ]
|
||||
output_fields: [ out_link_direction ]
|
||||
parameters:
|
||||
kb_name: cn_link_direction
|
||||
|
||||
- function: CN_FQDN_CATEGORY_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_category
|
||||
field_mapping:
|
||||
NAME: domain_category_name
|
||||
GROUP: domain_category_group
|
||||
REPUTATION_LEVEL: domain_reputation_level
|
||||
|
||||
- function: CN_ICP_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_icp_company_name ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_icp
|
||||
|
||||
- function: CN_FQDN_WHOIS_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_whois_org ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_whois
|
||||
|
||||
- function: CN_DNS_SERVER_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_dns_server ]
|
||||
parameters:
|
||||
kb_name: cn_dns_server
|
||||
|
||||
- function: CN_APP_CATEGORY_LOOKUP
|
||||
lookup_fields: [ app ]
|
||||
parameters:
|
||||
kb_name: cn_app_category
|
||||
field_mapping:
|
||||
CATEGORY: app_category
|
||||
SUBCATEGORY: app_subcategory
|
||||
COMPANY: app_company
|
||||
COMPANY_CATEGORY: app_company_category
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ client_zone ]
|
||||
parameters:
|
||||
value_expression: "flags & 8 == 8 ? 'internal' : 'external'"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ server_zone ]
|
||||
parameters:
|
||||
value_expression: "flags & 16 == 16 ? 'internal' : 'external'"
|
||||
|
||||
- function: CN_IP_ZONE_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_zone ]
|
||||
parameters:
|
||||
kb_name: none
|
||||
#kb_name: cn_internal_ip
|
||||
|
||||
- function: CN_IP_ZONE_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_zone ]
|
||||
parameters:
|
||||
kb_name: none
|
||||
#kb_name: cn_internal_ip
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sent_bytes ]
|
||||
parameters:
|
||||
value_expression: "sent_bytes == null ? 0 : sent_bytes"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sent_pkts ]
|
||||
parameters:
|
||||
value_expression: "sent_pkts == null ? 0 : sent_pkts"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ received_bytes ]
|
||||
parameters:
|
||||
value_expression: "received_bytes == null ? 0 : received_bytes"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ received_pkts ]
|
||||
parameters:
|
||||
value_expression: "received_pkts == null ? 0 : received_pkts"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? received_bytes : traffic_inbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? received_bytes : traffic_outbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? received_pkts : traffic_inbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? received_pkts : traffic_outbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? sent_bytes : traffic_outbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? sent_bytes : traffic_inbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? sent_pkts : traffic_outbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? sent_pkts : traffic_inbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_internal_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'internal' ? sent_bytes + received_bytes : traffic_internal_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_internal_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'internal' ? sent_pkts + received_pkts : traffic_internal_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_through_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'external' ? sent_bytes + received_bytes : traffic_through_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_through_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'external' ? sent_pkts + received_pkts : traffic_through_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sessions ]
|
||||
parameters:
|
||||
value_expression: "1"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ internal_query_num ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' ? sessions : internal_query_num"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ external_query_num ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' ? sessions : external_query_num"
|
||||
|
||||
- function: CN_VPN_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_vpn_service_name ]
|
||||
parameters:
|
||||
kb_name: cn_vpn_learning_ip
|
||||
option: IP_TO_VPN
|
||||
|
||||
- function: CN_VPN_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_vpn_service_name ]
|
||||
parameters:
|
||||
kb_name: cn_vpn_learning_domain
|
||||
option: DOMAIN_TO_VPN
|
||||
|
||||
- function: CN_IOC_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_malware ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_malware
|
||||
option: IP_TO_MALWARE
|
||||
|
||||
- function: CN_IOC_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_malware ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_malware
|
||||
option: DOMAIN_TO_MALWARE
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
parameters:
|
||||
kb_name: cn_ip_tag_user_define
|
||||
option: IP_TO_TAG
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
parameters:
|
||||
kb_name: cn_ip_tag_user_define
|
||||
option: IP_TO_TAG
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_tags ]
|
||||
parameters:
|
||||
kb_name: cn_domain_tag_user_define
|
||||
option: DOMAIN_TO_TAG
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ app ]
|
||||
output_fields: [ app_tags ]
|
||||
parameters:
|
||||
kb_name: cn_app_tag_user_define
|
||||
option: APP_TO_TAG
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ client_idc_renter,client_ip_tags ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ server_idc_renter,server_dns_server,server_node_type,server_malware,server_vpn_service_name,server_ip_tags ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ domain_node_type,domain_malware,domain_vpn_service_name,domain_tags ]
|
||||
output_fields: [ domain_tags ]
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ client_ip_tags ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
parameters:
|
||||
prefix: ip.
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ server_ip_tags ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
parameters:
|
||||
prefix: ip.
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ domain_tags ]
|
||||
output_fields: [ domain_tags ]
|
||||
parameters:
|
||||
prefix: domain.
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ app_tags ]
|
||||
output_fields: [ app_tags ]
|
||||
parameters:
|
||||
prefix: app.
|
||||
postprocessing_pipelines:
|
||||
remove_field_processor: # [object] Processing Pipeline
|
||||
type: com.geedgenetworks.core.processor.projection.ProjectionProcessorImpl
|
||||
output_fields: [ recv_time,log_id,flags,start_timestamp_ms,end_timestamp_ms,duration_ms,decoded_as,client_ip,server_ip,client_port,server_port,app,app_transition,decoded_path,ip_protocol,l7_protocol,out_link_id,in_link_id,subscriber_id,imei,imsi,phone_number,apn,http_url,dns_rcode,dns_qname,dns_qtype,dns_rr,out_link_direction,in_link_direction,server_fqdn,server_domain,domain,domain_sld,domain_category_name,domain_category_group,domain_reputation_level,domain_icp_company_name,domain_whois_org,domain_tags,client_zone,client_country_region,client_super_admin_area,client_admin_area,client_longitude,client_latitude,client_isp,client_asn,client_ip_tags,server_zone,server_country_region,server_super_admin_area,server_admin_area,server_longitude,server_latitude,server_isp,server_asn,server_ip_tags,app_category,app_subcategory,app_company,app_company_category,app_tags,sent_pkts,sent_bytes,received_pkts,received_bytes,sessions,tcp_c2s_lost_bytes,tcp_s2c_lost_bytes,tcp_c2s_o3_pkts,tcp_s2c_o3_pkts,tcp_c2s_rtx_bytes,tcp_s2c_rtx_bytes,tcp_c2s_rtx_pkts,tcp_s2c_rtx_pkts,tcp_rtt_ms,http_response_latency_ms,ssl_handshake_latency_ms,dns_response_latency_ms,cn_internal_rule_id_list,cn_internal_ioc_type_list,traffic_inbound_byte,traffic_inbound_pkt,traffic_outbound_byte,traffic_outbound_pkt,traffic_internal_byte,traffic_internal_pkt,traffic_through_byte,traffic_through_pkt,internal_query_num,external_query_num ]
|
||||
|
||||
sinks:
|
||||
cn_kafka_sink:
|
||||
type: kafka
|
||||
properties:
|
||||
topic: SESSION-RECORD-CN
|
||||
kafka.bootstrap.servers: {{ kafka_sink_servers }}
|
||||
kafka.retries: 0
|
||||
kafka.linger.ms: 10
|
||||
kafka.request.timeout.ms: 30000
|
||||
kafka.batch.size: 262144
|
||||
kafka.buffer.memory: 134217728
|
||||
kafka.max.request.size: 10485760
|
||||
kafka.compression.type: snappy
|
||||
kafka.security.protocol: SASL_PLAINTEXT
|
||||
kafka.sasl.mechanism: PLAIN
|
||||
kafka.sasl.jaas.config: 454f65ea6eef1256e3067104f82730e737b68959560966b811e7ff364116b03124917eb2b0f3596f14733aa29ebad9352644ce1a5c85991c6f01ba8a5e8f177a7ff0b2d3889a424249967b3870b50993d9644f239f0de82cdb13bdb502959e16afadffa49ef1e1d2b9c9b5113e619817
|
||||
format: json
|
||||
|
||||
application:
|
||||
env:
|
||||
name: etl_session_record_processed_kafka_to_cn_kafka
|
||||
shade.identifier: aes
|
||||
pipeline:
|
||||
object-reuse: true
|
||||
topology:
|
||||
- name: kafka_source
|
||||
downstream: [ session_record_processor ]
|
||||
- name: session_record_processor
|
||||
downstream: [ remove_field_processor ]
|
||||
- name: remove_field_processor
|
||||
downstream: [ cn_kafka_sink ]
|
||||
- name: cn_kafka_sink
|
||||
downstream: [ ]
|
||||
@@ -0,0 +1,392 @@
|
||||
sources:
|
||||
kafka_source:
|
||||
type: kafka
|
||||
# fields: # [array of object] Field List, if not set, all fields(Map<String, Object>) will be output.
|
||||
properties: # [object] Source Properties
|
||||
topic: SESSION-RECORD-PROCESSED
|
||||
kafka.bootstrap.servers: {{ tsg_olap_kafka_servers }}
|
||||
kafka.session.timeout.ms: 60000
|
||||
kafka.max.poll.records: 3000
|
||||
kafka.max.partition.fetch.bytes: 31457280
|
||||
kafka.security.protocol: SASL_PLAINTEXT
|
||||
kafka.sasl.mechanism: PLAIN
|
||||
kafka.sasl.jaas.config: 454f65ea6eef1256e3067104f82730e737b68959560966b811e7ff364116b03124917eb2b0f3596f14733aa29ebad9352644ce1a5c85991c6f01ba8a5e8f177a7ff0b2d3889a424249967b3870b50993d9644f239f0de82cdb13bdb502959e16afadffa49ef1e1d2b9c9b5113e619817
|
||||
kafka.group.id: etl_processed_session_record_kafka_to_cn_kafka
|
||||
kafka.auto.offset.reset: latest
|
||||
format: json
|
||||
|
||||
processing_pipelines:
|
||||
session_record_processor: # [object] Processing Pipeline
|
||||
type: com.geedgenetworks.core.processor.projection.ProjectionProcessorImpl
|
||||
remove_fields:
|
||||
output_fields:
|
||||
functions: # [array of object] Function List
|
||||
- function: EVAL
|
||||
output_fields: [ domain ]
|
||||
parameters:
|
||||
value_expression: server_fqdn
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ domain_sld ]
|
||||
parameters:
|
||||
value_expression: server_domain
|
||||
|
||||
- function: CN_L7_PROTOCOL_AND_APP_EXTRACT
|
||||
parameters:
|
||||
decoded_path_field_name: decoded_path
|
||||
app_transition_field_name: app_transition
|
||||
l7_protocol_field_name: l7_protocol
|
||||
app_field_name: app
|
||||
l7_protocol: DHCP,DNS,FTP,GRE,GTP,HTTP,HTTPS,ICMP,IMAP,IMAPS,IPSEC,ISAKMP,XMPP,L2TP,LDAP,MMS,NETBIOS,NETFLOW,NTP,POP3,POP3S,RDP,PPTP,RADIUS,RTCP,RTP,RTSP,SIP,SMB,SMTP,SMTPS,SNMP,SSDP,SSH,SSL,STUN,TELNET,TFTP,OPENVPN,RTMP,TEREDO,FTPS,DTLS,SPDY,BJNP,QUIC,MDNS,Unknown TCP,Unknown UDP,Unknown Other,IKE,MAIL,SOCKS,DoH,SLP,SSL with ESNI,ISATAP,Stratum,SSL with ECH
|
||||
|
||||
- function: GEOIP_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ ]
|
||||
parameters:
|
||||
kb_name: cn_ip_location
|
||||
option: IP_TO_OBJECT
|
||||
geolocation_field_mapping:
|
||||
COUNTRY: client_country_region
|
||||
PROVINCE: client_super_admin_area
|
||||
CITY: client_admin_area
|
||||
LONGITUDE: client_longitude
|
||||
LATITUDE: client_latitude
|
||||
ISP: client_isp
|
||||
|
||||
- function: GEOIP_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ ]
|
||||
parameters:
|
||||
kb_name: cn_ip_location
|
||||
option: IP_TO_OBJECT
|
||||
geolocation_field_mapping:
|
||||
COUNTRY: server_country_region
|
||||
PROVINCE: server_super_admin_area
|
||||
CITY: server_admin_area
|
||||
LONGITUDE: server_longitude
|
||||
LATITUDE: server_latitude
|
||||
ISP: server_isp
|
||||
|
||||
- function: ASN_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_asn ]
|
||||
parameters:
|
||||
option: IP_TO_ASN
|
||||
kb_name: cn_ip_asn
|
||||
|
||||
- function: ASN_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_asn ]
|
||||
parameters:
|
||||
option: IP_TO_ASN
|
||||
kb_name: cn_ip_asn
|
||||
|
||||
- function: CN_IDC_RENTER_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_idc_renter ]
|
||||
parameters:
|
||||
kb_name: cn_idc_renter
|
||||
|
||||
- function: CN_IDC_RENTER_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_idc_renter ]
|
||||
parameters:
|
||||
kb_name: cn_idc_renter
|
||||
|
||||
- function: CN_LINK_DIRECTION_LOOKUP
|
||||
lookup_fields: [ in_link_id ]
|
||||
output_fields: [ in_link_direction ]
|
||||
parameters:
|
||||
kb_name: cn_link_direction
|
||||
|
||||
- function: CN_LINK_DIRECTION_LOOKUP
|
||||
lookup_fields: [ out_link_id ]
|
||||
output_fields: [ out_link_direction ]
|
||||
parameters:
|
||||
kb_name: cn_link_direction
|
||||
|
||||
- function: CN_FQDN_CATEGORY_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_category
|
||||
field_mapping:
|
||||
NAME: domain_category_name
|
||||
GROUP: domain_category_group
|
||||
REPUTATION_LEVEL: domain_reputation_level
|
||||
|
||||
- function: CN_ICP_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_icp_company_name ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_icp
|
||||
|
||||
- function: CN_FQDN_WHOIS_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_whois_org ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_whois
|
||||
|
||||
- function: CN_DNS_SERVER_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_dns_server ]
|
||||
parameters:
|
||||
kb_name: cn_dns_server
|
||||
|
||||
- function: CN_APP_CATEGORY_LOOKUP
|
||||
lookup_fields: [ app ]
|
||||
parameters:
|
||||
kb_name: cn_app_category
|
||||
field_mapping:
|
||||
CATEGORY: app_category
|
||||
SUBCATEGORY: app_subcategory
|
||||
COMPANY: app_company
|
||||
COMPANY_CATEGORY: app_company_category
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ client_zone ]
|
||||
parameters:
|
||||
value_expression: "flags & 8 == 8 ? 'internal' : 'external'"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ server_zone ]
|
||||
parameters:
|
||||
value_expression: "flags & 16 == 16 ? 'internal' : 'external'"
|
||||
|
||||
- function: CN_IP_ZONE_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_zone ]
|
||||
parameters:
|
||||
kb_name: none
|
||||
#kb_name: cn_internal_ip
|
||||
|
||||
- function: CN_IP_ZONE_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_zone ]
|
||||
parameters:
|
||||
kb_name: none
|
||||
#kb_name: cn_internal_ip
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sent_bytes ]
|
||||
parameters:
|
||||
value_expression: "sent_bytes == null ? 0 : sent_bytes"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sent_pkts ]
|
||||
parameters:
|
||||
value_expression: "sent_pkts == null ? 0 : sent_pkts"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ received_bytes ]
|
||||
parameters:
|
||||
value_expression: "received_bytes == null ? 0 : received_bytes"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ received_pkts ]
|
||||
parameters:
|
||||
value_expression: "received_pkts == null ? 0 : received_pkts"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? received_bytes : traffic_inbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? received_bytes : traffic_outbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? received_pkts : traffic_inbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? received_pkts : traffic_outbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? sent_bytes : traffic_outbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? sent_bytes : traffic_inbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? sent_pkts : traffic_outbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? sent_pkts : traffic_inbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_internal_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'internal' ? sent_bytes + received_bytes : traffic_internal_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_internal_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'internal' ? sent_pkts + received_pkts : traffic_internal_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_through_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'external' ? sent_bytes + received_bytes : traffic_through_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_through_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'external' ? sent_pkts + received_pkts : traffic_through_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sessions ]
|
||||
parameters:
|
||||
value_expression: "1"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ internal_query_num ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' ? sessions : internal_query_num"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ external_query_num ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' ? sessions : external_query_num"
|
||||
|
||||
- function: CN_VPN_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_vpn_service_name ]
|
||||
parameters:
|
||||
kb_name: cn_vpn_learning_ip
|
||||
option: IP_TO_VPN
|
||||
|
||||
- function: CN_VPN_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_vpn_service_name ]
|
||||
parameters:
|
||||
kb_name: cn_vpn_learning_domain
|
||||
option: DOMAIN_TO_VPN
|
||||
|
||||
- function: CN_IOC_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_malware ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_malware
|
||||
option: IP_TO_MALWARE
|
||||
|
||||
- function: CN_IOC_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_malware ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_malware
|
||||
option: DOMAIN_TO_MALWARE
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
parameters:
|
||||
kb_name: cn_ip_tag_user_define
|
||||
option: IP_TO_TAG
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
parameters:
|
||||
kb_name: cn_ip_tag_user_define
|
||||
option: IP_TO_TAG
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_tags ]
|
||||
parameters:
|
||||
kb_name: cn_domain_tag_user_define
|
||||
option: DOMAIN_TO_TAG
|
||||
|
||||
- function: CN_USER_DEFINE_TAG_LOOKUP
|
||||
lookup_fields: [ app ]
|
||||
output_fields: [ app_tags ]
|
||||
parameters:
|
||||
kb_name: cn_app_tag_user_define
|
||||
option: APP_TO_TAG
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ client_idc_renter,client_ip_tags ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ server_idc_renter,server_dns_server,server_node_type,server_malware,server_vpn_service_name,server_ip_tags ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ domain_node_type,domain_malware,domain_vpn_service_name,domain_tags ]
|
||||
output_fields: [ domain_tags ]
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ client_ip_tags ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
parameters:
|
||||
prefix: ip.
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ server_ip_tags ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
parameters:
|
||||
prefix: ip.
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ domain_tags ]
|
||||
output_fields: [ domain_tags ]
|
||||
parameters:
|
||||
prefix: domain.
|
||||
|
||||
- function: CN_ARRAY_ELEMENTS_PREPEND
|
||||
lookup_fields: [ app_tags ]
|
||||
output_fields: [ app_tags ]
|
||||
parameters:
|
||||
prefix: app.
|
||||
postprocessing_pipelines:
|
||||
remove_field_processor: # [object] Processing Pipeline
|
||||
type: com.geedgenetworks.core.processor.projection.ProjectionProcessorImpl
|
||||
output_fields: [ recv_time,log_id,flags,start_timestamp_ms,end_timestamp_ms,duration_ms,decoded_as,client_ip,server_ip,client_port,server_port,app,app_transition,decoded_path,ip_protocol,l7_protocol,out_link_id,in_link_id,subscriber_id,imei,imsi,phone_number,apn,http_url,dns_rcode,dns_qname,dns_qtype,dns_rr,out_link_direction,in_link_direction,server_fqdn,server_domain,domain,domain_sld,domain_category_name,domain_category_group,domain_reputation_level,domain_icp_company_name,domain_whois_org,domain_tags,client_zone,client_country_region,client_super_admin_area,client_admin_area,client_longitude,client_latitude,client_isp,client_asn,client_ip_tags,server_zone,server_country_region,server_super_admin_area,server_admin_area,server_longitude,server_latitude,server_isp,server_asn,server_ip_tags,app_category,app_subcategory,app_company,app_company_category,app_tags,sent_pkts,sent_bytes,received_pkts,received_bytes,sessions,tcp_c2s_lost_bytes,tcp_s2c_lost_bytes,tcp_c2s_o3_pkts,tcp_s2c_o3_pkts,tcp_c2s_rtx_bytes,tcp_s2c_rtx_bytes,tcp_c2s_rtx_pkts,tcp_s2c_rtx_pkts,tcp_rtt_ms,http_response_latency_ms,ssl_handshake_latency_ms,dns_response_latency_ms,cn_internal_rule_id_list,cn_internal_ioc_type_list,traffic_inbound_byte,traffic_inbound_pkt,traffic_outbound_byte,traffic_outbound_pkt,traffic_internal_byte,traffic_internal_pkt,traffic_through_byte,traffic_through_pkt,internal_query_num,external_query_num ]
|
||||
|
||||
sinks:
|
||||
cn_kafka_sink:
|
||||
type: kafka
|
||||
properties:
|
||||
topic: SESSION-RECORD-CN
|
||||
kafka.bootstrap.servers: {{ kafka_sink_servers }}
|
||||
kafka.retries: 0
|
||||
kafka.linger.ms: 10
|
||||
kafka.request.timeout.ms: 30000
|
||||
kafka.batch.size: 262144
|
||||
kafka.buffer.memory: 134217728
|
||||
kafka.max.request.size: 10485760
|
||||
kafka.compression.type: snappy
|
||||
kafka.security.protocol: SASL_PLAINTEXT
|
||||
kafka.sasl.mechanism: PLAIN
|
||||
kafka.sasl.jaas.config: 454f65ea6eef1256e3067104f82730e737b68959560966b811e7ff364116b03124917eb2b0f3596f14733aa29ebad9352644ce1a5c85991c6f01ba8a5e8f177a7ff0b2d3889a424249967b3870b50993d9644f239f0de82cdb13bdb502959e16afadffa49ef1e1d2b9c9b5113e619817
|
||||
format: json
|
||||
|
||||
application:
|
||||
env:
|
||||
name: etl_session_record_processed_kafka_to_cn_kafka
|
||||
shade.identifier: aes
|
||||
pipeline:
|
||||
object-reuse: true
|
||||
topology:
|
||||
- name: kafka_source
|
||||
downstream: [ session_record_processor ]
|
||||
- name: session_record_processor
|
||||
downstream: [ remove_field_processor ]
|
||||
- name: remove_field_processor
|
||||
downstream: [ cn_kafka_sink ]
|
||||
- name: cn_kafka_sink
|
||||
downstream: [ ]
|
||||
@@ -0,0 +1,106 @@
|
||||
grootstream:
|
||||
knowledge_base:
|
||||
- name: cn_ip_location
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 1
|
||||
|
||||
- name: cn_ip_asn
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 2
|
||||
|
||||
- name: cn_idc_renter
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 11
|
||||
|
||||
- name: cn_link_direction
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 13
|
||||
|
||||
- name: cn_fqdn_category
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 5
|
||||
|
||||
- name: cn_fqdn_icp
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 4
|
||||
|
||||
- name: cn_fqdn_whois
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 6
|
||||
|
||||
- name: cn_dns_server
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 3
|
||||
|
||||
- name: cn_app_category
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 9
|
||||
|
||||
- name: cn_internal_ip
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 12
|
||||
|
||||
- name: cn_vpn_learning_ip
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 15
|
||||
|
||||
- name: cn_vpn_learning_domain
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 14
|
||||
|
||||
- name: cn_ioc_darkweb
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 8
|
||||
|
||||
- name: cn_ioc_malware
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 7
|
||||
|
||||
- name: cn_ip_tag_user_define
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base?category=cn_ip_tag_user_defined
|
||||
|
||||
- name: cn_domain_tag_user_define
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base?category=cn_domain_tag_user_defined
|
||||
|
||||
- name: cn_app_tag_user_define
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base?category=cn_app_tag_user_defined
|
||||
|
||||
- name: cn_rule
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.54:8090
|
||||
properties:
|
||||
token: 1a653ea0-d39b-4246-94b0-1ba95db4b6a7
|
||||
|
||||
properties:
|
||||
scheduler.knowledge_base.update.interval.minutes: 5
|
||||
@@ -0,0 +1,21 @@
|
||||
com.geedgenetworks.core.udf.AsnLookup
|
||||
com.geedgenetworks.core.udf.Eval
|
||||
com.geedgenetworks.core.udf.GenerateStringArray
|
||||
com.geedgenetworks.core.udf.GeoIpLookup
|
||||
com.geedgenetworks.core.udf.cn.L7ProtocolAndAppExtract
|
||||
com.geedgenetworks.core.udf.cn.IdcRenterLookup
|
||||
com.geedgenetworks.core.udf.cn.LinkDirectionLookup
|
||||
com.geedgenetworks.core.udf.cn.FqdnCategoryLookup
|
||||
com.geedgenetworks.core.udf.cn.IcpLookup
|
||||
com.geedgenetworks.core.udf.cn.FqdnWhoisLookup
|
||||
com.geedgenetworks.core.udf.cn.DnsServerInfoLookup
|
||||
com.geedgenetworks.core.udf.cn.AppCategoryLookup
|
||||
com.geedgenetworks.core.udf.cn.IpZoneLookup
|
||||
com.geedgenetworks.core.udf.cn.VpnLookup
|
||||
com.geedgenetworks.core.udf.cn.AnonymityLookup
|
||||
com.geedgenetworks.core.udf.cn.IocLookup
|
||||
com.geedgenetworks.core.udf.cn.UserDefineTagLookup
|
||||
com.geedgenetworks.core.udf.cn.FieldsMerge
|
||||
com.geedgenetworks.core.udf.cn.ArrayElementsPrepend
|
||||
com.geedgenetworks.core.udf.SnowflakeId
|
||||
com.geedgenetworks.core.udf.UnixTimestampConverter
|
||||
@@ -0,0 +1,4 @@
|
||||
set distributed_ddl_task_timeout = 180;

-- Smoke-test query: verifies that all columns added by the companion DDL
-- (offender_* / victim_* geolocation fields) resolve on
-- cyber_narrator_galaxy.cn_security_event. The far-future, 1-second,
-- half-open window is expected to match zero rows — the statement only has
-- to parse and bind columns.
-- Fix: the original column list named offender_ip twice; duplicate removed.
SELECT
    status, is_builtin, rule_type,
    victim_ip, victim_country_region, victim_super_admin_area, victim_admin_area,
    victim_longitude, victim_latitude,
    offender_ip, offender_country_region, offender_super_admin_area, offender_admin_area,
    offender_longitude, offender_latitude,
    event_info, event_key, severity, event_type, duration_s, event_name,
    app, `domain`, event_id, rule_id, start_time, end_time, match_times
FROM cyber_narrator_galaxy.cn_security_event
WHERE start_time >= toUnixTimestamp('2030-01-01 00:00:00')
  AND start_time < toUnixTimestamp('2030-01-01 00:00:01');
|
||||
@@ -0,0 +1,37 @@
|
||||
set distributed_ddl_task_timeout = 180;

-- Add geolocation columns for the offender and victim endpoints of a
-- security event. The same change is applied three times: the shard-local
-- table on ck_cluster, the Distributed table on ck_cluster, and the
-- Distributed table on the ck_query cluster.
-- Columns are added in reverse of the desired order against a fixed AFTER
-- anchor, so the final layout reads:
--   <ip>, country_region, super_admin_area, admin_area, longitude, latitude.
ALTER TABLE cyber_narrator_galaxy.cn_security_event_local ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS offender_latitude Nullable(Float64) AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_longitude Nullable(Float64) AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_admin_area String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_super_admin_area String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_country_region String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS victim_latitude Nullable(Float64) AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_longitude Nullable(Float64) AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_admin_area String AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_super_admin_area String AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_country_region String AFTER victim_ip;

ALTER TABLE cyber_narrator_galaxy.cn_security_event ON CLUSTER ck_cluster
    ADD COLUMN IF NOT EXISTS offender_latitude Nullable(Float64) AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_longitude Nullable(Float64) AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_admin_area String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_super_admin_area String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_country_region String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS victim_latitude Nullable(Float64) AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_longitude Nullable(Float64) AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_admin_area String AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_super_admin_area String AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_country_region String AFTER victim_ip;

ALTER TABLE cyber_narrator_galaxy.cn_security_event ON CLUSTER ck_query
    ADD COLUMN IF NOT EXISTS offender_latitude Nullable(Float64) AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_longitude Nullable(Float64) AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_admin_area String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_super_admin_area String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS offender_country_region String AFTER offender_ip,
    ADD COLUMN IF NOT EXISTS victim_latitude Nullable(Float64) AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_longitude Nullable(Float64) AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_admin_area String AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_super_admin_area String AFTER victim_ip,
    ADD COLUMN IF NOT EXISTS victim_country_region String AFTER victim_ip;
|
||||
@@ -0,0 +1,6 @@
|
||||
set distributed_ddl_task_timeout = 180;

-- Smoke-test queries: confirm the expected column sets on
-- metric_subscriber_app (coordinates dropped) and on the new
-- location_subscriber table. The far-future, 1-second half-open window
-- matches no rows; the statements only need to parse and bind columns.
SELECT
    subscriber_id, app, imei, imsi, phone_number, apn, stat_time,
    sent_pkts, sent_bytes, received_pkts, received_bytes, sessions,
    traffic_inbound_byte, traffic_inbound_pkt, traffic_outbound_byte, traffic_outbound_pkt,
    traffic_internal_byte, traffic_internal_pkt, traffic_through_byte, traffic_through_pkt,
    tcp_c2s_lost_bytes_ratio, tcp_s2c_lost_bytes_ratio, tcp_lost_bytes_ratio,
    tcp_c2s_o3_pkts_ratio, tcp_s2c_o3_pkts_ratio, tcp_o3_pkts_ratio,
    tcp_c2s_rtx_bytes_ratio, tcp_s2c_rtx_bytes_ratio, tcp_rtx_bytes_ratio,
    tcp_c2s_rtx_pkts_ratio, tcp_s2c_rtx_pkts_ratio, tcp_rtx_pkts_ratio,
    avg_tcp_rtt_ms, avg_http_response_latency_ms, avg_ssl_handshake_latency_ms
FROM cyber_narrator_galaxy.metric_subscriber_app
WHERE stat_time >= toUnixTimestamp('2030-01-01 00:00:00')
  AND stat_time < toUnixTimestamp('2030-01-01 00:00:01');

SELECT
    subscriber_id, imei, imsi, phone_number, apn,
    subscriber_longitude, subscriber_latitude,
    first_location, second_location, third_location, stat_time
FROM cyber_narrator_galaxy.location_subscriber
WHERE stat_time >= toUnixTimestamp('2030-01-01 00:00:00')
  AND stat_time < toUnixTimestamp('2030-01-01 00:00:01');
|
||||
@@ -0,0 +1,48 @@
|
||||
set distributed_ddl_task_timeout = 180;

-- metric_subscriber_app no longer carries per-subscriber coordinates;
-- they move to the new location_subscriber table created below.
ALTER TABLE cyber_narrator_galaxy.metric_subscriber_app_local ON CLUSTER ck_cluster
    DROP COLUMN IF EXISTS subscriber_longitude,
    DROP COLUMN IF EXISTS subscriber_latitude;
ALTER TABLE cyber_narrator_galaxy.metric_subscriber_app ON CLUSTER ck_cluster
    DROP COLUMN IF EXISTS subscriber_longitude,
    DROP COLUMN IF EXISTS subscriber_latitude;
ALTER TABLE cyber_narrator_galaxy.metric_subscriber_app ON CLUSTER ck_query
    DROP COLUMN IF EXISTS subscriber_longitude,
    DROP COLUMN IF EXISTS subscriber_latitude;

-- Shard-local storage table. stat_time is a unix timestamp (seconds) —
-- the companion verification query compares it with toUnixTimestamp().
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.location_subscriber_local ON CLUSTER ck_cluster (
    subscriber_id        String,
    imei                 String,
    imsi                 String,
    phone_number         String,
    apn                  String,
    subscriber_longitude Nullable(Float64),
    subscriber_latitude  Nullable(Float64),
    first_location       String,
    second_location      String,
    third_location       String,
    stat_time            Int64
) ENGINE = MergeTree
  PARTITION BY toYYYYMMDD(toDate(stat_time))
  ORDER BY (stat_time, subscriber_id)
  SETTINGS index_granularity = 8192;

-- Distributed façade on the storage cluster; writes are sharded randomly.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.location_subscriber ON CLUSTER ck_cluster (
    subscriber_id        String,
    imei                 String,
    imsi                 String,
    phone_number         String,
    apn                  String,
    subscriber_longitude Nullable(Float64),
    subscriber_latitude  Nullable(Float64),
    first_location       String,
    second_location      String,
    third_location       String,
    stat_time            Int64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'location_subscriber_local', rand());

-- Same façade on the query cluster; it routes to ck_cluster's local tables,
-- so ck_query nodes must have 'ck_cluster' defined in their remote_servers.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.location_subscriber ON CLUSTER ck_query (
    subscriber_id        String,
    imei                 String,
    imsi                 String,
    phone_number         String,
    apn                  String,
    subscriber_longitude Nullable(Float64),
    subscriber_latitude  Nullable(Float64),
    first_location       String,
    second_location      String,
    third_location       String,
    stat_time            Int64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'location_subscriber_local', rand());
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog / launcher.
#
# Keeps PROCESS_SUM gohangout processes alive for one YAML config:
#   - fewer than PROCESS_SUM running  -> start one more;
#   - more than PROCESS_SUM restarts in a row -> assume kafka/clickhouse is
#     down, kill every instance and back off for 30s;
#   - more than PROCESS_SUM running   -> kill them all and respawn.
#
# Usage: start_gohangout_k2ck_location_subscriber_cn.sh <process-count>

# Config file name; the file must live under $BASE_DIR/conf.
YML_NAME=k2ck_location_subscriber_cn
# Root dir holding the gohangout binary (this script sits two levels below it).
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)
# Log level: 1, 5 or 10 — larger is more verbose.
LOG_LV=5
# Worker threads per process; each worker owns its own filter/output and
# therefore its own share of memory. Default 1 in gohangout; we use 3.
THREAD_SUM=3
# Total number of processes to keep running (required positional argument;
# fix: the original silently broke its numeric tests when $1 was omitted).
PROCESS_SUM=${1:?"usage: $0 <process-count>"}

RESTART_LOG="$BASE_DIR/bin/start_log/${YML_NAME}_restart.log"

# Fix: create BOTH log locations — the restart-log directory was written to
# but never created by the original script.
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" \
            --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" \
            >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # Fix: ((logid++)) / ((id++)) are bash-only and fail under /bin/sh
        # (e.g. dash); use POSIX arithmetic expansion instead.
        logid=$((logid + 1))
        id=$((id + 1))
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            # Too many consecutive restarts: the sink/source is presumed
            # broken. Kill everything and back off before trying again.
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and restart from scratch.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,28 @@
|
||||
inputs:
|
||||
- Kafka:
|
||||
topic:
|
||||
LOCATION-SUBSCRIBER: 1
|
||||
#assign:
|
||||
# weblog: [0,9]
|
||||
codec: json
|
||||
consumer_settings:
|
||||
bootstrap.servers: "192.168.44.55:9092"
|
||||
group.id: location_subscriber_1
|
||||
max.partition.fetch.bytes: '10485760'
|
||||
auto.commit.interval.ms: '5000'
|
||||
# from.beginning: 'true'
|
||||
# sasl.mechanism: PLAIN
|
||||
# sasl.user: admin
|
||||
# sasl.password: admin-secret
|
||||
|
||||
outputs:
|
||||
- Clickhouse:
|
||||
table: 'cyber_narrator_galaxy.location_subscriber_local'
|
||||
username: 'default'
|
||||
password: 'ceiec2019'
|
||||
hosts:
|
||||
- 'tcp://192.168.44.55:9001'
|
||||
bulk_actions: 100000
|
||||
flush_interval: 30
|
||||
concurrent: 2
|
||||
conn_max_life_time: 60
|
||||
23
cyber_narrator/upgrade/2024/CN-24.03/gohangout/start_all.sh
Normal file
23
cyber_narrator/upgrade/2024/CN-24.03/gohangout/start_all.sh
Normal file
@@ -0,0 +1,23 @@
|
||||
#!/bin/sh
|
||||
|
||||
STARTDIR=$(cd $(dirname $0); pwd)
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_location_subscriber_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_subscriber_app_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_security_event_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_region_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_asn_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_application_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_domain_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_protocol_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_link_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_server_ip_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qtype_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rcode_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_a_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_aaaa_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_relation_cn.sh $1 > /dev/null 2>&1 &
|
||||
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_dynamic_attribute_cn.sh $1 > /dev/null 2>&1 &
|
||||
$STARTDIR/bin/ghoStart/start_gohangout_k2ck_cn_record.sh $1
|
||||
4
cyber_narrator/upgrade/2024/CN-24.04/CN/README.md
Normal file
4
cyber_narrator/upgrade/2024/CN-24.04/CN/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
|
||||
groot-stream version > 1.3.0
|
||||
|
||||
etl_session_record_kafka_to_cn_kafka 需要根据部署环境确定数据源的topic是SESSION-RECORD还是SESSION-RECORD-PROCESSED
|
||||
@@ -0,0 +1,384 @@
|
||||
sources:
|
||||
kafka_source:
|
||||
type: kafka
|
||||
# fields: # [array of object] Field List, if not set, all fields(Map<String, Object>) will be output.
|
||||
properties: # [object] Source Properties
|
||||
topic: SESSION-RECORD-PROCESSED
|
||||
kafka.bootstrap.servers: {{ tsg_olap_kafka_servers }}
|
||||
kafka.session.timeout.ms: 60000
|
||||
kafka.max.poll.records: 3000
|
||||
kafka.max.partition.fetch.bytes: 31457280
|
||||
kafka.security.protocol: SASL_PLAINTEXT
|
||||
kafka.sasl.mechanism: PLAIN
|
||||
kafka.sasl.jaas.config: 454f65ea6eef1256e3067104f82730e737b68959560966b811e7ff364116b03124917eb2b0f3596f14733aa29ebad9352644ce1a5c85991c6f01ba8a5e8f177a7ff0b2d3889a424249967b3870b50993d9644f239f0de82cdb13bdb502959e16afadffa49ef1e1d2b9c9b5113e619817
|
||||
kafka.group.id: etl_processed_session_record_kafka_to_cn_kafka
|
||||
kafka.auto.offset.reset: latest
|
||||
format: json
|
||||
|
||||
processing_pipelines:
|
||||
session_record_processor: # [object] Processing Pipeline
|
||||
type: com.geedgenetworks.core.processor.projection.ProjectionProcessorImpl
|
||||
remove_fields:
|
||||
output_fields:
|
||||
functions: # [array of object] Function List
|
||||
- function: SNOWFLAKE_ID
|
||||
lookup_fields: [ '' ]
|
||||
output_fields: [ cn_log_id ]
|
||||
parameters:
|
||||
data_center_id_num: 1
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ log_id ]
|
||||
parameters:
|
||||
value_expression: "log_id == null ? cn_log_id : log_id"
|
||||
|
||||
- function: UNIX_TIMESTAMP_CONVERTER
|
||||
lookup_fields: [ __timestamp ]
|
||||
output_fields: [ kafka_recv_time ]
|
||||
parameters:
|
||||
precision: seconds
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ recv_time ]
|
||||
parameters:
|
||||
value_expression: "recv_time == null ? kafka_recv_time : recv_time"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ domain ]
|
||||
parameters:
|
||||
value_expression: server_fqdn
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ domain_sld ]
|
||||
parameters:
|
||||
value_expression: server_domain
|
||||
|
||||
- function: CN_L7_PROTOCOL_AND_APP_EXTRACT
|
||||
parameters:
|
||||
decoded_path_field_name: decoded_path
|
||||
app_transition_field_name: app_transition
|
||||
l7_protocol_field_name: l7_protocol
|
||||
app_field_name: app
|
||||
l7_protocol: DHCP,DNS,FTP,GRE,GTP,HTTP,HTTPS,ICMP,IMAP,IMAPS,IPSEC,ISAKMP,XMPP,L2TP,LDAP,MMS,NETBIOS,NETFLOW,NTP,POP3,POP3S,RDP,PPTP,RADIUS,RTCP,RTP,RTSP,SIP,SMB,SMTP,SMTPS,SNMP,SSDP,SSH,SSL,STUN,TELNET,TFTP,OPENVPN,RTMP,TEREDO,FTPS,DTLS,SPDY,BJNP,QUIC,MDNS,Unknown TCP,Unknown UDP,Unknown Other,IKE,MAIL,SOCKS,DoH,SLP,SSL with ESNI,ISATAP,Stratum,SSL with ECH
|
||||
|
||||
- function: GEOIP_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ ]
|
||||
parameters:
|
||||
kb_name: cn_ip_location
|
||||
option: IP_TO_OBJECT
|
||||
geolocation_field_mapping:
|
||||
COUNTRY: client_country_region
|
||||
PROVINCE: client_super_admin_area
|
||||
CITY: client_admin_area
|
||||
LONGITUDE: client_longitude
|
||||
LATITUDE: client_latitude
|
||||
ISP: client_isp
|
||||
|
||||
- function: GEOIP_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ ]
|
||||
parameters:
|
||||
kb_name: cn_ip_location
|
||||
option: IP_TO_OBJECT
|
||||
geolocation_field_mapping:
|
||||
COUNTRY: server_country_region
|
||||
PROVINCE: server_super_admin_area
|
||||
CITY: server_admin_area
|
||||
LONGITUDE: server_longitude
|
||||
LATITUDE: server_latitude
|
||||
ISP: server_isp
|
||||
|
||||
- function: ASN_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_asn ]
|
||||
parameters:
|
||||
option: IP_TO_ASN
|
||||
kb_name: cn_ip_asn
|
||||
|
||||
- function: ASN_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_asn ]
|
||||
parameters:
|
||||
option: IP_TO_ASN
|
||||
kb_name: cn_ip_asn
|
||||
|
||||
- function: CN_IDC_RENTER_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_idc_renter ]
|
||||
parameters:
|
||||
kb_name: cn_idc_renter
|
||||
|
||||
- function: CN_IDC_RENTER_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_idc_renter ]
|
||||
parameters:
|
||||
kb_name: cn_idc_renter
|
||||
|
||||
- function: CN_LINK_DIRECTION_LOOKUP
|
||||
lookup_fields: [ in_link_id ]
|
||||
output_fields: [ in_link_direction ]
|
||||
parameters:
|
||||
kb_name: cn_link_direction
|
||||
|
||||
- function: CN_LINK_DIRECTION_LOOKUP
|
||||
lookup_fields: [ out_link_id ]
|
||||
output_fields: [ out_link_direction ]
|
||||
parameters:
|
||||
kb_name: cn_link_direction
|
||||
|
||||
- function: CN_FQDN_CATEGORY_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_category
|
||||
field_mapping:
|
||||
NAME: domain_category_name
|
||||
GROUP: domain_category_group
|
||||
REPUTATION_LEVEL: domain_reputation_level
|
||||
|
||||
- function: CN_ICP_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_icp_company_name ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_icp
|
||||
|
||||
- function: CN_FQDN_WHOIS_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_whois_org ]
|
||||
parameters:
|
||||
kb_name: cn_fqdn_whois
|
||||
|
||||
- function: CN_DNS_SERVER_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_dns_server ]
|
||||
parameters:
|
||||
kb_name: cn_dns_server
|
||||
|
||||
- function: CN_APP_CATEGORY_LOOKUP
|
||||
lookup_fields: [ app ]
|
||||
parameters:
|
||||
kb_name: cn_app_category
|
||||
field_mapping:
|
||||
CATEGORY: app_category
|
||||
SUBCATEGORY: app_subcategory
|
||||
COMPANY: app_company
|
||||
COMPANY_CATEGORY: app_company_category
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ client_zone ]
|
||||
parameters:
|
||||
value_expression: "flags & 8 == 8 ? 'internal' : 'external'"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ server_zone ]
|
||||
parameters:
|
||||
value_expression: "flags & 16 == 16 ? 'internal' : 'external'"
|
||||
|
||||
- function: CN_IP_ZONE_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_zone ]
|
||||
parameters:
|
||||
kb_name: none
|
||||
#kb_name: cn_internal_ip
|
||||
|
||||
- function: CN_IP_ZONE_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_zone ]
|
||||
parameters:
|
||||
kb_name: none
|
||||
#kb_name: cn_internal_ip
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sent_bytes ]
|
||||
parameters:
|
||||
value_expression: "sent_bytes == null ? 0 : sent_bytes"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sent_pkts ]
|
||||
parameters:
|
||||
value_expression: "sent_pkts == null ? 0 : sent_pkts"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ received_bytes ]
|
||||
parameters:
|
||||
value_expression: "received_bytes == null ? 0 : received_bytes"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ received_pkts ]
|
||||
parameters:
|
||||
value_expression: "received_pkts == null ? 0 : received_pkts"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? received_bytes : traffic_inbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? received_bytes : traffic_outbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? received_pkts : traffic_inbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? received_pkts : traffic_outbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? sent_bytes : traffic_outbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? sent_bytes : traffic_inbound_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_outbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'external' ? sent_pkts : traffic_outbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_inbound_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'internal' ? sent_pkts : traffic_inbound_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_internal_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'internal' ? sent_bytes + received_bytes : traffic_internal_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_internal_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' && server_zone == 'internal' ? sent_pkts + received_pkts : traffic_internal_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_through_byte ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'external' ? sent_bytes + received_bytes : traffic_through_byte"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ traffic_through_pkt ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' && server_zone == 'external' ? sent_pkts + received_pkts : traffic_through_pkt"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ sessions ]
|
||||
parameters:
|
||||
value_expression: "1"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ internal_query_num ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'internal' ? sessions : internal_query_num"
|
||||
|
||||
- function: EVAL
|
||||
output_fields: [ external_query_num ]
|
||||
parameters:
|
||||
value_expression: "client_zone == 'external' ? sessions : external_query_num"
|
||||
|
||||
- function: CN_ANONYMITY_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_node_type ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_darkweb
|
||||
option: IP_TO_NODE_TYPE
|
||||
|
||||
- function: CN_ANONYMITY_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_node_type ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_darkweb
|
||||
option: DOMAIN_TO_NODE_TYPE
|
||||
|
||||
- function: CN_IOC_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_malware ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_malware
|
||||
option: IP_TO_MALWARE
|
||||
|
||||
- function: CN_IOC_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_malware ]
|
||||
parameters:
|
||||
kb_name: cn_ioc_malware
|
||||
option: DOMAIN_TO_MALWARE
|
||||
|
||||
- function: CN_INTELLIGENCE_INDICATOR_LOOKUP
|
||||
lookup_fields: [ client_ip ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
parameters:
|
||||
kb_name: cn_intelligence_indicator
|
||||
option: IP_TO_TAG
|
||||
|
||||
- function: CN_INTELLIGENCE_INDICATOR_LOOKUP
|
||||
lookup_fields: [ server_ip ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
parameters:
|
||||
kb_name: cn_intelligence_indicator
|
||||
option: IP_TO_TAG
|
||||
|
||||
- function: CN_INTELLIGENCE_INDICATOR_LOOKUP
|
||||
lookup_fields: [ domain ]
|
||||
output_fields: [ domain_tags ]
|
||||
parameters:
|
||||
kb_name: cn_intelligence_indicator
|
||||
option: DOMAIN_TO_TAG
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ client_idc_renter,client_ip_tags ]
|
||||
output_fields: [ client_ip_tags ]
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ server_idc_renter,server_dns_server,server_node_type,server_malware,server_ip_tags ]
|
||||
output_fields: [ server_ip_tags ]
|
||||
|
||||
- function: GENERATE_STRING_ARRAY
|
||||
lookup_fields: [ domain_node_type,domain_malware,domain_tags ]
|
||||
output_fields: [ domain_tags ]
|
||||
|
||||
postprocessing_pipelines:
|
||||
remove_field_processor: # [object] Processing Pipeline
|
||||
type: com.geedgenetworks.core.processor.projection.ProjectionProcessorImpl
|
||||
output_fields: [ recv_time,log_id,flags,start_timestamp_ms,end_timestamp_ms,duration_ms,decoded_as,client_ip,server_ip,client_port,server_port,app,app_transition,decoded_path,ip_protocol,l7_protocol,out_link_id,in_link_id,subscriber_id,imei,imsi,phone_number,apn,http_url,dns_rcode,dns_qname,dns_qtype,dns_rr,out_link_direction,in_link_direction,server_fqdn,server_domain,domain,domain_sld,domain_category_name,domain_category_group,domain_reputation_level,domain_icp_company_name,domain_whois_org,domain_tags,client_zone,client_country_region,client_super_admin_area,client_admin_area,client_longitude,client_latitude,client_isp,client_asn,client_ip_tags,server_zone,server_country_region,server_super_admin_area,server_admin_area,server_longitude,server_latitude,server_isp,server_asn,server_ip_tags,app_category,app_subcategory,app_company,app_company_category,app_tags,sent_pkts,sent_bytes,received_pkts,received_bytes,sessions,tcp_c2s_lost_bytes,tcp_s2c_lost_bytes,tcp_c2s_o3_pkts,tcp_s2c_o3_pkts,tcp_c2s_rtx_bytes,tcp_s2c_rtx_bytes,tcp_c2s_rtx_pkts,tcp_s2c_rtx_pkts,tcp_rtt_ms,http_response_latency_ms,ssl_handshake_latency_ms,dns_response_latency_ms,cn_internal_rule_id_list,cn_internal_ioc_type_list,traffic_inbound_byte,traffic_inbound_pkt,traffic_outbound_byte,traffic_outbound_pkt,traffic_internal_byte,traffic_internal_pkt,traffic_through_byte,traffic_through_pkt,internal_query_num,external_query_num ]
|
||||
|
||||
sinks:
|
||||
cn_kafka_sink:
|
||||
type: kafka
|
||||
properties:
|
||||
topic: SESSION-RECORD-CN
|
||||
kafka.bootstrap.servers: {{ kafka_sink_servers }}
|
||||
kafka.retries: 0
|
||||
kafka.linger.ms: 10
|
||||
kafka.request.timeout.ms: 30000
|
||||
kafka.batch.size: 262144
|
||||
kafka.buffer.memory: 134217728
|
||||
kafka.max.request.size: 10485760
|
||||
kafka.compression.type: snappy
|
||||
kafka.security.protocol: SASL_PLAINTEXT
|
||||
kafka.sasl.mechanism: PLAIN
|
||||
kafka.sasl.jaas.config: 454f65ea6eef1256e3067104f82730e737b68959560966b811e7ff364116b03124917eb2b0f3596f14733aa29ebad9352644ce1a5c85991c6f01ba8a5e8f177a7ff0b2d3889a424249967b3870b50993d9644f239f0de82cdb13bdb502959e16afadffa49ef1e1d2b9c9b5113e619817
|
||||
format: json
|
||||
|
||||
application:
|
||||
env:
|
||||
name: etl_session_record_processed_kafka_to_cn_kafka
|
||||
shade.identifier: aes
|
||||
pipeline:
|
||||
object-reuse: true
|
||||
topology:
|
||||
- name: kafka_source
|
||||
downstream: [ session_record_processor ]
|
||||
- name: session_record_processor
|
||||
downstream: [ remove_field_processor ]
|
||||
- name: remove_field_processor
|
||||
downstream: [ cn_kafka_sink ]
|
||||
- name: cn_kafka_sink
|
||||
downstream: [ ]
|
||||
88
cyber_narrator/upgrade/2024/CN-24.04/CN/grootstream.yaml
Normal file
88
cyber_narrator/upgrade/2024/CN-24.04/CN/grootstream.yaml
Normal file
@@ -0,0 +1,88 @@
|
||||
grootstream:
|
||||
knowledge_base:
|
||||
- name: cn_ip_location
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 1
|
||||
|
||||
- name: cn_ip_asn
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 2
|
||||
|
||||
- name: cn_idc_renter
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 11
|
||||
|
||||
- name: cn_link_direction
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 13
|
||||
|
||||
- name: cn_fqdn_category
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 5
|
||||
|
||||
- name: cn_fqdn_icp
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 4
|
||||
|
||||
- name: cn_fqdn_whois
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 6
|
||||
|
||||
- name: cn_dns_server
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 3
|
||||
|
||||
- name: cn_app_category
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 9
|
||||
|
||||
- name: cn_internal_ip
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 12
|
||||
|
||||
- name: cn_ioc_darkweb
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 8
|
||||
|
||||
- name: cn_ioc_malware
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 7
|
||||
|
||||
- name: cn_intelligence_indicator
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.55:9999/v1/knowledge_base
|
||||
files:
|
||||
- 16
|
||||
|
||||
- name: cn_rule
|
||||
fs_type: http
|
||||
fs_path: http://192.168.44.54:8090
|
||||
properties:
|
||||
token: 1a653ea0-d39b-4246-94b0-1ba95db4b6a7
|
||||
|
||||
properties:
|
||||
scheduler.knowledge_base.update.interval.minutes: 5
|
||||
21
cyber_narrator/upgrade/2024/CN-24.04/CN/udf.plugins
Normal file
21
cyber_narrator/upgrade/2024/CN-24.04/CN/udf.plugins
Normal file
@@ -0,0 +1,21 @@
|
||||
com.geedgenetworks.core.udf.AsnLookup
|
||||
com.geedgenetworks.core.udf.Eval
|
||||
com.geedgenetworks.core.udf.GenerateStringArray
|
||||
com.geedgenetworks.core.udf.GeoIpLookup
|
||||
com.geedgenetworks.core.udf.cn.L7ProtocolAndAppExtract
|
||||
com.geedgenetworks.core.udf.cn.IdcRenterLookup
|
||||
com.geedgenetworks.core.udf.cn.LinkDirectionLookup
|
||||
com.geedgenetworks.core.udf.cn.FqdnCategoryLookup
|
||||
com.geedgenetworks.core.udf.cn.IcpLookup
|
||||
com.geedgenetworks.core.udf.cn.FqdnWhoisLookup
|
||||
com.geedgenetworks.core.udf.cn.DnsServerInfoLookup
|
||||
com.geedgenetworks.core.udf.cn.AppCategoryLookup
|
||||
com.geedgenetworks.core.udf.cn.IpZoneLookup
|
||||
com.geedgenetworks.core.udf.cn.VpnLookup
|
||||
com.geedgenetworks.core.udf.cn.AnonymityLookup
|
||||
com.geedgenetworks.core.udf.cn.IocLookup
|
||||
com.geedgenetworks.core.udf.cn.FieldsMerge
|
||||
com.geedgenetworks.core.udf.cn.ArrayElementsPrepend
|
||||
com.geedgenetworks.core.udf.cn.IntelligenceIndicatorLookup
|
||||
com.geedgenetworks.core.udf.SnowflakeId
|
||||
com.geedgenetworks.core.udf.UnixTimestampConverter
|
||||
@@ -0,0 +1,20 @@
|
||||
SELECT subscriber_id, imei, imsi, phone_number, apn, subscriber_longitude, subscriber_latitude, first_location, second_location, third_location,data_source, stat_time
|
||||
FROM cyber_narrator_galaxy.location_subscriber where stat_time >= toUnixTimestamp('2030-01-01 00:00:00') AND stat_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT app_name, `domain`, ip, ip_country_region, ip_super_admin_area, ip_admin_area, ip_asn, ip_isp, domain_category_name, domain_category_group, app_category, app_subcategory, entity_tags, create_time, update_time
|
||||
FROM cyber_narrator_galaxy.session_relation_domain_ip_app where create_time >= toUnixTimestamp('2030-01-01 00:00:00') AND create_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT app_name ,domain ,ip ,ip_country_region ,ip_super_admin_area ,ip_admin_area ,ip_asn ,ip_isp ,domain_category_name ,domain_category_group ,app_category ,app_subcategory ,entity_tags ,stat_time
|
||||
FROM cyber_narrator_galaxy.raw_session_relation_domain_ip_app where stat_time >= toUnixTimestamp('2030-01-01 00:00:00') AND stat_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT ip, l7_protocol, port, stat_time
|
||||
FROM cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute where stat_time >= toUnixTimestamp('2030-01-01 00:00:00') AND stat_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT app_name, subscriber_id, imei, imsi, phone_number, apn, app_category, app_subcategory, create_time, update_time
|
||||
FROM cyber_narrator_galaxy.session_relation_subscriber_app where create_time >= toUnixTimestamp('2030-01-01 00:00:00') AND create_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT app_name ,subscriber_id ,imei ,imsi ,phone_number ,apn ,app_category ,app_subcategory ,stat_time
|
||||
FROM cyber_narrator_galaxy.raw_session_relation_subscriber_app where stat_time >= toUnixTimestamp('2030-01-01 00:00:00') AND stat_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT app_name, `domain`, ip, ip_country_region, ip_super_admin_area, ip_admin_area, ip_asn, ip_isp, domain_category_name, domain_category_group, app_category, app_subcategory, entity_tags, create_time, update_time
|
||||
FROM cyber_narrator_galaxy.session_relation_domain_ip_app_view_metric where create_time >= toUnixTimestamp('2030-01-01 00:00:00') AND create_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT app_name,subscriber_id,imei,imsi,phone_number,apn,app_category,app_subcategory,create_time,update_time
|
||||
FROM cyber_narrator_galaxy.session_relation_subscriber_app_view_metric where create_time >= toUnixTimestamp('2030-01-01 00:00:00') AND create_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT ip, l7_protocol, port, create_time, update_time
|
||||
FROM cyber_narrator_galaxy.cn_ip_dynamic_attribute_view_metric where create_time >= toUnixTimestamp('2030-01-01 00:00:00') AND create_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
SELECT tag, stat_time, ip_sketch, domain_sketch,ip_sketch_agg_state, domain_sketch_agg_state
|
||||
FROM cyber_narrator_galaxy.metric_tag where stat_time >= toUnixTimestamp('2030-01-01 00:00:00') AND stat_time <toUnixTimestamp('2030-01-01 00:00:01');
|
||||
@@ -0,0 +1,331 @@
|
||||
set distributed_ddl_task_timeout = 180;
|
||||
|
||||
ALTER table cyber_narrator_galaxy.location_subscriber_local on cluster ck_cluster add column IF NOT EXISTS data_source String after third_location;
|
||||
ALTER table cyber_narrator_galaxy.location_subscriber on cluster ck_cluster add column IF NOT EXISTS data_source String after third_location;
|
||||
ALTER table cyber_narrator_galaxy.location_subscriber on cluster ck_query add column IF NOT EXISTS data_source String after third_location;
|
||||
|
||||
drop view IF EXISTS cyber_narrator_galaxy.cn_entity_relation_view ON CLUSTER ck_cluster;
|
||||
drop view IF EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute_view ON CLUSTER ck_cluster;
|
||||
drop view IF EXISTS cyber_narrator_galaxy.cn_entity_relation_view_metric ON CLUSTER ck_cluster;
|
||||
drop view IF EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute_view_metric ON CLUSTER ck_cluster;
|
||||
|
||||
drop table IF EXISTS cyber_narrator_galaxy.cn_entity_relation on cluster ck_cluster;
|
||||
drop table IF EXISTS cyber_narrator_galaxy.cn_entity_relation on cluster ck_query;
|
||||
drop table IF EXISTS cyber_narrator_galaxy.metric_relation on cluster ck_cluster;
|
||||
drop table IF EXISTS cyber_narrator_galaxy.metric_relation on cluster ck_query;
|
||||
drop table IF EXISTS cyber_narrator_galaxy.metric_ip_dynamic_attribute on cluster ck_cluster;
|
||||
drop table IF EXISTS cyber_narrator_galaxy.metric_ip_dynamic_attribute on cluster ck_query;
|
||||
|
||||
|
||||
RENAME TABLE cyber_narrator_galaxy.cn_entity_relation_local TO cyber_narrator_galaxy.session_relation_domain_ip_app_local ON CLUSTER ck_cluster;
|
||||
RENAME TABLE cyber_narrator_galaxy.metric_relation_local TO cyber_narrator_galaxy.raw_session_relation_domain_ip_app_local ON CLUSTER ck_cluster;
|
||||
RENAME TABLE cyber_narrator_galaxy.metric_ip_dynamic_attribute_local TO cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute_local ON CLUSTER ck_cluster;
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.session_relation_subscriber_app_local ON CLUSTER ck_cluster (
|
||||
app_name String,
|
||||
subscriber_id String,
|
||||
imei String,
|
||||
imsi String,
|
||||
phone_number String,
|
||||
apn String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
create_time Int64,
|
||||
update_time Int64
|
||||
) ENGINE = MergeTree
|
||||
ORDER BY (subscriber_id, app_name)
|
||||
TTL toDateTime(update_time) + toIntervalSecond(2592000),
|
||||
toDateTime(update_time) + toIntervalSecond(1)
|
||||
GROUP BY subscriber_id, app_name
|
||||
SET create_time = min(create_time),
|
||||
update_time = max(update_time),
|
||||
imei = anyLast(imei),
|
||||
imsi = anyLast(imsi),
|
||||
phone_number = anyLast(phone_number),
|
||||
apn = anyLast(apn),
|
||||
app_category = anyLast(app_category),
|
||||
app_subcategory = anyLast(app_subcategory);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.session_relation_subscriber_app ON CLUSTER ck_cluster (
|
||||
app_name String,
|
||||
subscriber_id String,
|
||||
imei String,
|
||||
imsi String,
|
||||
phone_number String,
|
||||
apn String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
create_time Int64,
|
||||
update_time Int64
|
||||
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'session_relation_subscriber_app_local', rand());
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.session_relation_subscriber_app ON CLUSTER ck_query (
|
||||
app_name String,
|
||||
subscriber_id String,
|
||||
imei String,
|
||||
imsi String,
|
||||
phone_number String,
|
||||
apn String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
create_time Int64,
|
||||
update_time Int64
|
||||
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'session_relation_subscriber_app_local', rand());
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_session_relation_subscriber_app_local ON CLUSTER ck_cluster (
|
||||
app_name String,
|
||||
subscriber_id String,
|
||||
imei String,
|
||||
imsi String,
|
||||
phone_number String,
|
||||
apn String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
stat_time Int64
|
||||
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time) SETTINGS index_granularity = 8192;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_session_relation_subscriber_app ON CLUSTER ck_cluster (
|
||||
app_name String,
|
||||
subscriber_id String,
|
||||
imei String,
|
||||
imsi String,
|
||||
phone_number String,
|
||||
apn String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
stat_time Int64
|
||||
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'raw_session_relation_subscriber_app_local', rand());
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_session_relation_subscriber_app ON CLUSTER ck_query (
|
||||
app_name String,
|
||||
subscriber_id String,
|
||||
imei String,
|
||||
imsi String,
|
||||
phone_number String,
|
||||
apn String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
stat_time Int64
|
||||
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'raw_session_relation_subscriber_app_local', rand());
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.session_relation_domain_ip_app on cluster ck_query
|
||||
(
|
||||
app_name String,
|
||||
domain String,
|
||||
ip String,
|
||||
ip_country_region String,
|
||||
ip_super_admin_area String,
|
||||
ip_admin_area String,
|
||||
ip_asn String,
|
||||
ip_isp String,
|
||||
domain_category_name String,
|
||||
domain_category_group String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
entity_tags Array(String),
|
||||
create_time Int64,
|
||||
update_time Int64
|
||||
)
|
||||
ENGINE = Distributed('ck_cluster',
|
||||
'cyber_narrator_galaxy',
|
||||
'session_relation_domain_ip_app_local',
|
||||
rand());
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.session_relation_domain_ip_app on cluster ck_cluster
|
||||
(
|
||||
app_name String,
|
||||
domain String,
|
||||
ip String,
|
||||
ip_country_region String,
|
||||
ip_super_admin_area String,
|
||||
ip_admin_area String,
|
||||
ip_asn String,
|
||||
ip_isp String,
|
||||
domain_category_name String,
|
||||
domain_category_group String,
|
||||
app_category String,
|
||||
app_subcategory String,
|
||||
entity_tags Array(String),
|
||||
create_time Int64,
|
||||
update_time Int64
|
||||
)
|
||||
ENGINE = Distributed('ck_cluster',
|
||||
'cyber_narrator_galaxy',
|
||||
'session_relation_domain_ip_app_local',
|
||||
rand());
|
||||
|
||||
-- Distributed entry points for RAW domain/ip/app relation records
-- (per-interval rows keyed by stat_time; summarised by the materialized
-- view further down). One alias per cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_session_relation_domain_ip_app ON CLUSTER ck_cluster
(
    app_name              String,
    domain                String,
    ip                    String,
    ip_country_region     String,
    ip_super_admin_area   String,
    ip_admin_area         String,
    ip_asn                String,
    ip_isp                String,
    domain_category_name  String,
    domain_category_group String,
    app_category          String,
    app_subcategory       String,
    entity_tags           Array(String),
    stat_time             Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'raw_session_relation_domain_ip_app_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_session_relation_domain_ip_app ON CLUSTER ck_query
(
    app_name              String,
    domain                String,
    ip                    String,
    ip_country_region     String,
    ip_super_admin_area   String,
    ip_admin_area         String,
    ip_asn                String,
    ip_isp                String,
    domain_category_name  String,
    domain_category_group String,
    app_category          String,
    app_subcategory       String,
    entity_tags           Array(String),
    stat_time             Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'raw_session_relation_domain_ip_app_local', rand());
|
||||
|
||||
-- Distributed entry points for raw per-IP dynamic attributes
-- (observed L7 protocol and port per interval). One alias per cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute ON CLUSTER ck_cluster
(
    ip          String,
    l7_protocol String,
    port        Int64,
    stat_time   Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'raw_cn_ip_dynamic_attribute_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute ON CLUSTER ck_query
(
    ip          String,
    l7_protocol String,
    port        Int64,
    stat_time   Int64
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'raw_cn_ip_dynamic_attribute_local', rand());
|
||||
|
||||
|
||||
-- Materialized view: folds each inserted batch of raw domain/ip/app rows
-- into one row per (ip, app_name, domain) and pushes it into the local
-- summary table. anyLast keeps the most recent attribute values seen in
-- the batch; min/max of stat_time track first/last observation.
-- NOTE(review): per-batch GROUP BY only collapses rows within one insert;
-- cross-batch merging presumably relies on the target table's engine —
-- confirm against session_relation_domain_ip_app_local's DDL (not here).
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.session_relation_domain_ip_app_view_metric ON CLUSTER ck_cluster
TO cyber_narrator_galaxy.session_relation_domain_ip_app_local
(
    app_name              String,
    domain                String,
    ip                    String,
    ip_country_region     String,
    ip_super_admin_area   String,
    ip_admin_area         String,
    ip_asn                String,
    ip_isp                String,
    domain_category_name  String,
    domain_category_group String,
    app_category          String,
    app_subcategory       String,
    entity_tags           Array(String),
    create_time           Int64,
    update_time           Int64
) AS
SELECT
    app_name,
    domain,
    ip,
    anyLast(ip_country_region)             AS ip_country_region,
    anyLast(ip_super_admin_area)           AS ip_super_admin_area,
    anyLast(ip_admin_area)                 AS ip_admin_area,
    anyLast(ip_asn)                        AS ip_asn,
    anyLast(ip_isp)                        AS ip_isp,
    anyLast(domain_category_name)          AS domain_category_name,
    anyLast(domain_category_group)         AS domain_category_group,
    anyLast(app_category)                  AS app_category,
    anyLast(app_subcategory)               AS app_subcategory,
    groupUniqArrayArray(entity_tags)       AS entity_tags,
    min(stat_time)                         AS create_time,
    max(stat_time)                         AS update_time
FROM cyber_narrator_galaxy.raw_session_relation_domain_ip_app_local
GROUP BY
    ip,
    app_name,
    domain;
|
||||
|
||||
-- Materialized view: folds raw subscriber/app rows into one row per
-- (subscriber_id, app_name) and pushes it into the local summary table.
-- anyLast keeps the latest identity attributes within the inserted batch;
-- min/max of stat_time track first/last observation.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.session_relation_subscriber_app_view_metric ON CLUSTER ck_cluster
TO cyber_narrator_galaxy.session_relation_subscriber_app_local
(
    app_name        String,
    subscriber_id   String,
    imei            String,
    imsi            String,
    phone_number    String,
    apn             String,
    app_category    String,
    app_subcategory String,
    create_time     Int64,
    update_time     Int64
) AS
SELECT
    app_name,
    subscriber_id,
    anyLast(imei)            AS imei,
    anyLast(imsi)            AS imsi,
    anyLast(phone_number)    AS phone_number,
    anyLast(apn)             AS apn,
    anyLast(app_category)    AS app_category,
    anyLast(app_subcategory) AS app_subcategory,
    min(stat_time)           AS create_time,
    max(stat_time)           AS update_time
FROM cyber_narrator_galaxy.raw_session_relation_subscriber_app_local
GROUP BY
    subscriber_id,
    app_name;
|
||||
|
||||
-- Materialized view: collapses raw per-IP dynamic-attribute rows to one
-- row per (ip, l7_protocol, port), tracking first/last observation time.
CREATE MATERIALIZED VIEW IF NOT EXISTS cyber_narrator_galaxy.cn_ip_dynamic_attribute_view_metric ON CLUSTER ck_cluster
TO cyber_narrator_galaxy.cn_ip_dynamic_attribute_local
(
    ip          String,
    l7_protocol String,
    port        Int64,
    create_time Int64,
    update_time Int64
) AS
SELECT
    ip,
    l7_protocol,
    port,
    min(stat_time) AS create_time,
    max(stat_time) AS update_time
FROM cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute_local
GROUP BY
    ip,
    l7_protocol,
    port;
|
||||
|
||||
|
||||
-- Per-tag sketch metrics: local MergeTree storage plus Distributed entry
-- points on both clusters.
-- Fix: added IF NOT EXISTS to the local table so re-running this
-- migration is idempotent, matching every other statement in this file.
-- NOTE(review): unlike the sibling DDL, the local table has no ON CLUSTER
-- clause — confirm whether it must be created on each ck_cluster node
-- by other means.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_tag_local
(
    tag           String,
    stat_time     Int64,
    -- base64-encoded uniqTheta sketches as shipped by the producer.
    ip_sketch     String,
    domain_sketch String,
    -- Decoded aggregate-function states, materialized from the base64
    -- columns at insert time.
    ip_sketch_agg_state     AggregateFunction(uniqTheta, String) MATERIALIZED base64Decode(ip_sketch),
    domain_sketch_agg_state AggregateFunction(uniqTheta, String) MATERIALIZED base64Decode(domain_sketch)
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(toDate(stat_time))
ORDER BY (stat_time, tag);

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_tag ON CLUSTER ck_cluster
(
    tag           String,
    stat_time     Int64,
    ip_sketch     String,
    domain_sketch String,
    ip_sketch_agg_state     AggregateFunction(uniqTheta, String) MATERIALIZED base64Decode(ip_sketch),
    domain_sketch_agg_state AggregateFunction(uniqTheta, String) MATERIALIZED base64Decode(domain_sketch)
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_tag_local', rand());

CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_tag ON CLUSTER ck_query
(
    tag           String,
    stat_time     Int64,
    ip_sketch     String,
    domain_sketch String,
    ip_sketch_agg_state     AggregateFunction(uniqTheta, String) MATERIALIZED base64Decode(ip_sketch),
    domain_sketch_agg_state AggregateFunction(uniqTheta, String) MATERIALIZED base64Decode(domain_sketch)
)
ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_tag_local', rand());
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh

# gohangout watchdog/launcher for the raw_cn_ip_dynamic_attribute pipeline.
# Usage: <script> [process_count]
# Loops forever keeping exactly PROCESS_SUM gohangout processes alive.

# Config file name; the yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_raw_cn_ip_dynamic_attribute_cn
# Root of the gohangout installation (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker holds its own filter/output.
THREAD_SUM=3
# Desired process count. Fix: default to 1 when no argument is given —
# the original left PROCESS_SUM empty, making every numeric test below a
# syntax error on each loop iteration.
PROCESS_SUM=${1:-1}

# Fix: also create bin/start_log, which the restart log is appended to
# below (the original only created logs/).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # Fix: POSIX arithmetic — the original used bash-only ((logid++))
        # under #!/bin/sh, which fails on dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Restarted more times than the process count without recovering:
        # assume kafka/clickhouse is down, kill everything and back off.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and restart cleanly.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh

# gohangout watchdog/launcher for the raw_session_relation_domain_ip_app
# pipeline. Usage: <script> [process_count]
# Loops forever keeping exactly PROCESS_SUM gohangout processes alive.

# Config file name; the yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_raw_session_relation_domain_ip_app_cn
# Root of the gohangout installation (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker holds its own filter/output.
THREAD_SUM=3
# Desired process count. Fix: default to 1 when no argument is given —
# the original left PROCESS_SUM empty, making every numeric test below a
# syntax error on each loop iteration.
PROCESS_SUM=${1:-1}

# Fix: also create bin/start_log, which the restart log is appended to
# below (the original only created logs/).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # Fix: POSIX arithmetic — the original used bash-only ((logid++))
        # under #!/bin/sh, which fails on dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Restarted more times than the process count without recovering:
        # assume kafka/clickhouse is down, kill everything and back off.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and restart cleanly.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh

# gohangout watchdog/launcher for the raw_session_relation_subscriber_app
# pipeline. Usage: <script> [process_count]
# Loops forever keeping exactly PROCESS_SUM gohangout processes alive.

# Config file name; the yml must live under $BASE_DIR/conf.
YML_NAME=k2ck_raw_session_relation_subscriber_app_cn
# Root of the gohangout installation (two levels above this script).
BASE_DIR=$(cd "$(dirname "$0")"; cd ../../; pwd)
# Log verbosity: 1, 5 or 10 — higher is more verbose.
LOG_LV=5
# Worker threads per process; each worker holds its own filter/output.
THREAD_SUM=3
# Desired process count. Fix: default to 1 when no argument is given —
# the original left PROCESS_SUM empty, making every numeric test below a
# syntax error on each loop iteration.
PROCESS_SUM=${1:-1}

# Fix: also create bin/start_log, which the restart log is appended to
# below (the original only created logs/).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "${NUM}" -lt "${PROCESS_SUM}" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v ${LOG_LV} --worker ${THREAD_SUM} >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # Fix: POSIX arithmetic — the original used bash-only ((logid++))
        # under #!/bin/sh, which fails on dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Restarted more times than the process count without recovering:
        # assume kafka/clickhouse is down, kill everything and back off.
        if [ "${logid}" -gt "${PROCESS_SUM}" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all and restart cleanly.
    elif [ "${NUM}" -gt "${PROCESS_SUM}" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic RAW-CN-IP-DYNAMIC-ATTRIBUTE (json)
# -> ClickHouse cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute_local.
inputs:
    - Kafka:
        topic:
            # topic name: consumer-thread count
            RAW-CN-IP-DYNAMIC-ATTRIBUTE: 1
        #assign:
        #  weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-raw_cn_ip_dynamic_attribute_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.raw_cn_ip_dynamic_attribute_local'
        username: 'default'
        # NOTE(review): plaintext credentials committed to the repo —
        # should be injected from a secrets mechanism instead.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # batch size (rows) and max seconds between flushes
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic RAW-SESSION-RELATION-DOMAIN-IP-APP (json)
# -> ClickHouse cyber_narrator_galaxy.raw_session_relation_domain_ip_app_local.
inputs:
    - Kafka:
        topic:
            # topic name: consumer-thread count
            RAW-SESSION-RELATION-DOMAIN-IP-APP: 1
        #assign:
        #  weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-raw_session_relation_domain_ip_app_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.raw_session_relation_domain_ip_app_local'
        username: 'default'
        # NOTE(review): plaintext credentials committed to the repo —
        # should be injected from a secrets mechanism instead.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # batch size (rows) and max seconds between flushes
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic RAW-SESSION-RELATION-SUBSCRIBER-APP (json)
# -> ClickHouse cyber_narrator_galaxy.raw_session_relation_subscriber_app_local.
inputs:
    - Kafka:
        topic:
            # topic name: consumer-thread count
            RAW-SESSION-RELATION-SUBSCRIBER-APP: 1
        #assign:
        #  weblog: [0,9]
        codec: json
        consumer_settings:
            bootstrap.servers: "192.168.44.55:9092"
            group.id: tsg-raw_session_relation_subscriber_app_cn
            max.partition.fetch.bytes: '10485760'
            auto.commit.interval.ms: '5000'
            # from.beginning: 'true'
            # sasl.mechanism: PLAIN
            # sasl.user: admin
            # sasl.password: admin-secret

outputs:
    - Clickhouse:
        table: 'cyber_narrator_galaxy.raw_session_relation_subscriber_app_local'
        username: 'default'
        # NOTE(review): plaintext credentials committed to the repo —
        # should be injected from a secrets mechanism instead.
        password: 'ceiec2019'
        hosts:
            - 'tcp://192.168.44.55:9001'
        # batch size (rows) and max seconds between flushes
        bulk_actions: 100000
        flush_interval: 30
        concurrent: 2
        conn_max_life_time: 60
|
||||
24
cyber_narrator/upgrade/2024/CN-24.04/start_all.sh
Normal file
24
cyber_narrator/upgrade/2024/CN-24.04/start_all.sh
Normal file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh

# Launch every gohangout kafka->clickhouse pipeline for the CN deployment.
# $1 (desired process count) is forwarded unchanged to every start script.
STARTDIR=$(cd $(dirname $0); pwd)

# Each start script is an infinite watchdog loop, so all but the last are
# detached with nohup and backgrounded.
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_location_subscriber_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_subscriber_app_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_security_event_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_ip_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_region_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_asn_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_application_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_domain_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_protocol_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_link_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qname_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_server_ip_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_qtype_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rcode_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_a_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_aaaa_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_raw_session_relation_domain_ip_app_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_raw_session_relation_subscriber_app_cn.sh $1 > /dev/null 2>&1 &
nohup $STARTDIR/bin/ghoStart/start_gohangout_k2ck_raw_cn_ip_dynamic_attribute_cn.sh $1 > /dev/null 2>&1 &
# NOTE(review): the last watchdog intentionally runs in the foreground,
# presumably to keep the parent service/container alive — confirm.
$STARTDIR/bin/ghoStart/start_gohangout_k2ck_cn_record.sh $1
|
||||
Reference in New Issue
Block a user