CN 10版本ck增加DNS、Link预聚合表
This commit is contained in:
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_qname pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_qname_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_qname_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before, so the
# very first echo below could fail on a fresh install).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_qtype pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_qtype_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_qtype_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_rcode pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_rcode_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_rcode_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_rr_a pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_rr_a_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_rr_a_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_rr_aaaa pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_rr_aaaa_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_rr_aaaa_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_rr_cname pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_rr_cname_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_rr_cname_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the dns_server_ip pipeline alive.
# Usage: start_gohangout_k2ck_metric_dns_server_ip_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_dns_server_ip_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../ && pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
# gohangout watchdog: keeps PROCESS_SUM copies of the link pipeline alive.
# Usage: start_gohangout_k2ck_metric_link_cn.sh <process-count>

# Config file name; the .yml must live under $BASE_DIR/conf
YML_NAME=k2ck_metric_link_cn

# Deployment root (two levels above this script)
BASE_DIR=$(cd "$(dirname "$0")" && cd ../../; pwd)

# Log verbosity: 1, 5 or 10 — higher is more verbose
LOG_LV=5

# Worker threads per process (each worker owns its own filter/output and memory)
THREAD_SUM=3

# Desired process count, from the first CLI argument
PROCESS_SUM=$1

# Fail fast on a missing/non-numeric argument; previously an empty $1 made the
# loop below spin forever on "[: integer expression expected" errors.
case "$PROCESS_SUM" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Create both log directories (bin/start_log was never created before).
mkdir -p "$BASE_DIR/logs" "$BASE_DIR/bin/start_log"

RESTART_LOG=$BASE_DIR/bin/start_log/${YML_NAME}_restart.log
echo "###########################$(date +%Y%m%d%H%M%S)###########################" >> "$RESTART_LOG"

id=0
logid=0
while true; do
    NUM=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | wc -l)
    pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
    time_stamp=$(date +%Y%m%d%H%M%S)
    if [ "$NUM" -lt "$PROCESS_SUM" ]; then
        "$BASE_DIR/bin/gohangout" --config "$BASE_DIR/conf/${YML_NAME}.yml" --logtostderr --v "$LOG_LV" --worker "$THREAD_SUM" >> "$BASE_DIR/logs/${YML_NAME}.log" 2>&1 &
        echo "${time_stamp} ---> the ${YML_NAME}APP restart ---> $id" >> "$RESTART_LOG"
        # POSIX arithmetic: ((logid++)) is a bashism that breaks under dash/ash.
        logid=$((logid + 1))
        id=$((id + 1))
        # Too many consecutive restarts: assume Kafka/ClickHouse is down,
        # kill everything and back off for 30s.
        if [ "$logid" -gt "$PROCESS_SUM" ]; then
            logid=0
            pids=$(ps -ef | grep -w "${YML_NAME}.yml" | grep -v grep | awk '{print $2}')
            for pid in $pids; do
                kill -9 "$pid"   # NOTE(review): consider plain TERM first
            done
            echo "kafka or clickhouse is error,reset gohangout,sleep 30s... ..." >> "$RESTART_LOG"
            sleep 30
        fi
    # More processes than configured: kill them all; the loop restarts them.
    elif [ "$NUM" -gt "$PROCESS_SUM" ]; then
        for pid in $pids; do
            kill -9 "$pid"
        done
        id=0
    fi
    sleep 1
done
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-QNAME -> ClickHouse metric_dns_qname_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-QNAME: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        # NOTE(review): siblings use a *_cn suffix (e.g. metric_dns_qtype_cn);
        # confirm whether this group id should be metric_dns_qname_cn.
        group.id: metric_dns_qname
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_qname_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-QTYPE -> ClickHouse metric_dns_qtype_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-QTYPE: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_qtype_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_qtype_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RCODE -> ClickHouse metric_dns_rcode_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RCODE: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rcode_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rcode_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RR-A -> ClickHouse metric_dns_rr_a_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RR-A: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rr_a_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rr_a_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RR-AAAA -> ClickHouse metric_dns_rr_aaaa_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RR-AAAA: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rr_aaaa_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rr_aaaa_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-RR-CNAME -> ClickHouse metric_dns_rr_cname_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-RR-CNAME: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_rr_cname_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_rr_cname_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-DNS-SERVER-IP -> ClickHouse metric_dns_server_ip_local
inputs:
  - Kafka:
      topic:
        METRIC-DNS-SERVER-IP: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_dns_server_ip_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_dns_server_ip_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
28
CN发布版本更新记录/CN-22.10/gohangout/conf/k2ck_metric_link_cn.yml
Normal file
28
CN发布版本更新记录/CN-22.10/gohangout/conf/k2ck_metric_link_cn.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
# gohangout pipeline: Kafka topic METRIC-LINK -> ClickHouse metric_link_local
inputs:
  - Kafka:
      topic:
        METRIC-LINK: 1
      #assign:
      #  weblog: [0,9]
      codec: json
      consumer_settings:
        bootstrap.servers: "192.168.44.55:9092"
        group.id: metric_link_cn
        max.partition.fetch.bytes: '10485760'
        auto.commit.interval.ms: '5000'
#        from.beginning: 'true'
#        sasl.mechanism: PLAIN
#        sasl.user: admin
#        sasl.password: admin-secret

outputs:
  - Clickhouse:
      table: 'cyber_narrator_galaxy.metric_link_local'
      username: 'default'
      # NOTE(review): hardcoded credential — move to env/secret management
      password: 'ceiec2019'
      hosts:
        - 'tcp://192.168.44.55:9001'
      bulk_actions: 100000
      flush_interval: 30
      concurrent: 2
      conn_max_life_time: 60
|
||||
22
CN发布版本更新记录/CN-22.10/gohangout/start_all.sh
Normal file
22
CN发布版本更新记录/CN-22.10/gohangout/start_all.sh
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/bin/sh
# Launch every gohangout watchdog; $1 is the process count forwarded to
# each start_gohangout_*.sh script.
# Usage: start_all.sh <process-count>

STARTDIR=$(cd "$(dirname "$0")" && pwd)

# Fail fast: without this check an empty $1 propagates into every watchdog
# and makes each of them spin on integer-comparison errors.
case "$1" in
    ''|*[!0-9]*) echo "usage: $0 <process-count>" >&2; exit 1 ;;
esac

# Background all watchdogs except the last one (same order as before).
for name in \
    k2ck_cn_record \
    k2ck_metric_ip_cn \
    k2ck_metric_region_cn \
    k2ck_metric_asn_cn \
    k2ck_metric_idc_renter_cn \
    k2ck_metric_application_cn \
    k2ck_metric_domain_cn \
    k2ck_metric_http_host_cn \
    k2ck_metric_ssl_sni_cn \
    k2ck_metric_protocol_cn \
    k2ck_metric_link_cn \
    k2ck_metric_dns_qname_cn \
    k2ck_metric_dns_server_ip_cn \
    k2ck_metric_dns_qtype_cn \
    k2ck_metric_dns_rcode_cn \
    k2ck_metric_dns_rr_a_cn \
    k2ck_metric_dns_rr_aaaa_cn; do
    nohup "$STARTDIR/bin/ghoStart/start_gohangout_${name}.sh" "$1" > /dev/null 2>&1 &
done

# NOTE(review): kept in the foreground as in the original, which keeps this
# script attached to one watchdog forever — confirm whether it should also
# be backgrounded with nohup like the others.
"$STARTDIR/bin/ghoStart/start_gohangout_k2ck_metric_dns_rr_cname_cn.sh" "$1"
|
||||
@@ -1609,3 +1609,389 @@ CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_protocol ON CLUSTER ck_q
|
||||
avg_http_response_latency_ms Float64,
|
||||
avg_ssl_con_latency_ms Float64
|
||||
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_protocol_local', rand());
|
||||
|
||||
|
||||
-- Per-link traffic/quality pre-aggregation: local MergeTree shard table.
-- NOTE(review): toDate(stat_time) assumes stat_time is unix SECONDS — confirm
-- units; milliseconds would partition incorrectly.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_link_local ON CLUSTER ck_cluster (
    client_country String,
    client_province String,
    client_region String,
    server_country String,
    server_province String,
    server_region String,
    common_egress_link_id Int64,
    common_ingress_link_id Int64,
    egress_link_direction String,
    ingress_link_direction String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Float64,
    s2c_tcp_lostlen_ratio Float64,
    tcp_lostlen_ratio Float64,
    c2s_tcp_unorder_num_ratio Float64,
    s2c_tcp_unorder_num_ratio Float64,
    tcp_unorder_num_ratio Float64,
    c2s_byte_retrans_ratio Float64,
    s2c_byte_retrans_ratio Float64,
    byte_retrans_ratio Float64,
    c2s_pkt_retrans_ratio Float64,
    s2c_pkt_retrans_ratio Float64,
    pkt_retrans_ratio Float64,
    avg_establish_latency_ms Float64,
    avg_http_response_latency_ms Float64,
    avg_ssl_con_latency_ms Float64
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time) SETTINGS index_granularity = 8192;

-- Distributed facade over metric_link_local, created on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_link ON CLUSTER ck_cluster (
    client_country String,
    client_province String,
    client_region String,
    server_country String,
    server_province String,
    server_region String,
    common_egress_link_id Int64,
    common_ingress_link_id Int64,
    egress_link_direction String,
    ingress_link_direction String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Float64,
    s2c_tcp_lostlen_ratio Float64,
    tcp_lostlen_ratio Float64,
    c2s_tcp_unorder_num_ratio Float64,
    s2c_tcp_unorder_num_ratio Float64,
    tcp_unorder_num_ratio Float64,
    c2s_byte_retrans_ratio Float64,
    s2c_byte_retrans_ratio Float64,
    byte_retrans_ratio Float64,
    c2s_pkt_retrans_ratio Float64,
    s2c_pkt_retrans_ratio Float64,
    pkt_retrans_ratio Float64,
    avg_establish_latency_ms Float64,
    avg_http_response_latency_ms Float64,
    avg_ssl_con_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_link_local', rand());

-- Same Distributed facade created on the query cluster (ck_query) so query
-- nodes can read the shards on ck_cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_link ON CLUSTER ck_query (
    client_country String,
    client_province String,
    client_region String,
    server_country String,
    server_province String,
    server_region String,
    common_egress_link_id Int64,
    common_ingress_link_id Int64,
    egress_link_direction String,
    ingress_link_direction String,
    stat_time Int64,
    common_c2s_pkt_num Int64,
    common_c2s_byte_num Int64,
    common_s2c_pkt_num Int64,
    common_s2c_byte_num Int64,
    common_sessions Int64,
    traffic_inbound_byte Int64,
    traffic_inbound_pkt Int64,
    traffic_outbound_byte Int64,
    traffic_outbound_pkt Int64,
    traffic_internal_byte Int64,
    traffic_internal_pkt Int64,
    traffic_through_byte Int64,
    traffic_through_pkt Int64,
    c2s_tcp_lostlen_ratio Float64,
    s2c_tcp_lostlen_ratio Float64,
    tcp_lostlen_ratio Float64,
    c2s_tcp_unorder_num_ratio Float64,
    s2c_tcp_unorder_num_ratio Float64,
    tcp_unorder_num_ratio Float64,
    c2s_byte_retrans_ratio Float64,
    s2c_byte_retrans_ratio Float64,
    byte_retrans_ratio Float64,
    c2s_pkt_retrans_ratio Float64,
    s2c_pkt_retrans_ratio Float64,
    pkt_retrans_ratio Float64,
    avg_establish_latency_ms Float64,
    avg_http_response_latency_ms Float64,
    avg_ssl_con_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_link_local', rand());
|
||||
|
||||
|
||||
-- Per-DNS-server pre-aggregation: local MergeTree shard table.
-- NOTE(review): toDate(stat_time) assumes unix seconds — confirm units.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_server_ip_local ON CLUSTER ck_cluster (
    server_ip String,
    server_country String,
    server_city String,
    server_isp String,
    server_org String,
    server_role Array(String),
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,server_ip) SETTINGS index_granularity = 8192;

-- Distributed facade over the local table, on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_cluster (
    server_ip String,
    server_country String,
    server_city String,
    server_isp String,
    server_org String,
    server_role Array(String),
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_server_ip_local', rand());

-- Same Distributed facade on the query cluster (ck_query).
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_server_ip ON CLUSTER ck_query (
    server_ip String,
    server_country String,
    server_city String,
    server_isp String,
    server_org String,
    server_role Array(String),
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_server_ip_local', rand());
|
||||
|
||||
|
||||
-- Per-qname DNS pre-aggregation: local MergeTree shard table.
-- NOTE(review): toDate(stat_time) assumes unix seconds — confirm units.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qname_local ON CLUSTER ck_cluster (
    qname String,
    qname_sld String,
    qname_tld String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,qname) SETTINGS index_granularity = 8192;

-- Distributed facade over the local table, on the storage cluster.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qname ON CLUSTER ck_cluster (
    qname String,
    qname_sld String,
    qname_tld String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qname_local', rand());

-- Same Distributed facade on the query cluster (ck_query).
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qname ON CLUSTER ck_query (
    qname String,
    qname_sld String,
    qname_tld String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qname_local', rand());
|
||||
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qtype_local ON CLUSTER ck_cluster (
|
||||
qtype Int64,
|
||||
stat_time Int64,
|
||||
query_num Int64,
|
||||
traffic_inbound_byte Int64,
|
||||
traffic_outbound_byte Int64,
|
||||
internal_query_num Int64,
|
||||
external_query_num Int64,
|
||||
avg_response_latency_ms Float64
|
||||
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,qtype) SETTINGS index_granularity = 8192;
|
||||
|
||||
-- Distributed entry table for per-qtype DNS metrics on the data cluster;
-- rand() shards inserts across metric_dns_qtype_local. Column set mirrors
-- metric_dns_qtype_local.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qtype ON CLUSTER ck_cluster (
    qtype Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qtype_local', rand());
|
||||
|
||||
-- Query-cluster facade for per-qtype DNS metrics: lives on ck_query nodes but
-- targets data cluster 'ck_cluster' for the actual shards. Mirrors
-- metric_dns_qtype_local's column set.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_qtype ON CLUSTER ck_query (
    qtype Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_qtype_local', rand());
|
||||
|
||||
|
||||
-- Shard-local storage for per-rcode (DNS response code) metric pre-aggregation.
-- Partitioned by day derived from stat_time.
-- NOTE(review): toDate(Int64) unit semantics — confirm stat_time's unit
-- matches what this partition expression expects on the deployed CH version.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rcode_local ON CLUSTER ck_cluster (
    rcode Int64,                      -- DNS response code (numeric)
    stat_time Int64,                  -- statistics window timestamp
    query_num Int64,                  -- total queries in the window
    traffic_inbound_byte Int64,       -- inbound traffic, bytes
    traffic_outbound_byte Int64,      -- outbound traffic, bytes
    internal_query_num Int64,         -- queries classified as internal
    external_query_num Int64,         -- queries classified as external
    avg_response_latency_ms Float64   -- mean response latency, ms
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rcode) SETTINGS index_granularity = 8192;
|
||||
|
||||
-- Distributed entry table for per-rcode DNS metrics on the data cluster;
-- rand() shards inserts across metric_dns_rcode_local. Column set mirrors
-- metric_dns_rcode_local.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rcode ON CLUSTER ck_cluster (
    rcode Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rcode_local', rand());
|
||||
|
||||
-- Query-cluster facade for per-rcode DNS metrics: created on ck_query nodes,
-- routed to data cluster 'ck_cluster'. Mirrors metric_dns_rcode_local's
-- column set.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rcode ON CLUSTER ck_query (
    rcode Int64,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rcode_local', rand());
|
||||
|
||||
|
||||
-- Shard-local storage for per-A-record (rr_a) DNS metric pre-aggregation.
-- Partitioned by day derived from stat_time.
-- NOTE(review): toDate(Int64) unit semantics — confirm stat_time's unit.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_a_local ON CLUSTER ck_cluster (
    rr_a String,                      -- A-record answer (IPv4 address string, presumably — verify producer)
    stat_time Int64,                  -- statistics window timestamp
    query_num Int64,                  -- total queries in the window
    traffic_inbound_byte Int64,       -- inbound traffic, bytes
    traffic_outbound_byte Int64,      -- outbound traffic, bytes
    internal_query_num Int64,         -- queries classified as internal
    external_query_num Int64,         -- queries classified as external
    avg_response_latency_ms Float64   -- mean response latency, ms
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rr_a) SETTINGS index_granularity = 8192;
|
||||
|
||||
-- Distributed entry table for per-A-record DNS metrics on the data cluster;
-- rand() shards inserts across metric_dns_rr_a_local. Column set mirrors
-- metric_dns_rr_a_local.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_a ON CLUSTER ck_cluster (
    rr_a String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_a_local', rand());
|
||||
|
||||
-- Query-cluster facade for per-A-record DNS metrics: created on ck_query
-- nodes, routed to data cluster 'ck_cluster'. Mirrors
-- metric_dns_rr_a_local's column set.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_a ON CLUSTER ck_query (
    rr_a String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_a_local', rand());
|
||||
|
||||
|
||||
-- Shard-local storage for per-AAAA-record (rr_aaaa) DNS metric pre-aggregation.
-- Partitioned by day derived from stat_time.
-- NOTE(review): toDate(Int64) unit semantics — confirm stat_time's unit.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_aaaa_local ON CLUSTER ck_cluster (
    rr_aaaa String,                   -- AAAA-record answer (IPv6 address string, presumably — verify producer)
    stat_time Int64,                  -- statistics window timestamp
    query_num Int64,                  -- total queries in the window
    traffic_inbound_byte Int64,       -- inbound traffic, bytes
    traffic_outbound_byte Int64,      -- outbound traffic, bytes
    internal_query_num Int64,         -- queries classified as internal
    external_query_num Int64,         -- queries classified as external
    avg_response_latency_ms Float64   -- mean response latency, ms
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rr_aaaa) SETTINGS index_granularity = 8192;
|
||||
|
||||
-- Distributed entry table for per-AAAA-record DNS metrics on the data cluster;
-- rand() shards inserts across metric_dns_rr_aaaa_local. Column set mirrors
-- metric_dns_rr_aaaa_local.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_aaaa ON CLUSTER ck_cluster (
    rr_aaaa String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_aaaa_local', rand());
|
||||
|
||||
-- Query-cluster facade for per-AAAA-record DNS metrics: created on ck_query
-- nodes, routed to data cluster 'ck_cluster'. Mirrors
-- metric_dns_rr_aaaa_local's column set.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_aaaa ON CLUSTER ck_query (
    rr_aaaa String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_aaaa_local', rand());
|
||||
|
||||
|
||||
-- Shard-local storage for per-CNAME-record (rr_cname) DNS metric pre-aggregation.
-- Partitioned by day derived from stat_time.
-- NOTE(review): toDate(Int64) unit semantics — confirm stat_time's unit.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_cname_local ON CLUSTER ck_cluster (
    rr_cname String,                  -- CNAME-record answer (canonical name)
    stat_time Int64,                  -- statistics window timestamp
    query_num Int64,                  -- total queries in the window
    traffic_inbound_byte Int64,       -- inbound traffic, bytes
    traffic_outbound_byte Int64,      -- outbound traffic, bytes
    internal_query_num Int64,         -- queries classified as internal
    external_query_num Int64,         -- queries classified as external
    avg_response_latency_ms Float64   -- mean response latency, ms
) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(stat_time)) ORDER BY (stat_time,rr_cname) SETTINGS index_granularity = 8192;
|
||||
|
||||
-- Distributed entry table for per-CNAME-record DNS metrics on the data
-- cluster; rand() shards inserts across metric_dns_rr_cname_local. Column set
-- mirrors metric_dns_rr_cname_local.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_cname ON CLUSTER ck_cluster (
    rr_cname String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_cname_local', rand());
|
||||
|
||||
-- Query-cluster facade for per-CNAME-record DNS metrics: created on ck_query
-- nodes, routed to data cluster 'ck_cluster'. Mirrors
-- metric_dns_rr_cname_local's column set.
CREATE TABLE IF NOT EXISTS cyber_narrator_galaxy.metric_dns_rr_cname ON CLUSTER ck_query (
    rr_cname String,
    stat_time Int64,
    query_num Int64,
    traffic_inbound_byte Int64,
    traffic_outbound_byte Int64,
    internal_query_num Int64,
    external_query_num Int64,
    avg_response_latency_ms Float64
) ENGINE = Distributed('ck_cluster', 'cyber_narrator_galaxy', 'metric_dns_rr_cname_local', rand());
|
||||
Reference in New Issue
Block a user