This commit is contained in:
zhanghongqing
2022-05-06 15:28:52 +08:00
parent 98795307a7
commit 1f34cac94e
54 changed files with 8755 additions and 0 deletions

View File

@@ -0,0 +1,55 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "top_client_ip_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"source",
"device_group",
"data_center",
"order_by"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num", "fieldName" : "session_num" },
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TOP-CLIENT-IP",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,56 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "top_external_host_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"destination",
"device_group",
"data_center",
"order_by"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num", "fieldName" : "session_num" },
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"maxRowsPerSegment": 5000000,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TOP-EXTERNAL-HOST",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,55 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "top_internal_host_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"source",
"device_group",
"data_center",
"order_by"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num", "fieldName" : "session_num" },
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TOP-INTERNAL-HOST",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,56 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "top_server_ip_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"destination",
"device_group",
"data_center",
"order_by"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num", "fieldName" : "session_num" },
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"maxRowsPerSegment": 5000000,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TOP-SERVER-IP",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,55 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "top_user_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"subscriber_id",
"device_group",
"data_center",
"order_by"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num", "fieldName" : "session_num" },
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TOP-USER",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,56 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "top_website_domain_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"domain",
"device_group",
"data_center",
"order_by"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num", "fieldName" : "session_num" },
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"maxRowsPerSegment": 5000000,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TOP-WEBSITE-DOMAIN",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,63 @@
{
"type": "kafka",
"dataSchema": {
"dataSource": "traffic_app_stat_log",
"parser": {
"type": "string",
"parseSpec": {
"format": "json",
"timestampSpec": {
"column": "stat_time",
"format": "posix"
},
"dimensionsSpec": {
"dimensions": [
"device_group",
"data_center",
"app_name"
]
}
}
},
"metricsSpec" : [
{ "type" : "longSum", "name" : "session_num","fieldName" : "session_num"},
{ "type" : "longSum", "name" : "c2s_pkt_num", "fieldName" : "c2s_pkt_num" },
{ "type" : "longSum", "name" : "s2c_pkt_num", "fieldName" : "s2c_pkt_num" },
{ "type" : "longSum", "name" : "c2s_byte_num", "fieldName" : "c2s_byte_num" },
{ "type" : "longSum", "name" : "s2c_byte_num", "fieldName" : "s2c_byte_num" }
],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "DAY",
"queryGranularity": {"type": "period", "period": "PT5M"},
"rollup": true
},
"transformSpec" :{
"transforms":[
],
"filter": {
"type": "not",
"field":{ "type": "selector", "dimension": "app_name", "value": "" }
}
}
},
"tuningConfig": {
"type": "kafka",
"resetOffsetAutomatically": true,
"reportParseExceptions": false
},
"ioConfig": {
"topic": "TRAFFIC-APP-STAT",
"taskCount": 1,
"replicas": 1,
"taskDuration": "PT1H",
"completionTimeout": "PT30M",
"earlyMessageRejectionPeriod": "PT6H",
"consumerProperties": {
"bootstrap.servers": "kafkabootstrap",
"sasl.mechanism": "PLAIN",
"security.protocol": "SASL_PLAINTEXT",
"sasl.jaas.config": "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"admin\" password=\"galaxy2019\";"
}
}
}

View File

@@ -0,0 +1,67 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=active_defence_event.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=ACTIVE-DEFENCE-EVENT
#补全数据 输出 topic
sink.kafka.topic=ACTIVE-DEFENCE-EVENT-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=active-defence-log-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=1
#转换函数并行度
transform.parallelism=1
#kafka producer 并行度
sink.parallelism=1
#数据中心,取值范围(0-63)
data.center.id.num=1
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=gtpc_record.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=GTPC-RECORD
#补全数据 输出 topic
sink.kafka.topic=GTPC-RECORD-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=gtpc-record-log-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=1
#转换函数并行度
transform.parallelism=1
#kafka producer 并行度
sink.parallelism=1
#数据中心,取值范围(0-63)
data.center.id.num=2
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=interim_session_record.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=INTERIM-SESSION-RECORD
#补全数据 输出 topic
sink.kafka.topic=INTERIM-SESSION-RECORD-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=linterim-session-record-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=5
#转换函数并行度
transform.parallelism=1
#kafka producer 并行度
sink.parallelism=5
#数据中心,取值范围(0-63)
data.center.id.num=3
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=proxy_event.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=PROXY-EVENT
#补全数据 输出 topic
sink.kafka.topic=PROXY-EVENT-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=proxy-event-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=5
#转换函数并行度
transform.parallelism=5
#kafka producer 并行度
sink.parallelism=5
#数据中心,取值范围(0-63)
data.center.id.num=4
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=radius_record.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=RADIUS-RECORD
#补全数据 输出 topic
sink.kafka.topic=RADIUS-RECORD-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=radius-record-log-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=1
#转换函数并行度
transform.parallelism=1
#kafka producer 并行度
sink.parallelism=1
#数据中心,取值范围(0-63)
data.center.id.num=5
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=security_event.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=SECURITY-EVENT
#补全数据 输出 topic
sink.kafka.topic=SECURITY-EVENT-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=security-event-log-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=5
#转换函数并行度
transform.parallelism=10
#kafka producer 并行度
sink.parallelism=5
#数据中心,取值范围(0-63)
data.center.id.num=6
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=session_record.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=SESSION-RECORD
#补全数据 输出 topic
sink.kafka.topic=SESSION-RECORD-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=session-record-log-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=5
#转换函数并行度
transform.parallelism=10
#kafka producer 并行度
sink.parallelism=5
#数据中心,取值范围(0-63)
data.center.id.num=0
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=sys_packet_capture_event.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=SYS-PACKET-CAPTURE-EVENT
#补全数据 输出 topic
sink.kafka.topic=SYS-PACKET-CAPTURE-EVENT-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=sys-packet-log-20210124-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=1
#转换函数并行度
transform.parallelism=1
#kafka producer 并行度
sink.parallelism=1
#数据中心,取值范围(0-63)
data.center.id.num=7
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=transaction_record.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=TRANSACTION-RECORD
#补全数据 输出 topic
sink.kafka.topic=TRANSACTION-RECORD-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=transaction-record-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=5
#转换函数并行度
transform.parallelism=10
#kafka producer 并行度
sink.parallelism=5
#数据中心,取值范围(0-63)
data.center.id.num=8
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,66 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.13:9094,192.168.44.14:9094,192.168.44.15:9094,192.168.44.16:9094
#zookeeper 地址 用于配置log_id
zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#hbase zookeeper地址 用于连接HBase
hbase.zookeeper.servers=192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
#--------------------------------HTTP/定位库------------------------------#
#定位库地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos topology_common_config.properties namespace
nacos.common.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=voip_record.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=VOIP-CONVERSATION-RECORD
#补全数据 输出 topic
sink.kafka.topic=VOIP-RECORD-COMPLETED
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=voip-record-20220408-1
#生产者压缩模式 none or snappy
producer.kafka.compression.type=none
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=1
#转换函数并行度
transform.parallelism=1
#kafka producer 并行度
sink.parallelism=1
#数据中心,取值范围(0-63)
data.center.id.num=9
#hbase 更新时间如填写0则不更新缓存
hbase.tick.tuple.freq.secs=180
#--------------------------------默认值配置------------------------------#
#0不需要补全原样输出日志1需要补全
log.need.complete=1

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Launch one Flink log-completion job per configuration file found under the
# directory given as the first argument.
#
# For each configuration file the script:
#   1. writes the file into a temporary copy of the job jar as
#      service_flow_config.properties, and
#   2. submits the jar with `flink run` unless a job whose name contains the
#      config file name is already running (checked via `flink list`).
source /etc/profile
# Directory containing the job jar (the current working directory).
BASE_DIR=$(pwd)
# Pristine job jar; it is copied so the embedded config can be replaced
# without touching the original artifact.
primordial='log-completion-schema-220318-Nacos.jar'
# Name of the temporary working copy of the jar.
JAR_NAME='log-completion-schema_tmp.jar'
# Parallelism passed to `flink run -p`.
PARALLELISM=1
yes | cp -r "$primordial" "$JAR_NAME"
#cd $BASE_DIR
# Extract a template service_flow_config.properties from the jar copy.
jar -xvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
# Recursively walk the directory named by $1 and submit one job per file.
function read_dir(){
for file in $(ls "$1")
do
    if [ -d "$1/$file" ]
    then
        read_dir "$1/$file"
    else
        # Skip configs whose job already appears in `flink list`.
        num=$(flink list | grep "$file" | wc -l)
        if [ "$num" -eq "0" ];then
            # BUG FIX: the original used `cat $1$file` (missing the `/`),
            # which only resolved correctly when the caller passed a path
            # with a trailing slash; directory recursion already used "$1/$file".
            cat "$1/$file" > "$BASE_DIR/service_flow_config.properties"
            # Replace the config inside the jar, then submit the job.
            jar -uvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
            flink run -d -c com.zdjizhi.topology.LogFlowWriteTopology -p $PARALLELISM "$JAR_NAME" "$file"
        fi
    fi
done
}
if [ $# != 1 ];then
    echo "usage: ./startall.sh [Configuration path]"
    exit 1
fi
# First (and only) argument: directory holding per-job configuration files.
read_dir "$1"
rm -rf "$JAR_NAME"

View File

@@ -0,0 +1,415 @@
--
-- Source stream: completed session records consumed from the Kafka topic
-- SESSION-RECORD-COMPLETED. Every Top-N statement in this file reads from it.
CREATE TABLE session_record_completed_log(
common_schema_type VARCHAR,
common_recv_time BIGINT,
common_client_ip VARCHAR,
common_server_ip VARCHAR,
http_host VARCHAR,
http_domain VARCHAR,
common_device_group VARCHAR,
common_data_center VARCHAR,
common_l4_protocol VARCHAR,
common_internal_ip VARCHAR,
common_external_ip VARCHAR,
common_subscriber_id VARCHAR,
common_sessions BIGINT,
common_app_label VARCHAR,
common_c2s_pkt_num BIGINT,
common_s2c_pkt_num BIGINT,
common_c2s_byte_num BIGINT,
common_s2c_byte_num BIGINT,
common_processing_time BIGINT,
-- Event time derived from the epoch-seconds receive timestamp
stat_time as TO_TIMESTAMP(FROM_UNIXTIME(common_recv_time)),
-- Tolerate events arriving up to one minute late
WATERMARK FOR stat_time AS stat_time - INTERVAL '1' MINUTE)
WITH(
'connector' = 'kafka',
'properties.group.id' = 'kafka-indexing-service',
'topic' = 'SESSION-RECORD-COMPLETED',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
-- NOTE(review): credentials are hard-coded in version control — consider externalizing them
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
'scan.startup.mode' = 'latest-offset',
'sink.parallelism'='1',
--'sink.parallelism'='60',
'format' = 'json'
);
--client: Top-N client IPs (ranked by sessions / packets / bytes) per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_client_ip_log(
source VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
order_by VARCHAR,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key. Keying on stat_time alone
-- retains only one record per 5-minute window, discarding the rest of the
-- Top-N output; the key must contain every column that identifies a row.
PRIMARY KEY (source,order_by,device_group,data_center,stat_time) NOT ENFORCED
)WITH(
'connector' = 'upsert-kafka',
'topic' = 'TOP-CLIENT-IP',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
--'sink.parallelism'='1',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window TCP traffic aggregation per client IP and dimension pair
CREATE VIEW top_client_ip_view as
SELECT common_client_ip as source,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group ,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where (common_l4_protocol = 'IPv6_TCP' or common_l4_protocol = 'IPv4_TCP') and (common_device_group<>'' or common_data_center<>'')
group by common_client_ip,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- Emit three rankings (sessions / packets / bytes), each capped at 10000 rows per partition
INSERT INTO top_client_ip_log
(SELECT `source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'sessions' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY session_num DESC) as rownum
FROM
top_client_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'packets' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_pkt_num+s2c_pkt_num DESC) as rownum
FROM
top_client_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'bytes' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_byte_num+s2c_byte_num DESC) as rownum
FROM
top_client_ip_view)
WHERE rownum <= 10000);
--server: Top-N server IPs (ranked by sessions / packets / bytes) per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_server_ip_log(
destination VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
order_by VARCHAR,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key; stat_time alone would keep
-- only one record per window. Include all identifying columns in the key.
PRIMARY KEY (destination,order_by,device_group,data_center,stat_time) NOT ENFORCED
)WITH(
'connector' = 'upsert-kafka',
'topic' = 'TOP-SERVER-IP',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
--'sink.parallelism'='1',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window TCP traffic aggregation per server IP and dimension pair
CREATE VIEW top_server_ip_view as
SELECT common_server_ip as `destination`,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group ,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where (common_l4_protocol = 'IPv6_TCP' or common_l4_protocol = 'IPv4_TCP') and (common_device_group<>'' or common_data_center<>'')
group by common_server_ip,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- Emit three rankings (sessions / packets / bytes), each capped at 10000 rows per partition
INSERT INTO top_server_ip_log
(SELECT `destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'sessions' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY session_num DESC) as rownum
FROM
top_server_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'packets' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_pkt_num+s2c_pkt_num DESC) as rownum
FROM
top_server_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'bytes' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_byte_num+s2c_byte_num DESC) as rownum
FROM
top_server_ip_view)
WHERE rownum <= 10000);
--internal: Top-N internal host IPs (ranked by sessions / packets / bytes) per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_internal_ip_log (
source VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
order_by VARCHAR,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key; stat_time alone would keep
-- only one record per window. Include all identifying columns in the key.
PRIMARY KEY (source,order_by,device_group,data_center,stat_time) NOT ENFORCED
) WITH (
'connector' = 'upsert-kafka',
'topic' = 'TOP-INTERNAL-HOST',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
--'sink.parallelism'='1',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window traffic aggregation per internal IP and dimension pair
CREATE VIEW top_common_internal_ip_view as
SELECT common_internal_ip as `source`,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where common_internal_ip<>'' and (common_device_group<>'' or common_data_center<>'')
group by common_internal_ip,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- Emit three rankings (sessions / packets / bytes), each capped at 10000 rows per partition
INSERT INTO top_internal_ip_log
(SELECT `source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'sessions' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY session_num DESC) as rownum
FROM
top_common_internal_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'packets' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_pkt_num+s2c_pkt_num DESC) as rownum
FROM
top_common_internal_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`source`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'bytes' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_byte_num+s2c_byte_num DESC) as rownum
FROM
top_common_internal_ip_view)
WHERE rownum <= 10000);
--external: Top-N external host IPs (ranked by sessions / packets / bytes) per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_external_ip_log (
destination VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
order_by VARCHAR,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key; stat_time alone would keep
-- only one record per window. Include all identifying columns in the key.
PRIMARY KEY (destination,order_by,device_group,data_center,stat_time) NOT ENFORCED
) WITH (
'connector' = 'upsert-kafka',
'topic' = 'TOP-EXTERNAL-HOST',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
--'sink.parallelism'='1',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window traffic aggregation per external IP and dimension pair
CREATE VIEW top_common_external_ip_view as
SELECT common_external_ip as `destination`,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where common_external_ip<>'' and (common_device_group<>'' or common_data_center<>'')
group by common_external_ip,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- Emit three rankings (sessions / packets / bytes), each capped at 10000 rows per partition
INSERT INTO top_external_ip_log
(SELECT `destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'sessions' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY session_num DESC) as rownum
FROM
top_common_external_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'packets' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_pkt_num+s2c_pkt_num DESC) as rownum
FROM
top_common_external_ip_view)
WHERE rownum <= 10000)
union all
(SELECT `destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`destination`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'bytes' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_byte_num+s2c_byte_num DESC) as rownum
FROM
top_common_external_ip_view)
WHERE rownum <= 10000);
--website_domain: Top-N HTTP domains (ranked by sessions / packets / bytes) per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_website_domain_log (
domain VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
order_by VARCHAR,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key; stat_time alone would keep
-- only one record per window. Include all identifying columns in the key.
PRIMARY KEY (domain,order_by,device_group,data_center,stat_time) NOT ENFORCED
) WITH (
'connector' = 'upsert-kafka',
'topic' = 'TOP-WEBSITE-DOMAIN',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
--'sink.parallelism'='1',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window traffic aggregation per HTTP domain and dimension pair
CREATE VIEW top_website_domain_view as
SELECT http_domain as `domain`,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group ,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where http_domain<>'' and (common_device_group<>'' or common_data_center<>'')
group by http_domain,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- Emit three rankings (sessions / packets / bytes), each capped at 10000 rows per partition
INSERT INTO top_website_domain_log
(SELECT `domain`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`domain`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'sessions' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY session_num DESC) as rownum
FROM
top_website_domain_view)
WHERE rownum <= 10000)
union all
(SELECT `domain`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`domain`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'packets' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_pkt_num+s2c_pkt_num DESC) as rownum
FROM
top_website_domain_view)
WHERE rownum <= 10000)
union all
(SELECT `domain`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`domain`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'bytes' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_byte_num+s2c_byte_num DESC) as rownum
FROM
top_website_domain_view)
WHERE rownum <= 10000);
--user: Top-N subscribers (ranked by sessions / packets / bytes) per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_user_log (
subscriber_id VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
order_by VARCHAR,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key; stat_time alone would keep
-- only one record per window. Include all identifying columns in the key.
PRIMARY KEY (subscriber_id,order_by,device_group,data_center,stat_time) NOT ENFORCED
) WITH (
'connector' = 'upsert-kafka',
'topic' = 'TOP-USER',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
--'sink.parallelism'='1',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window traffic aggregation per subscriber and dimension pair
CREATE VIEW top_user_log_view as
SELECT common_subscriber_id as `subscriber_id`,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group ,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where common_subscriber_id <>'' and (common_device_group<>'' or common_data_center<>'')
group by common_subscriber_id,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- Emit three rankings (sessions / packets / bytes), each capped at 10000 rows per partition
INSERT INTO top_user_log
(SELECT `subscriber_id`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`subscriber_id`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'sessions' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY session_num DESC) as rownum
FROM
top_user_log_view)
WHERE rownum <= 10000)
union all
(SELECT `subscriber_id`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`subscriber_id`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'packets' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_pkt_num+s2c_pkt_num DESC) as rownum
FROM
top_user_log_view)
WHERE rownum <= 10000)
union all
(SELECT `subscriber_id`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,order_by,device_group,data_center,stat_time FROM
(SELECT
`subscriber_id`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,'bytes' as order_by,device_group,data_center,stat_time,
ROW_NUMBER() OVER (PARTITION BY device_group,data_center ORDER BY c2s_byte_num+s2c_byte_num DESC) as rownum
FROM
top_user_log_view)
WHERE rownum <= 10000);
--app: per-application traffic totals per device_group + data_center, 5-minute tumbling windows
CREATE TABLE top_app_log (
app_name VARCHAR,
session_num BIGINT,
c2s_pkt_num BIGINT,
s2c_pkt_num BIGINT,
c2s_byte_num BIGINT,
s2c_byte_num BIGINT,
device_group VARCHAR,
data_center VARCHAR,
stat_time BIGINT,
-- FIX: upsert-kafka compacts rows by primary key; stat_time alone would keep
-- only one record per window. Include all identifying columns in the key.
PRIMARY KEY (app_name,device_group,data_center,stat_time) NOT ENFORCED
) WITH (
'connector' = 'upsert-kafka',
'topic' = 'TRAFFIC-APP-STAT',
'properties.bootstrap.servers' = 'kafkabootstrap',
'properties.security.protocol'='SASL_PLAINTEXT',
'properties.sasl.mechanism'='PLAIN',
'properties.sasl.jaas.config'= 'org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="galaxy2019";',
'key.format' = 'json',
'value.format' = 'json'
);
-- Per-window traffic aggregation per application label and dimension pair
CREATE VIEW top_app_log_view as
SELECT common_app_label as `app_name`,sum(common_sessions) as session_num,sum(common_c2s_pkt_num) as c2s_pkt_num,sum(common_s2c_pkt_num) as s2c_pkt_num,sum(common_c2s_byte_num) as c2s_byte_num,sum(common_s2c_byte_num) as s2c_byte_num,common_device_group as device_group ,common_data_center as data_center,UNIX_TIMESTAMP(CAST(TUMBLE_END(stat_time,INTERVAL '5' MINUTE) as VARCHAR)) as stat_time
FROM session_record_completed_log
where common_app_label<>'' and (common_device_group<>'' or common_data_center<>'')
group by common_app_label,common_device_group,common_data_center,TUMBLE(stat_time,INTERVAL '5' MINUTE);
-- FIX: the original wrapped the view in ROW_NUMBER() OVER (PARTITION BY ...)
-- with no ORDER BY clause — Flink rejects ROW_NUMBER without ORDER BY — and
-- never filtered on the row number anyway, so every row was forwarded.
-- The pointless window function is removed and all rows are inserted directly.
INSERT INTO top_app_log
SELECT `app_name`, session_num, c2s_pkt_num,s2c_pkt_num,c2s_byte_num,s2c_byte_num,device_group,data_center,stat_time
FROM top_app_log_view;

View File

@@ -0,0 +1,55 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
#--------------------------------HTTP------------------------------#
#kafka 证书地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=liveChart_interim.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=INTERIM-SESSION-RECORD
#补全数据 输出 topic
sink.kafka.topic=TRAFFIC-PROTOCOL-STAT
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=liveCharts-interim-20220408-1
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=1
#map函数并行度
parse.parallelism=2
#第一次窗口计算并行度
first.window.parallelism=2
#第二次窗口计算并行度
second.window.parallelism=2
#producer 并行度
sink.parallelism=1
#初次随机预聚合窗口时间
first.count.window.time=5
#聚合窗口时间
second.count.window.time=15

View File

@@ -0,0 +1,55 @@
#--------------------------------地址配置------------------------------#
#管理kafka地址
source.kafka.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
#管理输出kafka地址
sink.kafka.servers=192.168.44.11:9094,192.168.44.14:9094,192.168.44.15:9094
#--------------------------------HTTP------------------------------#
#kafka 证书地址
tools.library=/home/bigdata/topology/dat/
#--------------------------------nacos配置------------------------------#
#nacos 地址
nacos.server=192.168.44.67:8848
#nacos namespace
nacos.schema.namespace=f507879a-8b1b-4330-913e-83d4fcdc14bb
#nacos data id
nacos.data.id=liveChart_session.json
#--------------------------------Kafka消费组信息------------------------------#
#kafka 接收数据topic
source.kafka.topic=SESSION-RECORD
#补全数据 输出 topic
sink.kafka.topic=TRAFFIC-PROTOCOL-STAT
#读取topic,存储该spout id的消费offset信息可通过该拓扑命名;具体存储offset的位置确定下次读取不重复的数据
group.id=liveCharts-session-20220408-1
#--------------------------------topology配置------------------------------#
#consumer 并行度
source.parallelism=5
#map函数并行度
parse.parallelism=5
#第一次窗口计算并行度
first.window.parallelism=5
#第二次窗口计算并行度
second.window.parallelism=5
#producer 并行度
sink.parallelism=5
#初次随机预聚合窗口时间
first.count.window.time=5
#二次聚合窗口时间
second.count.window.time=15

View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Start-all script: submits one Flink job (StreamAggregateTopology) per
# configuration file found under the directory given as the first argument.
source /etc/profile
# Directory containing the job jar (the current working directory)
BASE_DIR=$(pwd)
# Pristine jar shipped with the release
primordial='log-olap-analysis-schema-220323-Nacos.jar'
# Working copy whose embedded config is replaced per job
# NOTE(review): the temp name says "log-completion" although the source jar is
# the olap-analysis one — kept as-is for compatibility, but worth renaming.
JAR_NAME='log-completion-schema_tmp.jar'
# Default job parallelism
PARALLELISM=1
yes | cp -r "$primordial" "$JAR_NAME"
#cd $BASE_DIR
# Extract the packaged default config so the entry exists before it is updated below
jar -xvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
# Recursively walk the config directory; for every plain file whose name is
# not already present in `flink list`, repackage the jar with that config and
# submit the job.
function read_dir(){
    for file in `ls $1`
    do
        if [ -d "$1/$file" ]
        then
            read_dir "$1/$file"
        else
            num=$(flink list | grep "$file" | wc -l)
            if [ "$num" -eq "0" ];then
                # BUG FIX: original used `cat $1$file` (missing "/"), which
                # resolves to a non-existent path unless $1 ends with a slash.
                cat "$1/$file" > "$BASE_DIR/service_flow_config.properties"
                jar -uvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
                flink run -d -c com.zdjizhi.topology.StreamAggregateTopology -p $PARALLELISM "$JAR_NAME" "$file"
            fi
        fi
    done
}
if [ $# != 1 ];then
    echo "usage: ./startall.sh [Configuration path]"
    exit 1
fi
# First argument: directory holding the per-job configuration files
read_dir "$1"
rm -rf "$JAR_NAME"

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Start-all script: submits one Flink job (RadiusKnowledgeTopology) per
# configuration file found under the directory given as the first argument.
source /etc/profile
# Directory containing the job jar (the current working directory)
BASE_DIR=$(pwd)
# Pristine jar shipped with the release
primordial='radius-account-knowledge-220413-sink.jar'
# Working copy whose embedded config is replaced per job
JAR_NAME='radius-account-knowledge_tmp.jar'
# Default job parallelism
PARALLELISM=1
yes | cp -r "$primordial" "$JAR_NAME"
#cd $BASE_DIR
# Extract the packaged default config so the entry exists before it is updated below
jar -xvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
# Recursively walk the config directory; for every plain file whose name is
# not already present in `flink list`, repackage the jar with that config and
# submit the job.
function read_dir(){
    for file in `ls $1`
    do
        if [ -d "$1/$file" ]
        then
            read_dir "$1/$file"
        else
            num=$(flink list | grep "$file" | wc -l)
            if [ "$num" -eq "0" ];then
                # BUG FIX: original used `cat $1$file` (missing "/"), which
                # resolves to a non-existent path unless $1 ends with a slash.
                cat "$1/$file" > "$BASE_DIR/service_flow_config.properties"
                jar -uvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
                flink run -d -c com.zdjizhi.topology.RadiusKnowledgeTopology -p $PARALLELISM "$JAR_NAME" "$file"
            fi
        fi
    done
}
if [ $# != 1 ];then
    echo "usage: ./startall.sh [Configuration path]"
    exit 1
fi
# First argument: directory holding the per-job configuration files
read_dir "$1"
rm -rf "$JAR_NAME"

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Start-all script: submits one Flink job (VoIpRelationTopology) per
# configuration file found under the directory given as the first argument.
source /etc/profile
# Directory containing the job jar (the current working directory)
BASE_DIR=$(pwd)
# Pristine jar shipped with the release
primordial='log-stream-voip-relation-220413-sink.jar'
# Working copy whose embedded config is replaced per job
JAR_NAME='log-stream-voip-relation_tmp.jar'
# Default job parallelism
PARALLELISM=1
yes | cp -r "$primordial" "$JAR_NAME"
#cd $BASE_DIR
# Extract the packaged default config so the entry exists before it is updated below
jar -xvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
# Recursively walk the config directory; for every plain file whose name is
# not already present in `flink list`, repackage the jar with that config and
# submit the job.
function read_dir(){
    for file in `ls $1`
    do
        if [ -d "$1/$file" ]
        then
            read_dir "$1/$file"
        else
            num=$(flink list | grep "$file" | wc -l)
            if [ "$num" -eq "0" ];then
                # BUG FIX: original used `cat $1$file` (missing "/"), which
                # resolves to a non-existent path unless $1 ends with a slash.
                cat "$1/$file" > "$BASE_DIR/service_flow_config.properties"
                jar -uvf "$BASE_DIR/$JAR_NAME" service_flow_config.properties
                flink run -d -c com.zdjizhi.topology.VoIpRelationTopology -p $PARALLELISM "$JAR_NAME" "$file"
            fi
        fi
    done
}
if [ $# != 1 ];then
    echo "usage: ./startall.sh [Configuration path]"
    exit 1
fi
# First argument: directory holding the per-job configuration files
read_dir "$1"
rm -rf "$JAR_NAME"

View File

@@ -0,0 +1,99 @@
{
"version": "1.0",
"name": "ClickHouse-Raw",
"namespace": "ClickHouse",
"filters": [
{
"name":"@start",
"value": "'2021-10-19 10:00:00'"
},
{
"name":"@end",
"value": "'2021-10-20 11:00:00'"
},
{
"name":"@common_filter",
"value": [
"common_log_id=1153021139190754263",
"common_client_ip='118.180.48.74'",
"common_client_ip='120.242.132.200'",
"common_internal_ip='223.116.37.192'",
"common_server_ip='8.8.8.8'",
"common_server_ip='114.114.114.114'",
"common_server_ip!='114.114.114.114'",
"common_server_ip='120.239.72.226'",
"common_external_ip='111.10.53.14'",
"common_client_port=52607",
"common_server_port=443",
"common_c2s_pkt_num>5",
"common_s2c_pkt_num>5",
"common_c2s_byte_num>100",
"common_s2c_byte_num<200",
"common_schema_type='DNS'",
"common_establish_latency_ms>200",
"common_con_duration_ms>10000",
"common_stream_trace_id=1153021139190754263",
"common_tcp_client_isn=2857077935",
"common_tcp_server_isn=0",
"http_domain='qq.com'",
"http_domain!='qq.com'",
"http_domain='yunser.com'",
"mail_account='abc@xx.com'",
"mail_subject='test'",
"dns_qname='qbwup.imtt.qq.com'",
"ssl_sni='mmbiz.qpic.cn'",
"ssl_sni='openai.qq.com'",
"ssl_con_latency_ms>100",
"ssl_ja3_hash='a0e9f5d64349fb13191bc781f81f42e1'",
"common_client_ip='36.189.226.21' and common_server_ip='8.8.8.8'",
"common_server_ip='111.10.53.14' and common_server_port=443",
"common_server_ip like '120.239%'",
"common_server_ip not like '120.239%'",
"common_server_ip like '%114.114%'",
"mail_account like 'abc@%'",
"http_domain like '%baidu.com%'",
"ssl_sni like '%google.com'",
"http_domain like 'baidu%'",
"common_client_ip in ('120.239.72.226','114.114.114.114')",
"common_client_ip not in ('120.239.72.226','114.114.114.114')",
"common_server_ip='116.177.248.126' and notEmpty(http_domain)",
"common_server_ip='116.177.248.126' and common_client_ip='120.242.132.200'",
"common_server_ip='116.177.248.126' and common_stream_trace_id=1153021139190754263",
"common_client_ip='120.242.132.200' and common_server_ip='116.177.248.126'",
"http_domain='qq.com' or common_server_ip='120.239.72.226'",
"common_server_port not in (80,443)",
"http_domain not like '%qq.com'"
]
},
{
"name":"@index_filter",
"value": [
"common_log_id=1153021139190754263",
"common_client_ip='118.180.48.74'",
"common_client_ip='120.242.132.200'",
"common_server_ip='114.114.114.114'",
"common_server_ip!='114.114.114.114'",
"common_server_ip='120.239.72.226'",
"http_domain='qq.com'",
"http_domain!='qq.com'",
"http_domain='yunser.com'",
"ssl_sni='mmbiz.qpic.cn'",
"ssl_sni='openai.qq.com'",
"common_server_ip like '120.239%'",
"common_server_ip not like '120.239%'",
"common_server_ip like '%114.114%'",
"common_subscriber_id='%test%'",
"http_domain like 'baidu%'",
"http_domain like '%baidu.com%'",
"common_client_ip in ('120.239.72.226','114.114.114.114')",
"common_client_ip not in ('120.239.72.226','114.114.114.114')",
"common_server_ip='116.177.248.126' and notEmpty(http_domain)",
"common_server_ip='116.177.248.126' and common_client_ip='120.242.132.200'",
"common_server_ip='116.177.248.126' and common_stream_trace_id=1153021139190754263",
"common_client_ip='120.242.132.200' and common_server_ip='116.177.248.126'",
"http_domain='qq.com' or common_server_ip='120.239.72.226'"
]
}
]
}

View File

@@ -0,0 +1,118 @@
--Q01.Count(1)
select count(1) FROM tsg_galaxy_v3.session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end)
--Q02.All Fields Query (default)
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) LIMIT 30
--Q03.All Fields Query order by Time desc
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 30
--Q04.All Fields Query order by Time asc
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time asc LIMIT 30
--Q05.All Fields Query by Filter
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @common_filter ORDER BY common_recv_time DESC LIMIT 30
--Q06.Default Fields Query by Filter
SELECT toDateTime(common_recv_time) AS common_recv_time , common_log_id , common_client_ip , common_client_port , common_server_ip , common_server_port FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @common_filter ORDER BY common_recv_time DESC LIMIT 30
--Q07.All Fields Query (sub query by time)
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE toDateTime(common_recv_time) IN ( SELECT toDateTime(common_recv_time) FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 30 ) AND common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 30
--Q08.All Fields Query (sub query by log id)
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( SELECT common_log_id FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 30 ) AND common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 30
--Q09.Default Field Query (sub query by time)
SELECT toDateTime(common_recv_time) AS common_recv_time_str , common_log_id , common_client_ip , common_client_port , common_server_ip , common_server_port FROM tsg_galaxy_v3.session_record AS session_record WHERE toDateTime(common_recv_time) IN ( SELECT toDateTime(common_recv_time) FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY toDateTime(common_recv_time) DESC LIMIT 30 ) AND common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 30
--Q10.Default Field Query (sub query by log id)
SELECT toDateTime(common_recv_time) AS common_recv_time_str , common_log_id , common_client_ip , common_client_port , common_server_ip , common_server_port FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( select common_log_id FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY toDateTime(common_recv_time) DESC LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end)) ORDER BY common_recv_time DESC LIMIT 30
--Q11.Default Field Query by Server IP (sub query by log id with Index Table)
SELECT toDateTime(common_recv_time) AS common_recv_time_str , common_log_id , common_client_ip , common_client_port , common_server_ip , common_server_port FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( Select common_log_id FROM tsg_galaxy_v3.session_record_common_server_ip AS session_record_common_server_ip WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ORDER BY toDateTime(common_recv_time) DESC LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ) ORDER BY common_recv_time DESC LIMIT 30
--Q12.Default Field Query by Client IP (sub query by log id with Index Table)
SELECT toDateTime(common_recv_time) AS common_recv_time_str , common_log_id , common_client_ip , common_client_port , common_server_ip , common_server_port FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( Select common_log_id FROM tsg_galaxy_v3.session_record_common_client_ip AS session_record_common_client_ip WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ORDER BY toDateTime(common_recv_time) DESC LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ) ORDER BY common_recv_time DESC LIMIT 30
--Q13.Default Field Query by Domain (sub query by log id with Index Table)
SELECT toDateTime(common_recv_time) AS common_recv_time_str , common_log_id , common_client_ip , common_client_port , common_server_ip , common_server_port FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( Select common_log_id FROM tsg_galaxy_v3.session_record_http_domain AS session_record_http_domain WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ORDER BY toDateTime(common_recv_time) DESC LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ) ORDER BY common_recv_time DESC LIMIT 30
--Q14.All Fields Query by Client IP (sub query by log id with index Table)
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( SELECT common_log_id FROM tsg_galaxy_v3.session_record_common_client_ip AS session_record_common_client_ip WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ORDER BY toDateTime(common_recv_time) DESC LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ) ORDER BY common_recv_time desc LIMIT 30
--Q15.All Fields Query by Server IP(sub query by log id with index Table)
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( SELECT common_log_id FROM tsg_galaxy_v3.session_record_common_server_ip AS session_record_common_server_ip WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ORDER BY common_recv_time LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ) ORDER BY common_recv_time desc LIMIT 30
--Q16.All Fields Query by Domain(sub query by log id with index Table)
SELECT * FROM tsg_galaxy_v3.session_record AS session_record WHERE common_log_id IN ( SELECT common_log_id FROM tsg_galaxy_v3.session_record_http_domain AS session_record_http_domain WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ORDER BY common_recv_time LIMIT 30 ) AND ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) AND @index_filter ) ORDER BY common_recv_time desc LIMIT 30
--Q17.Session Logs Sent to Database Trend(Time Grain 5 minute)
SELECT toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE))) AS "Receive Time", count(common_log_id) AS "logs" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ) ) GROUP BY "Receive Time" LIMIT 10000
--Q18.Traffic Bandwidth Trend(Time Grain 30 second)
SELECT toDateTime(toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 30 SECOND)))) AS stat_time, sum(common_c2s_byte_num) AS bytes_sent, sum(common_s2c_byte_num) AS bytes_received, sum(common_c2s_byte_num + common_s2c_byte_num) AS bytes, sum(common_c2s_pkt_num + common_s2c_pkt_num) AS packets, sum(common_sessions) AS sessions FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) GROUP BY stat_time ORDER BY stat_time ASC LIMIT 10000
--Q19.Log Trend by Type (Time Grain 5 minute)
SELECT toDateTime(toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE)))) AS stat_time, common_schema_type AS type, sum(common_sessions) AS sessions, sum(common_c2s_byte_num + common_s2c_byte_num) AS bytes, sum(common_c2s_pkt_num + common_s2c_pkt_num) AS packets FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) GROUP BY stat_time, common_schema_type ORDER BY stat_time ASC LIMIT 10000
--Q20.Traffic Metrics Analytic
SELECT round(sum(common_s2c_byte_num) * 8 / 300,2) AS trafficInBits, round(sum(common_c2s_byte_num) * 8 / 300,2) AS trafficOutBits, round(sum(common_s2c_byte_num + common_c2s_byte_num) * 8 / 300,2) AS trafficTotalBits, round(sum(common_s2c_pkt_num) / 300,2) AS trafficInPackets, round(sum(common_c2s_pkt_num) / 300,2) AS trafficOutPackets, round(sum(common_s2c_pkt_num + common_c2s_pkt_num) / 300,2) AS trafficTotalPackets, round(sum(common_sessions) / 300,2) AS sessions FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end)
--Q21.Traffic Endpoints Metrics Trend(Time Grain 5 minute)
SELECT toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE))) AS "Receive Time", uniq(common_internal_ip) AS "Unique Internal IP", uniq(common_external_ip) AS "Unique External IP", uniq(common_subscriber_id) AS "Unique Subscriber ID", sum(coalesce(common_c2s_byte_num + common_s2c_byte_num, 0)) AS "Bytes", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Receive Time" LIMIT 10000
--Q22.Endpoint Unique Num by L4 Protocol
SELECT 'all' AS type, uniq(common_client_ip) AS client_ips, uniq(common_internal_ip) AS internal_ips, uniq(common_server_ip) AS server_ips, uniq(common_external_ip) AS external_ips, uniq(common_subscriber_id) as subscriber_ids FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) UNION ALL SELECT 'tcp' AS type, uniq(common_client_ip) AS client_ips, uniq(common_internal_ip) AS internal_ips, uniq(common_server_ip) AS server_ips, uniq(common_external_ip) AS external_ips, uniq(common_subscriber_id) as subscriber_ids FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND common_l4_protocol IN ( 'IPv4_TCP', 'IPv6_TCP' ) UNION ALL SELECT 'UDP' AS type, uniq(common_client_ip) AS client_ips, uniq(common_internal_ip) AS internal_ips, uniq(common_server_ip) AS server_ips, uniq(common_external_ip) AS external_ips, uniq(common_subscriber_id) as subscriber_ids FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND common_l4_protocol IN ( 'IPv4_UDP', 'IPv6_UDP' )
--Q23.One-sided Connection Trend(Time Grain 5 minute)
SELECT toDateTime(toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE)))) AS stat_time, (CASE WHEN common_stream_dir = 1 THEN 'c2s' WHEN common_stream_dir = 2 THEN 's2c' WHEN common_stream_dir = 3 THEN 'double' ELSE 'None' END) AS type, sum(common_sessions) AS sessions FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) GROUP BY stat_time, common_stream_dir ORDER BY stat_time ASC LIMIT 10000
--Q24. Estimated One-sided Sessions with Bandwidth
SELECT toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE))) AS "Receive Time", sum(common_sessions) AS "sessions", sum(if(common_stream_dir <> 3, common_sessions, 0)) AS "one_side_sessions", sum(coalesce(common_c2s_byte_num + common_s2c_byte_num, 0)) AS "Bytes", round(one_side_sessions / sessions, 2) AS one_side_percent FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Receive Time" LIMIT 10000
--Q25.Estimated TCP Sequence Gap Loss
SELECT toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE))) AS "Receive Time", sum(common_c2s_byte_num + common_s2c_byte_num) AS "bytes", sum(common_c2s_tcp_lostlen + common_s2c_tcp_lostlen) AS "gap_loss_bytes", round(gap_loss_bytes / bytes, 2) AS gap_loss_percent FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( common_l4_protocol IN ( 'IPv4_TCP', 'IPv6_TCP' ) ) GROUP BY "Receive Time" LIMIT 10000
--Q26.Top30 Server IP by Bytes
SELECT "server_ip" AS "server_ip" , SUM(coalesce("bytes",0)) AS "bytes" , SUM(coalesce("bytes_sent",0)) AS "Sent" , SUM(coalesce("bytes_received",0)) AS "Received" , SUM(coalesce("sessions",0)) AS "sessions" FROM ( SELECT SUM(coalesce(common_c2s_byte_num,0)) AS "bytes_sent" , SUM(coalesce(common_s2c_byte_num,0)) AS "bytes_received" , SUM(common_c2s_byte_num+common_s2c_byte_num) AS "bytes" , SUM(coalesce(common_sessions,0)) AS "sessions" , common_server_ip AS "server_ip" FROM tsg_galaxy_v3.session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty( common_server_ip) ) GROUP BY "server_ip" ORDER BY "bytes" desc ) GROUP BY "server_ip" ORDER BY "bytes" desc LIMIT 30
--Q27.Top30 Client IP by Sessions
SELECT common_client_ip , COUNT(*) AS sessions FROM tsg_galaxy_v3.session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) GROUP BY common_client_ip ORDER BY sessions desc LIMIT 0,30
--Q28.Top30 TCP Server Ports by Sessions
SELECT "Server Port" AS "Server Port", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT common_server_port AS "Server Port", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( common_l4_protocol IN ( 'IPv4_TCP', 'IPv6_TCP' ) ) GROUP BY "Server Port" LIMIT 1048576) GROUP BY "Server Port" ORDER BY "Sessions" DESC LIMIT 30
--Q29.Top30 Domain by Bytes
SELECT "domain" AS "Website Domain" , SUM(coalesce("bytes",0)) AS "Throughput" FROM ( SELECT SUM(coalesce(common_c2s_byte_num,0)) AS "bytes_sent" , SUM(coalesce(common_s2c_byte_num,0)) AS "bytes_received" , SUM(coalesce(common_c2s_byte_num+common_s2c_byte_num,0)) AS "bytes" , http_domain AS "domain" FROM tsg_galaxy_v3.session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty( http_domain) ) GROUP BY "domain" ORDER BY "bytes" desc ) GROUP BY "domain" ORDER BY "Throughput" desc LIMIT 30
--Q30.Top30 Endpoint Devices by Bandwidth
SELECT "device_id" AS "device_id", sum(coalesce("bytes", 0)) AS "bytes", sum(coalesce("bytes_sent", 0)) AS "Sent", sum(coalesce("bytes_received", 0)) AS "Received" FROM (SELECT sum(coalesce(common_c2s_byte_num, 0)) AS "bytes_sent", sum(coalesce(common_s2c_byte_num, 0)) AS "bytes_received", sum(common_c2s_byte_num + common_s2c_byte_num) AS bytes, common_device_id AS "device_id" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "device_id" ORDER BY "bytes" DESC LIMIT 1048576) GROUP BY "device_id" ORDER BY "bytes" DESC LIMIT 30
--Q31.Top30 Domain by Unique Client IP
SELECT "Http.Domain" AS "Http.Domain", sum(coalesce("Client IP", 0)) AS "Client IP" FROM (SELECT http_domain AS "Http.Domain", uniq(common_client_ip) AS "Client IP" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty(http_domain) ) GROUP BY "Http.Domain" ORDER BY "Client IP" DESC LIMIT 1048576) GROUP BY "Http.Domain" ORDER BY "Client IP" DESC LIMIT 30
--Q32.Top100 Most Time Consuming Domains
SELECT "Domain" AS "Domain", avg(coalesce("Avg Establish Latency(ms)", 0)) AS "Avg Establish Latency(ms)" FROM (SELECT http_domain AS "Domain", avg(coalesce(common_establish_latency_ms, 0)) AS "Avg Establish Latency(ms)" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty(http_domain) ) GROUP BY "Domain" LIMIT 1048576) GROUP BY "Domain" ORDER BY "Avg Establish Latency(ms)" DESC LIMIT 100
--Q33.Top30 Sources by Sessions
SELECT "source" AS "source", sum(coalesce("sessions", 0)) AS "sessions" FROM (SELECT coalesce(nullif(common_subscriber_id, ''), nullif(common_client_ip, '')) AS "source", sum(coalesce(common_sessions, 0)) AS "sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "source" ORDER BY "sessions" DESC LIMIT 1048576) GROUP BY "source" ORDER BY "sessions" DESC LIMIT 30
--Q34.Top30 Destinations by Sessions
SELECT "destination" AS "destination", sum(coalesce("sessions", 0)) AS "sessions" FROM (SELECT coalesce(nullif(http_domain, ''), nullif(common_server_ip, '')) AS "destination", sum(coalesce(common_sessions, 0)) AS "sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "destination" ORDER BY "sessions" DESC LIMIT 1048576) GROUP BY "destination" ORDER BY "sessions" DESC LIMIT 30
--Q35.Top30 Destination Regions by Bandwidth
SELECT "server_location" AS "server_location", sum(coalesce("bytes", 0)) AS "bytes", sum(coalesce("bytes_sent", 0)) AS "Sent", sum(coalesce("bytes_received", 0)) AS "Received" FROM (SELECT arrayElement(splitByString(',', common_server_location), length(splitByString(',', common_server_location))) AS "server_location", sum(coalesce(common_c2s_byte_num, 0)) AS "bytes_sent", sum(coalesce(common_s2c_byte_num, 0)) AS "bytes_received", sum(coalesce(common_c2s_byte_num + common_s2c_byte_num, 0)) AS "bytes", sum(coalesce(common_sessions, 0)) AS "sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "server_location" ORDER BY "bytes" DESC LIMIT 1048576) GROUP BY "server_location" ORDER BY "bytes" DESC LIMIT 30
--Q36.Top30 URLS by Sessions
SELECT "Http URL" AS "Http URL", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT http_url AS "Http URL", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Http URL" LIMIT 1048576) GROUP BY "Http URL" ORDER BY "Sessions" DESC LIMIT 30
--Q37.Top30 Destination Transmission APP by Bandwidth
SELECT "server_ip" AS "server_ip", groupUniqArray(coalesce("trans_app", 0)) AS "trans_app", sum(coalesce("bytes", 0)) AS "bytes", sum(coalesce("bytes_sent", 0)) AS "Sent", sum(coalesce("bytes_received", 0)) AS "Received" FROM (SELECT sum(coalesce(common_c2s_byte_num, 0)) AS "bytes_sent", sum(coalesce(common_s2c_byte_num, 0)) AS "bytes_received", sum(common_c2s_byte_num + common_s2c_byte_num) AS "bytes", groupUniqArray(concat(common_l4_protocol, '/', toString(common_server_port))) AS "trans_app", common_server_ip AS "server_ip" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty(common_server_ip) ) GROUP BY "server_ip" ORDER BY "bytes" DESC LIMIT 1048576) GROUP BY "server_ip" ORDER BY "bytes" DESC LIMIT 30
--Q38.Browsing Users by Website domains and Sessions
SELECT "Subscriber ID" AS "Subscriber ID", "Http.Domain" AS "Http.Domain", sum(coalesce("sessions", 0)) AS "sessions" FROM (SELECT http_domain AS "Http.Domain", common_subscriber_id AS "Subscriber ID", sum(coalesce(common_sessions, 0)) AS "sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty(http_domain) AND notEmpty(common_subscriber_id) ) GROUP BY "Http.Domain", "Subscriber ID" ORDER BY "sessions" DESC LIMIT 1048576) GROUP BY "Subscriber ID", "Http.Domain" ORDER BY "sessions" DESC LIMIT 10000
--Q39.Top Domain and Server IP by Bytes Sent
SELECT "Http.Domain" AS "Http.Domain" , "Server IP" AS "Server IP" , SUM(coalesce("Bytes Sent",0)) AS "Bytes Sent" FROM ( SELECT common_server_ip AS "Server IP" , http_domain AS "Http.Domain" , SUM(coalesce(common_c2s_byte_num+common_s2c_byte_num,0)) AS "Bytes" , SUM(coalesce(common_c2s_byte_num,0)) AS "Bytes Sent" , SUM(coalesce(common_s2c_byte_num,0)) AS "Bytes Received" FROM tsg_galaxy_v3.session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty( http_domain) ) GROUP BY "Server IP" , "Http.Domain" ORDER BY "Bytes" desc LIMIT 1048576 ) GROUP BY "Http.Domain" , "Server IP" ORDER BY "Bytes Sent" desc LIMIT 10000
--Q40.Top30 Website Domains by Client IP and Sessions
SELECT "Http.Domain" AS "Http.Domain", "Client IP" AS "Client IP", sum(coalesce("sessions", 0)) AS "sessions" FROM (SELECT common_client_ip AS "Client IP", http_domain AS "Http.Domain", sum(coalesce(common_sessions, 0)) AS "sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty(http_domain) ) GROUP BY "Client IP", "Http.Domain" ORDER BY "sessions" DESC LIMIT 1048576) GROUP BY "Http.Domain", "Client IP" ORDER BY "sessions" DESC LIMIT 10000
--Q41.Domain is Accessed by Unique Client IP Trend(bytes, Time Grain 5 minute)
SELECT toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),300)*300) AS _time , http_domain AS Domain, COUNT(DISTINCT(common_client_ip)) AS nums FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND notEmpty(http_domain) AND http_domain IN ( SELECT http_domain FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND notEmpty(http_domain) GROUP BY http_domain ORDER BY SUM(common_s2c_byte_num+common_c2s_byte_num) DESC LIMIT 5 ) GROUP BY toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),300)*300) , http_domain ORDER BY toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),300)*300) DESC LIMIT 10000
--Q42.Domain is Accessed by Unique Client IP Trend(sessions, Time Grain 1 hour)
SELECT toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),3600)*3600) AS stat_time , http_domain , uniq (common_client_ip) AS nums FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start)-604800 AND common_recv_time < toDateTime(@end) AND http_domain IN ( SELECT http_domain FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND notEmpty(http_domain) GROUP BY http_domain ORDER BY COUNT(*) desc LIMIT 5 ) group by toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))), 3600)*3600), http_domain ORDER BY stat_time desc LIMIT 10000
--Q43.Bandwidth Trend with Device ID(Time Grain 5 minute)
SELECT toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE))) AS "Receive Time", common_device_id AS "Device ID", sum(coalesce(common_c2s_byte_num + common_s2c_byte_num, 0)) AS "Bytes" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Receive Time", "Device ID" LIMIT 10000
--Q44.Internal IP by Sled IP and Sessions
SELECT "Internal IP" AS "Internal IP", "Sled IP" AS "Sled IP", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT common_sled_ip AS "Sled IP", common_internal_ip AS "Internal IP", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Sled IP", "Internal IP" LIMIT 1048576) GROUP BY "Internal IP", "Sled IP" ORDER BY "Sessions" DESC LIMIT 10000
--Q45.Bandwidth Trend with Internal IP (Time Grain 5 minute)
SELECT toUnixTimestamp(toDateTime(toStartOfInterval(toDateTime(common_recv_time),INTERVAL 5 MINUTE))) AS "Receive Time", sum(coalesce(common_c2s_byte_num + common_s2c_byte_num, 0)) AS "Bytes", sum(coalesce(common_c2s_pkt_num + common_s2c_pkt_num, 0)) AS "Packets", sum(coalesce(common_sessions, 0)) AS "New Sessions", sum(coalesce(common_c2s_byte_num, 0)) AS "Bytes Sent", sum(coalesce(common_s2c_byte_num, 0)) AS "Bytes Received", sum(coalesce(common_c2s_pkt_num, 0)) AS "Packets Sent", sum(coalesce(common_s2c_pkt_num, 0)) AS "Packets Received" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) AND @common_filter ) GROUP BY "Receive Time" LIMIT 10000
--Q46.Top30 Domains Detail with Internal IP
SELECT "Domain" AS "Domain", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT http_domain AS "Domain", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) AND @common_filter ) AND ( notEmpty(http_domain) ) GROUP BY "Domain" LIMIT 1048576) GROUP BY "Domain" ORDER BY "Sessions" DESC LIMIT 30
--Q47.Top30 URLS Detail with Internal IP
SELECT "URL" AS "URL", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT http_url AS "URL", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) AND @common_filter ) AND ( notEmpty(http_url) ) GROUP BY "URL" LIMIT 1048576) GROUP BY "URL" ORDER BY "Sessions" DESC LIMIT 30
--Q48.Top Domains with Unique Client IP and Subscriber ID
SELECT "Http.Domain" AS "Http.Domain", sum(coalesce("Unique Client IP", 0)) AS "Unique Client IP", sum(coalesce("Unique Subscriber ID", 0)) AS "Unique Subscriber ID" FROM (SELECT http_domain AS "Http.Domain", uniq(common_client_ip) AS "Unique Client IP", uniq(common_subscriber_id) AS "Unique Subscriber ID" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( notEmpty(http_domain) ) GROUP BY "Http.Domain" LIMIT 1048576) GROUP BY "Http.Domain" ORDER BY "Unique Client IP" DESC LIMIT 100
--Q49.Top100 Domains by Packets sent
SELECT "Http.Domain" AS "Http.Domain", sum(coalesce("Packets Sent", 0)) AS "Packets Sent" FROM (SELECT http_domain AS "Http.Domain", sum(coalesce(common_c2s_pkt_num, 0)) AS "Packets Sent" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Http.Domain" LIMIT 1048576) GROUP BY "Http.Domain" ORDER BY "Packets Sent" DESC LIMIT 100
--Q50.Internal and External asymmetric traffic
SELECT "Internal IP" AS "Internal IP", "External IP" AS "External IP", "Sled IP" AS "Sled IP", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT common_sled_ip AS "Sled IP", common_external_ip AS "External IP", common_internal_ip AS "Internal IP", sum(coalesce(common_c2s_byte_num + common_s2c_byte_num, 0)) AS "Bytes Sent+Bytes Received", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( common_stream_dir != 3 ) GROUP BY "Sled IP", "External IP", "Internal IP" LIMIT 1048576) GROUP BY "Internal IP", "External IP", "Sled IP" ORDER BY "Sessions" DESC LIMIT 500
--Q51.Client and Server ASN asymmetric traffic
SELECT "Client ASN" AS "Client ASN", "Server ASN" AS "Server ASN", sum(coalesce("Sessions", 0)) AS "Sessions" FROM (SELECT common_server_asn AS "Server ASN", common_client_asn AS "Client ASN", sum(coalesce(common_sessions, 0)) AS "Sessions" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) AND ( common_stream_dir != 3 ) GROUP BY "Server ASN", "Client ASN" LIMIT 1048576) GROUP BY "Client ASN", "Server ASN" ORDER BY "Sessions" DESC LIMIT 500
--Q52.Top handshake latency by Website and Client IPs
SELECT "SSL.SNI" AS "SSL.SNI", "Client IP" AS "Client IP", avg(coalesce("Establish Latency(ms)", 0)) AS "Establish Latency(ms)" FROM (SELECT common_client_ip AS "Client IP", ssl_sni AS "SSL.SNI", avg(coalesce(common_establish_latency_ms, 0)) AS "Establish Latency(ms)" FROM tsg_galaxy_v3.session_record AS session_record WHERE ( ( common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) ) ) GROUP BY "Client IP", "SSL.SNI" LIMIT 1048576) GROUP BY "SSL.SNI", "Client IP" ORDER BY "Establish Latency(ms)" DESC LIMIT 500
--Q53.Domain baidu.com Drill down Client IP
select common_client_ip as "Client IP" , avg(common_establish_latency_ms) as "Establishing Time Mean(ms)", count(1) as Responses,any(common_client_location) as Location FROM tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) and http_domain='baidu.com' group by "Client IP" order by Responses desc limit 100
--Q54.Domain baidu.com Drill down Server IP
select common_server_ip as "Server IP" , avg(http_response_latency_ms) as "Server Processing Time Mean(ms)", count(1) as Responses,any(common_server_location) as Location FROM tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) and http_domain='baidu.com' group by "Server IP" order by Responses desc limit 100
--Q55.Domain baidu.com Drill down URI
select http_url as "URI" , avg(http_response_latency_ms) as "Server Processing Time Mean(ms)", count(1) as Responses FROM tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) and http_domain='baidu.com' group by "URI" order by Responses desc limit 100
--Q56.L7 Protocol Metrics
select common_l7_protocol as "Protocol" , uniq(common_client_ip) as "Clients" , uniq(common_server_ip) as "Servers", count(1) as Sessions,sum(common_c2s_byte_num+common_s2c_byte_num) as bytes FROM tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) and notEmpty(common_l7_protocol) group by common_l7_protocol order by bytes desc
--Q57.L7 Protocol SIP Drill down Client IP
select common_client_ip as "Client IP" , count(1) as Sessions,sum(common_c2s_byte_num) as "Bytes Out", sum(common_s2c_byte_num) as "Bytes In",any(common_client_location) as Location FROM tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) and common_l7_protocol='SIP' group by "Client IP" order by Sessions desc limit 100
--Q58.L7 Protocol SIP Drill down Server IP
select common_server_ip as "Server IP" , count(1) as Sessions,sum(common_c2s_byte_num) as "Bytes Out", sum(common_s2c_byte_num) as "Bytes In",any(common_server_location) as Location FROM tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) and common_l7_protocol='SIP' group by "Server IP" order by Sessions desc limit 100
--Q59.Top5 Server IP keys with Unique Client IPs Trend (Grain 5 minute)
SELECT toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),300)*300) AS _time , common_server_ip AS server_ip, COUNT(DISTINCT(common_client_ip)) AS nums FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND common_server_ip IN ( SELECT common_server_ip FROM tsg_galaxy_v3.session_record AS session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) GROUP BY common_server_ip ORDER BY count(*) DESC LIMIT 5 ) GROUP BY toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),300)*300) , server_ip ORDER BY toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))),300)*300) DESC LIMIT 10000

View File

@@ -0,0 +1,21 @@
{
"version": "1.0",
"name": "druid-Raw",
"namespace": "druid",
"filters": [
{
"name":"@start",
"value": "'2021-10-19 10:00:00'"
},
{
"name":"@end",
"value": "'2021-10-20 11:00:00'"
},
{
"name":"@common_filter",
"value": [
"common_client_ip='192.168.44.21'and common_server_port=443"
]
}
]
}

View File

@@ -0,0 +1,92 @@
--Q01.All Security Event Hits
select policy_id, sum(hits) as hits from security_event_hits_log where __time >@start and __time <@end group by policy_id
--Q02.Security Event Hits with Policy ID 0
select policy_id, sum(hits) as hits from security_event_hits_log where __time >@start and __time <@end and policy_id in (0) group by policy_id
--Q03.All Security Event Hits Trend by 5min A
select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as start_time, sum(hits) as hits from security_event_hits_log where __time >= TIMESTAMP @start and __time < TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') limit 10000
--Q04.Security Event Hit Time (first and last time) A
select policy_id,TIME_FORMAT(min(__time) ,'yyyy-MM-dd HH:mm:ss') as first_used, TIME_FORMAT(max(__time) ,'yyyy-MM-dd HH:mm:ss') as last_used from security_event_hits_log where policy_id in (0) group by policy_id
--Q05.Top 200 Security Policies
select policy_id, sum(hits) as hits from security_event_hits_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by policy_id order by hits desc limit 200
--Q06.Top 200 Security Policies with Action
select policy_id, action, sum(hits) as hits from security_event_hits_log where __time >=@start and __time <@end group by policy_id, action order by hits desc limit 200
--Q07.All Proxy Event Hits
select policy_id, sum(hits) as hits from proxy_event_hits_log where __time >=@start and __time <@end group by policy_id
--Q08.Proxy Event Hits with Policy ID 0
select policy_id, sum(hits) as hits from proxy_event_hits_log where __time >=@start and __time <@end and policy_id=0 group by policy_id
--Q09.All Proxy Event Hits Trend by 5min A
select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as start_time, sum(hits) as hits from proxy_event_hits_log where __time >= TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') limit 10000
--Q10.Proxy Event Hit Time (first and last time) A
select policy_id,TIME_FORMAT(min(__time) ,'yyyy-MM-dd HH:mm:ss') as first_used, TIME_FORMAT(max(__time) ,'yyyy-MM-dd HH:mm:ss') as last_used from proxy_event_hits_log where policy_id in (0) group by policy_id
--Q11.Top 200 Proxy Policies
select policy_id, sum(hits) as hits from proxy_event_hits_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by policy_id order by hits desc limit 200
--Q12.Top 200 Proxy Policies with sub Action
select policy_id, sub_action as action, sum(hits) as hits from proxy_event_hits_log where __time >=@start and __time <@end group by policy_id, sub_action order by hits desc limit 200
--Q13.Proxy Action Hits
select sub_action as action, sum(hits) as hits from proxy_event_hits_log where __time >= TIMESTAMP @start and __time < TIMESTAMP @end group by sub_action
--Q14.Proxy Action Hits Trend by 5min
select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as start_time, sub_action as action, sum(hits) as hits from proxy_event_hits_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') , sub_action limit 10000
--Q15.Traffic Metrics Pinning Hits
SELECT sum(not_pinning_num) AS sessions, 'notPinningNum' AS type FROM traffic_metrics_log WHERE __time >= @start AND __time < @end UNION ALL SELECT sum(pinning_num) AS sessions, 'pinningNum' AS type FROM traffic_metrics_log WHERE __time >= @start AND __time < @end UNION ALL SELECT sum(maybe_pinning_num) AS sessions, 'maybePinningNum' AS type FROM traffic_metrics_log WHERE __time >= @start AND __time < @end
--Q16.Traffic Metrics Pinning Trend by 5Min
SELECT TIME_FORMAT( MILLIS_TO_TIMESTAMP( 1000 * (TIMESTAMP_TO_MILLIS(time_floor(0.001 * TIMESTAMP_TO_MILLIS( __time) * 1000,'PT300S'))/1000)),'YYYY-MM-dd HH:mm:ss') AS statisticTime, sum(pinning_num) AS sessions FROM traffic_metrics_log WHERE __time >= @start AND __time < @end GROUP BY TIME_FORMAT( MILLIS_TO_TIMESTAMP( 1000 * (TIMESTAMP_TO_MILLIS(time_floor(0.001 * TIMESTAMP_TO_MILLIS( __time) * 1000,'PT300S'))/1000)),'YYYY-MM-dd HH:mm:ss') LIMIT 10000
--Q17.Traffic Metrics Not Pinning Trend by 5Min
SELECT TIME_FORMAT( MILLIS_TO_TIMESTAMP( 1000 * (TIMESTAMP_TO_MILLIS(time_floor(0.001 * TIMESTAMP_TO_MILLIS( __time) * 1000,'PT300S'))/1000)),'YYYY-MM-dd HH:mm:ss') AS statisticTime, sum(not_pinning_num) AS sessions FROM traffic_metrics_log WHERE __time>= @start AND __time < @end GROUP BY TIME_FORMAT( MILLIS_TO_TIMESTAMP( 1000 * (TIMESTAMP_TO_MILLIS(time_floor(0.001 * TIMESTAMP_TO_MILLIS( __time) * 1000,'PT300S'))/1000)),'YYYY-MM-dd HH:mm:ss') LIMIT 10000
--Q18.Traffic Metrics Maybe Pinning Trend by 5Min
SELECT TIME_FORMAT( MILLIS_TO_TIMESTAMP( 1000 * (TIMESTAMP_TO_MILLIS(time_floor(0.001 * TIMESTAMP_TO_MILLIS( __time) * 1000,'PT300S'))/1000)),'YYYY-MM-dd HH:mm:ss') AS statisticTime, sum(maybe_pinning_num) AS sessions FROM traffic_metrics_log WHERE __time >= @start AND __time < @end GROUP BY TIME_FORMAT( MILLIS_TO_TIMESTAMP( 1000 * (TIMESTAMP_TO_MILLIS(time_floor(0.001 * TIMESTAMP_TO_MILLIS( __time) * 1000,'PT300S'))/1000)),'YYYY-MM-dd HH:mm:ss') LIMIT 10000
--Q19.Traffic Metrics Throughput Bytes IN/OUT
select sum(total_in_bytes) as traffic_in_bytes, sum(total_out_bytes) as traffic_out_bytes from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end
--Q20. Traffic Metrics Throughput Packets IN/OUT
select sum(total_in_packets) as traffic_in_packets, sum(total_out_packets) as traffic_out_packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end
--Q21.Traffic Metrics New Sessions
select sum(new_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end
--Q22.Traffic Metrics Bandwidth Bytes IN/OUT
select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'traffic_in_bytes' as type, sum(total_in_bytes) as bytes from traffic_metrics_log where __time >= @start and __time < @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'traffic_out_bytes' as type, sum(total_out_bytes) as bytes from traffic_metrics_log where __time >= @start and __time < @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss')
--Q23.Traffic Metrics Bandwidth Packets IN/OUT
select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'traffic_in_packets' as type, sum(total_in_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'traffic_out_packets' as type, sum(total_out_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss')
--Q24.Traffic Metrics New Sessions Trend by 5Min
select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'new_conn_num' as type, sum(new_conn_num) as sessions from traffic_metrics_log where __time >= @start and __time < @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss')
--Q25.Traffic Metrics New and Live Sessions
select sum(new_conn_num) as new_conn_num, sum(established_conn_num) as established_conn_num from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end
--Q26.Traffic Metrics New and Live Sessions Trend by 5Min
select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'new_conn_num' as type, sum(new_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time < TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'established_conn_num' as type, sum(established_conn_num) as sessions from traffic_metrics_log where __time >= TIMESTAMP @start and __time < TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT30S'),'yyyy-MM-dd HH:mm:ss')
--Q27.Traffic Metrics Security Throughput Bytes
select sum(default_in_bytes+default_out_bytes) as default_bytes, sum(allow_in_bytes+allow_out_bytes) as allow_bytes, sum(deny_in_bytes+deny_out_bytes) as deny_bytes, sum(monitor_in_bytes+monitor_out_bytes) as monitor_bytes, sum(intercept_in_bytes+intercept_out_bytes) as intercept_bytes from traffic_metrics_log where __time >=TIMESTAMP @start and __time < TIMESTAMP @end
--Q28.Traffic Metrics Security Throughput Packets (per-action totals; each term sums inbound + outbound packet counters)
select sum(default_in_packets+default_out_packets) as default_packets, sum(allow_in_packets+allow_out_packets) as allow_packets, sum(deny_in_packets+deny_out_packets) as deny_packets, sum(monitor_in_packets+monitor_out_packets) as monitor_packets, sum(intercept_in_packets+intercept_out_packets) as intercept_packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end
--Q29.Traffic Metrics Security Throughput Sessions
select sum(default_conn_num) as default_sessions, sum(allow_conn_num) as allow_sessions, sum(deny_conn_num) as deny_sessions, sum(monitor_conn_num) as monitor_sessions, sum(intercept_conn_num) as intercept_sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end
--Q30.Traffic Metrics Security Bandwidth Bytes by 5Min
select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'default_bytes' as type, sum(default_in_bytes+default_out_bytes) as bytes from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'allow_bytes' as type, sum(allow_in_bytes+allow_out_bytes) as bytes from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'deny_bytes' as type, sum(deny_in_bytes+deny_out_bytes) as bytes from traffic_metrics_log where __time >= TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'monitor_bytes' as type, sum(monitor_in_bytes+monitor_out_bytes) as bytes from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'intercept_bytes' as type, sum(intercept_in_bytes+intercept_out_bytes) as bytes from traffic_metrics_log where __time >= @start and __time < @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss')
--Q31.Traffic Metrics Security Bandwidth Packets by 5Min
select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'default_packets' as type, sum(default_in_packets+default_out_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'allow_packets' as type, sum(allow_in_packets+allow_out_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'deny_packets' as type, sum(deny_in_packets+deny_out_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'monitor_packets' as type, sum(monitor_in_packets+monitor_out_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'intercept_packets' as type, sum(intercept_in_packets+intercept_out_packets) as packets from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss')
--Q32.Traffic Metrics Security Sessions Trend by 5Min
select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'default_conn_num' as type, sum(default_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'allow_conn_num' as type, sum(allow_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'deny_conn_num' as type, sum(deny_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'monitor_conn_num' as type, sum(monitor_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') union all select TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss') as stat_time, 'intercept_conn_num' as type, sum(intercept_conn_num) as sessions from traffic_metrics_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by TIME_FORMAT(time_floor(__time,'PT5M'),'yyyy-MM-dd HH:mm:ss')
--Q33.Top 100 Client IP by Sessions
select source as client_ip, sum(session_num) as sessions, sum(c2s_byte_num) as sent_bytes, sum(s2c_byte_num) as received_bytes, sum(c2s_byte_num + s2c_byte_num) as bytes, sum(c2s_pkt_num) as sent_packets ,sum(s2c_pkt_num) as received_packets, sum(c2s_pkt_num+s2c_pkt_num) as packets from top_client_ip_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end and order_by='sessions' group by source order by sessions desc limit 100
--Q34.Top 100 Server IP by Sessions
select destination as server_ip, sum(session_num) as sessions, sum(c2s_byte_num) as sent_bytes, sum(s2c_byte_num) as received_bytes, sum(c2s_byte_num + s2c_byte_num) as bytes, sum(c2s_pkt_num) as sent_packets ,sum(s2c_pkt_num) as received_packets, sum(c2s_pkt_num+s2c_pkt_num) as packets from top_server_ip_log where __time >= @start and __time < @end and order_by='sessions' group by destination order by sessions desc limit 100
--Q35.Top 100 Internal IP by Sessions
select source as internal_ip, sum(session_num) as sessions, sum(c2s_byte_num) as sent_bytes, sum(s2c_byte_num) as received_bytes, sum(c2s_byte_num + s2c_byte_num) as bytes, sum(c2s_pkt_num) as sent_packets ,sum(s2c_pkt_num) as received_packets, sum(c2s_pkt_num+s2c_pkt_num) as packets from top_internal_host_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end and order_by='sessions' group by source order by sessions desc limit 100
--Q36.Top 100 External IP by Sessions
select destination as external_ip, sum(session_num) as sessions, sum(c2s_byte_num) as sent_bytes, sum(s2c_byte_num) as received_bytes, sum(c2s_byte_num + s2c_byte_num) as bytes, sum(c2s_pkt_num) as sent_packets ,sum(s2c_pkt_num) as received_packets, sum(c2s_pkt_num+s2c_pkt_num) as packets from top_external_host_log where __time >= @start and __time < @end and order_by='sessions' group by destination order by sessions desc limit 100
--Q37.Top 100 Domain by Bytes
select domain, sum(session_num) as sessions, sum(c2s_byte_num) as sent_bytes, sum(s2c_byte_num) as received_bytes, sum(c2s_byte_num + s2c_byte_num) as bytes, sum(c2s_pkt_num) as sent_packets ,sum(s2c_pkt_num) as received_packets, sum(c2s_pkt_num+s2c_pkt_num) as packets from top_website_domain_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end and order_by='bytes' group by domain order by bytes desc limit 100
--Q38.Top 100 Subscriber ID by Sessions
select subscriber_id, sum(session_num) as sessions, sum(c2s_byte_num) as sent_bytes, sum(s2c_byte_num) as received_bytes, sum(c2s_byte_num + s2c_byte_num) as bytes, sum(c2s_pkt_num) as sent_packets ,sum(s2c_pkt_num) as received_packets, sum(c2s_pkt_num+s2c_pkt_num) as packets from top_user_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end and order_by='sessions' group by subscriber_id order by sessions desc limit 100
--Q39.Top 100 Hit URLS by hits
select url,sum(session_num) as hits from top_urls_log where __time >=TIMESTAMP @start and __time <TIMESTAMP @end group by url order by hits desc limit 100
--Q40.Proxy Event Unique ISP
SELECT policy_id, APPROX_COUNT_DISTINCT_DS_HLL(isp) as num FROM proxy_event_hits_log where __time >= @start and __time < @end group by policy_id
--Q41.Traffic Composition Metrics
SELECT APPROX_COUNT_DISTINCT_DS_HLL(ip_object) AS uniq_client_ip, SUM(one_sided_connections) AS one_sided_connections, SUM(uncategorized_bytes) AS total_uncategorized_bytes, SUM(fragmentation_packets) AS fragmentation_packets, SUM(sequence_gap_loss) AS sequence_gap_loss_bytes, SUM(s2c_byte_num+c2s_byte_num) AS summaryTotalBytes, SUM(s2c_pkt_num+c2s_pkt_num) AS summaryTotalPackets, SUM(sessions) AS summarySessions FROM traffic_summary_log WHERE __time >= TIMESTAMP @start AND __time < TIMESTAMP @end LIMIT 1
--Q42.Traffic Composition Throughput
(SELECT SUM(c2s_byte_num + s2c_byte_num) as total_bytes, SUM(sessions) as total_sessions, (SUM(c2s_byte_num + s2c_byte_num) * 8)/((TIMESTAMP_TO_MILLIS(TIMESTAMP @end )-TIMESTAMP_TO_MILLIS(TIMESTAMP @start ))/1000) AS data_rate FROM traffic_protocol_stat_log WHERE __time >= TIMESTAMP @start AND __time < TIMESTAMP @end AND protocol_id = 'ETHERNET' LIMIT 1) UNION ALL ( SELECT SUM(sessions), 0, 0 FROM traffic_protocol_stat_log WHERE __time >= TIMESTAMP @start AND __time < TIMESTAMP @end AND protocol_id = 'ETHERNET' GROUP BY __time ORDER BY __time DESC LIMIT 1 )
--Q43.Traffic Composition Protocol Tree
SELECT protocol_id, SUM(sessions) as sessions,SUM(c2s_byte_num) as c2s_byte_num, SUM(c2s_pkt_num) as c2s_pkt_num, SUM(s2c_byte_num) as s2c_byte_num, SUM(s2c_pkt_num) as s2c_pkt_num FROM traffic_protocol_stat_log WHERE __time >= TIMESTAMP @start AND __time < TIMESTAMP @end GROUP BY protocol_id
--Q44.System Quota
SELECT log_type, SUM(used_size) as used_size, SUM(max_size) * 7/10 as max_size, TIME_FORMAT(LATEST(last_storage) * 1000,'YYYY-MM-dd') as first_storage FROM ( SELECT log_type, LATEST(used_size) as used_size, LATEST(max_size) as max_size, LATEST(last_storage) as last_storage FROM sys_storage_log WHERE __time >= CURRENT_TIMESTAMP - INTERVAL '1' HOUR AND data_center != '' GROUP BY data_center,log_type ) GROUP BY log_type
--Q45.System Quota Daily Trend
select TIME_FORMAT(__time,'YYYY-MM-dd') as stat_time,log_type as type, sum(aggregate_size) as used_size from sys_storage_log where __time >= @start and __time < @end group by TIME_FORMAT(__time,'YYYY-MM-dd'), log_type
--Q46.Traffic Statistics(Metrics01)
select sum(total_hit_sessions) as total_hit_sessions, sum(total_bytes_transferred) as total_bytes_transferred, sum(total_packets_transferred) as total_packets_transferred, sum(total_new_sessions) as total_new_sessions , sum(total_close_sessions) as total_close_sessions, sum(average_new_sessions_per_second) as average_new_sessions_per_second , sum(average_bytes_per_second) as average_bytes_per_second , sum(average_packets_per_second) as average_packets_per_second , COUNT(DISTINCT(device_id)) as device_num, sum(live_sessions) as average_live_sessions from ( select device_id, sum(intercept_conn_num + monitor_conn_num + deny_conn_num + allow_conn_num) as total_hit_sessions, sum(total_in_bytes + total_out_bytes) as total_bytes_transferred, sum(total_in_packets + total_out_packets) as total_packets_transferred, sum(new_conn_num) as total_new_sessions, sum(close_conn_num) as total_close_sessions, avg(nullif(new_conn_num, 0))/ 5 as average_new_sessions_per_second, avg(nullif(total_in_bytes + total_out_bytes, 0))* 8 / 5 as average_bytes_per_second, avg(nullif(total_in_packets + total_out_packets, 0))/ 5 as average_packets_per_second, avg(nullif(established_conn_num, 0)) as live_sessions from traffic_metrics_log where __time >= @start and __time < @end group by device_id)

View File

@@ -0,0 +1,53 @@
{
"version": "1.0",
"name": "Engine-Raw",
"namespace": "Engine",
"filters": [
{
"name":"@start",
"value": "'2021-10-19 10:00:00'"
},
{
"name":"@end",
"value": "'2021-10-20 11:00:00'"
},
{
"name":"@common_filter",
"value": [
"common_log_id=1153021139190754263",
"common_client_ip='36.189.226.21'",
"common_internal_ip='223.116.37.192'",
"common_server_ip='8.8.8.8'",
"common_external_ip='111.10.53.14'",
"common_client_port=52607",
"common_server_port=443",
"common_c2s_pkt_num>5",
"common_s2c_pkt_num>5",
"common_c2s_byte_num>100",
"common_s2c_byte_num<200",
"common_schema_type='DNS'",
"common_establish_latency_ms>200",
"common_con_duration_ms>10000",
"common_stream_trace_id=1153021139190754263",
"common_tcp_client_isn=2857077935",
"common_tcp_server_isn=0",
"http_domain='microsoft.com'",
"mail_account='abc@xx.com'",
"mail_subject='test'",
"dns_qname='qbwup.imtt.qq.com'",
"ssl_sni='note.youdao.com'",
"ssl_con_latency_ms>100",
"ssl_ja3_hash='a0e9f5d64349fb13191bc781f81f42e1'",
"common_client_ip='36.189.226.21' and common_server_ip='8.8.8.8'",
"common_server_ip='111.10.53.14' and common_server_port=443",
"mail_account like 'abc@%'",
"http_domain like '%baidu.com%'",
"ssl_sni like '%youdao.com'",
"common_client_ip in ('36.189.226.21','111.10.53.14')",
"common_server_port not in (80,443)",
"notEmpty(http_domain)",
"http_domain not like '%microsoft.com'"
]
}
]
}

View File

@@ -0,0 +1,120 @@
--Q01.CK DateTime
select toDateTime(common_recv_time) as common_recv_time from session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) limit 20
--Q02.Standard DateTime
select FROM_UNIXTIME(common_recv_time) as common_recv_time from session_record where common_recv_time >= UNIX_TIMESTAMP(@start) and common_recv_time< UNIX_TIMESTAMP(@end) limit 20
--Q03.count(1)
select count(1) from session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end)
--Q04.count(*)
select count(*) from session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end)
--Q05.UDF APPROX_COUNT_DISTINCT_DS_HLL
SELECT policy_id, APPROX_COUNT_DISTINCT_DS_HLL(isp) as num FROM proxy_event_hits_log where __time >= @start and __time < @end and policy_id=0 group by policy_id
--Q06.UDF TIME_FLOOR_WITH_FILL
select TIME_FLOOR_WITH_FILL(common_recv_time,'PT5M','previous') as stat_time from session_record where common_recv_time > @start and common_recv_time < @end group by stat_time
--Q07.UDF GEO IP
select IP_TO_GEO(common_client_ip) as geo,IP_TO_CITY(common_server_ip) as city,IP_TO_COUNTRY(common_server_ip) as country from session_record limit 10
--Q08.Special characters
select * from session_record where (common_protocol_label ='/$' or common_client_ip like'%') limit 10
--Q09.Federation Query
select * from (select FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(common_recv_time,'PT5M','zero')) as stat_time from session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) group by stat_time order by stat_time asc)
--Q10.Catalog Database
select * from tsg_galaxy_v3.session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) limit 20
--Q11.Session Record Logs
select * from session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) AND @common_filter order by common_recv_time desc limit 20
--Q12.Live Session Record Logs
select * from interim_session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) AND @common_filter order by common_recv_time desc limit 20
--Q13.Transaction Record Logs
select * from transaction_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) order by common_recv_time desc limit 20
--Q14.Security Event Logs
select * from security_event where common_recv_time >= UNIX_TIMESTAMP(@start) and common_recv_time< UNIX_TIMESTAMP(@end) AND @common_filter order by common_recv_time desc limit 0,20
--Q15.Proxy Event Logs
select * from proxy_event where common_recv_time >= UNIX_TIMESTAMP(@start) and common_recv_time< UNIX_TIMESTAMP(@end) order by common_recv_time desc limit 0,20
--Q16.Radius Record Logs
select * from radius_record where common_recv_time >= UNIX_TIMESTAMP(@start) and common_recv_time< UNIX_TIMESTAMP(@end) order by common_recv_time desc limit 0,20
--Q17.GTPC Record Logs
select * from gtpc_record where common_recv_time >= UNIX_TIMESTAMP(@start) and common_recv_time< UNIX_TIMESTAMP(@end) order by common_recv_time desc limit 0,20
--Q18.Security Event Logs with fields
select FROM_UNIXTIME(common_recv_time) as common_recv_time,common_log_id,common_policy_id,common_subscriber_id,common_client_ip,common_client_port,common_l4_protocol,common_address_type,common_server_ip,common_server_port,common_action,common_direction,common_sled_ip,common_client_location,common_client_asn,common_server_location,common_server_asn,common_c2s_pkt_num,common_s2c_pkt_num,common_c2s_byte_num,common_s2c_byte_num,common_schema_type,common_sub_action,common_device_id, FROM_UNIXTIME(common_start_time) as common_start_time, FROM_UNIXTIME(common_end_time) as common_end_time,common_establish_latency_ms,common_con_duration_ms,common_stream_dir,common_stream_trace_id,http_url,http_host,http_domain,http_request_body,http_response_body,http_cookie,http_referer,http_user_agent,http_content_length,http_content_type,http_set_cookie,http_version,http_response_latency_ms,http_action_file_size,http_session_duration_ms,mail_protocol_type,mail_account,mail_from_cmd,mail_to_cmd,mail_from,mail_to,mail_cc,mail_bcc,mail_subject,mail_attachment_name,mail_eml_file,dns_message_id,dns_qr,dns_opcode,dns_aa,dns_tc,dns_rd,dns_ra,dns_rcode,dns_qdcount,dns_ancount,dns_nscount,dns_arcount,dns_qname,dns_qtype,dns_qclass,dns_cname,dns_sub,dns_rr,ssl_sni,ssl_san,ssl_cn,ssl_pinningst,ssl_intercept_state,ssl_server_side_latency,ssl_client_side_latency,ssl_server_side_version,ssl_client_side_version,ssl_cert_verify,ssl_error,quic_version,quic_sni,quic_user_agent,ftp_account,ftp_url,ftp_content from security_event where common_recv_time >= @start and common_recv_time < @end order by common_recv_time desc limit 10000
--Q19.Radius ON/OFF Logs For Frame IP
select framed_ip, arraySlice(groupUniqArray(concat(toString(event_timestamp),':', if(acct_status_type=1,'start','stop'))),1,100000) as timeseries from radius_onff_log where event_timestamp >=toDateTime(@start) and event_timestamp <toDateTime(@end) group by framed_ip limit 20
--Q20.Radius ON/OFF Logs For Account
select account, arraySlice(groupUniqArray(concat(toString(event_timestamp),':', if(acct_status_type=1,'start','stop'))),1,100000) as timeseries from radius_onff_log where event_timestamp >= @start and event_timestamp < @end group by account
--Q21.Radius ON/OFF Logs total Account number
select count(distinct(framed_ip)) as active_ip_num , sum(acct_session_time) as online_duration from (select any(framed_ip) as framed_ip ,max(acct_session_time) as acct_session_time from radius_onff_log where account='000jS' and event_timestamp >= @start and event_timestamp < @end group by acct_session_id)
--Q22.Radius ON/OFF Logs Account Access Detail
select max(if(acct_status_type=1,event_timestamp,0)) as start_time,max(if(acct_status_type=2,event_timestamp,0)) as end_time, any(framed_ip) as ip,max(acct_session_time) as online_duration from radius_onff_log where event_timestamp >= @start and event_timestamp < @end group by acct_session_id order by start_time desc limit 200
--Q23.Report for Client IP
select common_client_ip, count(*) as sessions from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@end)) group by common_client_ip order by sessions desc limit 0,100
--Q24.Report for Server IP
select common_server_ip, count(*) as sessions from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) group by common_server_ip order by sessions desc limit 0,100
--Q25.Report for SSL SNI
select ssl_sni, count(*) as sessions from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) group by ssl_sni order by sessions desc limit 0,100
--Q26.Report for SSL APP (top 100 applications by session count over the 7 days ending at @start; alias typo "applicaiton" fixed consistently in SELECT and GROUP BY)
select common_app_label as application, count(*) as sessions from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) group by application order by sessions desc limit 0,100
--Q27.Report for Domains
select http_domain AS domain,SUM(coalesce(common_c2s_byte_num, 0)) AS sent_bytes,SUM(coalesce(common_s2c_byte_num, 0)) AS received_bytes,SUM(coalesce(common_c2s_byte_num, 0)+coalesce(common_s2c_byte_num, 0)) AS bytes FROM session_record WHERE common_recv_time >= toStartOfDay(toDateTime(@start))-86400 AND common_recv_time < toStartOfDay(toDateTime(@start)) and notEmpty(domain) GROUP BY domain ORDER BY bytes DESC LIMIT 100
--Q28.Report for Domains with unique Client IP
select toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))), 300)*300) as stat_time, http_domain, uniq (common_client_ip) as nums from session_record where common_recv_time >= toStartOfDay(toDateTime(@start))-86400 AND common_recv_time < toStartOfDay(toDateTime(@start)) and http_domain in (select http_domain from session_record where common_recv_time >= toStartOfDay(toDateTime(@start))-86400 AND common_recv_time < toStartOfDay(toDateTime(@start)) and notEmpty(http_domain) group by http_domain order by SUM(coalesce(common_c2s_byte_num, 0)+coalesce(common_s2c_byte_num, 0)) desc limit 10 ) group by toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))), 300)*300), http_domain order by stat_time asc limit 500
--Q29. Report for HTTP Host
SELECT http_host as host, SUM(coalesce(common_c2s_byte_num, 0)) AS sent_bytes,SUM(coalesce(common_s2c_byte_num, 0)) AS received_bytes,SUM(coalesce(common_c2s_byte_num, 0)+coalesce(common_s2c_byte_num, 0)) AS bytes FROM session_record WHERE common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) and notEmpty(http_host) GROUP BY host ORDER BY bytes DESC limit 100 union all SELECT 'totals' as host, SUM(coalesce(common_c2s_byte_num, 0)) AS sent_bytes, SUM(coalesce(common_s2c_byte_num, 0)) AS received_bytes, SUM(coalesce(common_c2s_byte_num, 0)+coalesce(common_s2c_byte_num, 0)) AS bytes from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) and notEmpty(http_host)
--Q30.Report for HTTP/HTTPS URLS with Sessions
SELECT http_url AS url,count(*) AS sessions FROM proxy_event WHERE common_recv_time >= toStartOfDay(toDateTime(@start))-86400 AND common_recv_time < toStartOfDay(toDateTime(@start)) and notEmpty(http_url) GROUP BY url ORDER BY sessions DESC LIMIT 100
--Q31.Report for HTTP/HTTPS URLS with UNIQUE Client IP
select toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))), 300)*300) as stat_time, http_url, count(distinct(common_client_ip)) as nums from proxy_event where common_recv_time >= toStartOfDay(toDateTime(@start))-86400 AND common_recv_time < toStartOfDay(toDateTime(@start)) and http_url IN (select http_url from proxy_event where common_recv_time >= toStartOfDay(toDateTime(@start))-86400 AND common_recv_time < toStartOfDay(toDateTime(@start)) and notEmpty(http_url) group by http_url order by count(*) desc limit 10 ) group by toDateTime(intDiv(toUInt32(toDateTime(toDateTime(common_recv_time))), 300)*300), http_url order by stat_time asc limit 500
--Q32.Report for Subscriber ID with Sessions
select common_subscriber_id as user, count(*) as sessions from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) and notEmpty(user) group by common_subscriber_id order by sessions desc limit 0,100
--Q33.Report for Subscriber ID with Bandwidth
SELECT common_subscriber_id as user,SUM(coalesce(common_c2s_byte_num, 0)) AS sent_bytes,SUM(coalesce(common_s2c_byte_num, 0)) AS received_bytes,SUM(coalesce(common_c2s_byte_num, 0)+coalesce(common_s2c_byte_num, 0)) AS bytes FROM session_record WHERE common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start)) and notEmpty(user) GROUP BY user ORDER BY bytes DESC LIMIT 100
--Q34.Report Unique Endpoints
select uniq(common_client_ip) as "Client IP",uniq(common_server_ip) as "Server IP",uniq(common_internal_ip) as "Internal IP",uniq(common_external_ip) as "External IP",uniq(http_domain) as "Domain",uniq(ssl_sni) as "SNI" from session_record where common_recv_time>= toStartOfDay(toDateTime(@start))-604800 and common_recv_time< toStartOfDay(toDateTime(@start))
--Q35.TopN Optimizer
SELECT http_url AS url, SUM(common_sessions) AS sessions FROM session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND notEmpty(http_url) GROUP BY http_url ORDER BY sessions DESC limit 10
--Q36.All Security Event Hits Trend by 5min B
select DATE_FORMAT(FROM_UNIXTIME(FLOOR(UNIX_TIMESTAMP(__time)/300)*300),'%Y-%m-%d %H:%i:%s') as start_time, sum(hits) as hits from security_event_hits_log where __time >= @start and __time < @end group by DATE_FORMAT(FROM_UNIXTIME(FLOOR(UNIX_TIMESTAMP(__time)/300)*300),'%Y-%m-%d %H:%i:%s') limit 10000
--Q37.Security Event Hit Time (first and last time) B
select policy_id, DATE_FORMAT(min(__time) ,'%Y-%m-%d %H:%i:%s') as first_used, DATE_FORMAT(max(__time) ,'%Y-%m-%d %H:%i:%s') as last_used from security_event_hits_log where policy_id in (0) group by policy_id
--Q38.All Proxy Event Hits Trend by 5min B
select FROM_UNIXTIME(FLOOR(UNIX_TIMESTAMP(__time)/300)*300) as start_time, sum(hits) as hits from proxy_event_hits_log where __time >= @start and __time < @end group by FROM_UNIXTIME(FLOOR(UNIX_TIMESTAMP(__time)/300)*300) limit 10000
--Q39.Proxy Event Hit Time (first and last time) B
select policy_id, DATE_FORMAT(min(__time) ,'%Y-%m-%d %H:%i:%s') as first_used, DATE_FORMAT(max(__time) ,'%Y-%m-%d %H:%i:%s') as last_used from proxy_event_hits_log where policy_id in (0) group by policy_id
--Q40.Traffic Composition Protocol Tree Trend
(SELECT TIME_FORMAT(MILLIS_TO_TIMESTAMP( 1000 * TIME_FLOOR_WITH_FILL(TIMESTAMP_TO_MILLIS(__time)/1000, 'PT30S', 'zero')), 'yyyy-MM-dd HH:mm:ss') as stat_time, protocol_id as type, sum(c2s_byte_num + s2c_byte_num) as bytes from traffic_protocol_stat_log where __time >= TIMESTAMP @start AND __time < TIMESTAMP @end and protocol_id = 'ETHERNET' group by TIME_FORMAT(MILLIS_TO_TIMESTAMP( 1000 * TIME_FLOOR_WITH_FILL(TIMESTAMP_TO_MILLIS(__time)/1000, 'PT30S', 'zero')), 'yyyy-MM-dd HH:mm:ss'), protocol_id order by stat_time asc) union all (SELECT TIME_FORMAT(MILLIS_TO_TIMESTAMP( 1000 * TIME_FLOOR_WITH_FILL(TIMESTAMP_TO_MILLIS(__time)/1000, 'PT30S', 'zero')), 'yyyy-MM-dd HH:mm:ss') as stat_time, protocol_id as type, sum(c2s_byte_num + s2c_byte_num) as bytes from traffic_protocol_stat_log where __time >= TIMESTAMP @start AND __time < TIMESTAMP @end and protocol_id like CONCAT('ETHERNET','.%') and LENGTH(protocol_id) = LENGTH(REPLACE(protocol_id,'.','')) + 1 + 0 group by TIME_FORMAT(MILLIS_TO_TIMESTAMP( 1000 * TIME_FLOOR_WITH_FILL(TIMESTAMP_TO_MILLIS(__time)/1000, 'PT30S', 'zero')), 'yyyy-MM-dd HH:mm:ss'), protocol_id order by stat_time asc)
--Q41.Traffic Metrics Security Action Hits Trend
select FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1800S','zero')) as statisticTime, sum(default_in_bytes + default_out_bytes) as default_bytes, sum(default_in_packets + default_out_packets) as default_packets, sum(default_conn_num) as default_sessions, sum(allow_in_bytes + allow_out_bytes) as allow_bytes, sum(allow_in_packets + allow_out_packets) as allow_packets, sum(allow_conn_num) as allow_sessions, sum(deny_in_bytes + deny_out_bytes) as deny_bytes, sum(deny_in_packets + deny_out_packets) as deny_packets, sum(deny_conn_num) as deny_sessions, sum(monitor_in_bytes + monitor_out_bytes) as monitor_bytes, sum(monitor_in_packets + monitor_out_packets) as monitor_packets, sum(monitor_conn_num) as monitor_sessions, sum(intercept_in_bytes + intercept_out_bytes) as intercept_bytes, sum(intercept_in_packets + intercept_out_packets) as intercept_packets, sum(intercept_conn_num) as intercept_sessions from traffic_metrics_log where __time >= @start and __time < @end group by FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1800S','zero')) limit 100000
--Q42.Traffic Metrics Proxy Action Hits Trend
SELECT FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1800S','zero')) AS statisticTime,SUM(intcp_allow_num) AS intercept_allow_conn_num,SUM(intcp_mon_num) AS intercept_monitor_conn_num,SUM(intcp_deny_num) AS intercept_deny_conn_num,SUM(intcp_rdirt_num) AS intercept_redirect_conn_num,SUM(intcp_repl_num) AS intercept_replace_conn_num,SUM(intcp_hijk_num) AS intercept_hijack_conn_num,SUM(intcp_ins_num) AS intercept_insert_conn_num FROM traffic_metrics_log WHERE __time >= @start AND __time < @end GROUP BY FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time), 'PT1800S', 'zero')) LIMIT 100000
--Q43.Traffic Statistics(Metrics02)
select FROM_UNIXTIME(stat_time) as max_active_date_by_sessions, total_live_sessions as max_live_sessions from ( select stat_time, sum(live_sessions) as total_live_sessions from ( select TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time), 'P1D') as stat_time, device_id, avg(established_conn_num) as live_sessions from traffic_metrics_log where __time >= @start and __time<@end group by TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time), 'P1D'), device_id) group by stat_time order by total_live_sessions desc limit 1 )
--Q44.Traffic Summary(Bandwidth Trend)
select * from ( select DATE_FORMAT(FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1h','zero')),'%Y-%m-%d %H:%i:%s') as stat_time,'traffic_in_bytes' as type, sum(total_in_bytes) as bytes from traffic_metrics_log where __time >= @start and __time < @end group by DATE_FORMAT(FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1h','zero')),'%Y-%m-%d %H:%i:%s'), 'traffic_in_bytes' union all select DATE_FORMAT(FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1h','zero')),'%Y-%m-%d %H:%i:%s') as stat_time,'traffic_out_bytes' as type,sum(total_out_bytes) as bytes from traffic_metrics_log where __time >= @start and __time < @end group by DATE_FORMAT(FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1h','zero')),'%Y-%m-%d %H:%i:%s'),'traffic_out_bytes' ) order by stat_time asc limit 100000
--Q45.Traffic Summary(Sessions Trend)
select DATE_FORMAT(FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1h','zero')),'%Y-%m-%d %H:%i:%s') as stat_time, 'total_conn_num' as type, sum(new_conn_num) as sessions from traffic_metrics_log where __time >= @start and __time < @end group by DATE_FORMAT(FROM_UNIXTIME(TIME_FLOOR_WITH_FILL(UNIX_TIMESTAMP(__time),'PT1h','zero')),'%Y-%m-%d %H:%i:%s'), 'total_conn_num' order by stat_time asc limit 10000
--Q46.Domain Baidu.com Metrics
select FROM_UNIXTIME(min(common_recv_time)) as "First Seen" , FROM_UNIXTIME(max(common_recv_time)) as "Last Seen" , median(http_response_latency_ms) as "Server Processing Time Median(ms)", count(1) as Responses,any(common_server_location) as Location from session_record WHERE common_recv_time >= toDateTime(@start) AND common_recv_time < toDateTime(@end) AND http_domain='baidu.com'
--Q47.TIME_FLOOR_WITH_FILL 01
select "Device Group" as "Device Group" ,"Data Center" as "Data Center" ,FROM_UNIXTIME("End Time") as "End Time" , sum("counter") as "counter" from (select common_device_group as "Device Group" ,common_data_center as "Data Center" ,TIME_FLOOR_WITH_FILL (common_end_time,'PT1H','zero') as "End Time" ,count(common_log_id) as "counter" from session_record where common_recv_time >= toDateTime(@start) and common_recv_time< toDateTime(@end) group by "Device Group","Data Center","End Time") group by "Device Group" ,"Data Center" ,"End Time" order by "End Time" asc limit 5
--Q48.TIME_FLOOR_WITH_FILL 02
select FROM_UNIXTIME("End Time") as "End Time" , sum("counter") as "counter" from (select common_device_group as "Device Group" ,common_data_center as "Data Center" ,TIME_FLOOR_WITH_FILL (common_end_time,'PT1H','zero') as "End Time" ,count(common_log_id) as "counter" ,count(http_domain) as "HTTP.Domain" from security_event where ((common_recv_time >= toDateTime('2021-10-19 00:00:00') and common_recv_time < toDateTime('2021-10-20 00:00:00')) ) AND ( ( common_action = 2 ) ) group by "Device Group","Data Center","End Time") group by "End Time" order by "End Time" asc
--Q49.CONVERT_TZ (Druid) 01
SELECT CONVERT_TZ('2019-09-09 09:09:09','GMT','MET') as test_time from proxy_event_hits_log limit 1
--Q50.CONVERT_TZ (Druid) 02
SELECT CONVERT_TZ('2019-09-09 09:09:09','Europe/London','America/New_York') as test_time from proxy_event_hits_log limit 1
--Q51.CONVERT_TZ (Druid) 03
SELECT CONVERT_TZ(now(),'GMT','America/New_York') as test_time from proxy_event_hits_log limit 1
--Q52.CONVERT_TZ (clickhouse) 01
SELECT CONVERT_TZ('2019-09-09 09:09:09','GMT','MET') as test_time from session_record limit 1
--Q53.CONVERT_TZ (clickhouse) 02
SELECT CONVERT_TZ('2019-09-09 09:09:09','Europe/London','America/New_York') as test_time from session_record limit 1
--Q54.CONVERT_TZ (clickhouse) 03
SELECT CONVERT_TZ(now(),'GMT','America/New_York') as test_time from session_record limit 1
--Q55.CONVERT_TZ (hbase) 01
SELECT CONVERT_TZ('2019-09-09 09:09:09','GMT','MET') as test_time from report_result limit 1
--Q56.CONVERT_TZ (hbase) 02
SELECT CONVERT_TZ('2019-09-09 09:09:09','Europe/London','America/New_York') as test_time from report_result limit 1
--Q57.CONVERT_TZ (hbase) 03
SELECT CONVERT_TZ(now(),'GMT','America/New_York') as test_time from report_result limit 1
--Q58.CONVERT_TZ (elasticsearch)
SELECT CONVERT_TZ('2019-09-09 09:09:09','Europe/London','America/New_York') as time from report_result limit 1
--Q59.Authentication failed(code 516)
SELECT toDateTime(common_recv_time) AS common_recv_time, common_log_id, common_subscriber_id, common_imei, common_imsi, common_phone_number, common_client_ip, common_internal_ip, common_client_port, common_l4_protocol, common_address_type, common_server_ip, common_server_port, common_external_ip, common_direction, common_sled_ip, common_client_location, common_client_asn, common_server_location, common_server_asn, common_sessions, common_c2s_pkt_num, common_s2c_pkt_num, common_c2s_byte_num, common_s2c_byte_num, common_c2s_pkt_diff, common_s2c_pkt_diff, common_c2s_byte_diff, common_s2c_byte_diff, common_schema_type, common_device_id, common_device_group, common_app_behavior, common_app_label, common_tunnels, common_protocol_label, common_l7_protocol, common_service_category, toDateTime(common_start_time) AS common_start_time, toDateTime(common_end_time) AS common_end_time, common_establish_latency_ms, common_con_duration_ms, common_stream_dir, common_stream_trace_id, common_c2s_ipfrag_num, common_s2c_ipfrag_num, common_c2s_tcp_lostlen, common_s2c_tcp_lostlen, common_c2s_tcp_unorder_num, common_s2c_tcp_unorder_num, common_c2s_pkt_retrans, common_s2c_pkt_retrans, common_c2s_byte_retrans, common_s2c_byte_retrans, common_tcp_client_isn, common_tcp_server_isn, toDateTime(common_processing_time) AS common_processing_time, http_url, http_host, http_domain, http_request_content_length, http_request_content_type, http_response_content_length, http_response_content_type, http_request_body, http_response_body, http_cookie, http_referer, http_user_agent, http_set_cookie, http_version, http_response_latency_ms, http_session_duration_ms, http_action_file_size, mail_protocol_type, mail_account, mail_from_cmd, mail_to_cmd, mail_from, mail_to, mail_cc, mail_bcc, mail_subject, mail_attachment_name, mail_eml_file, dns_message_id, dns_qr, dns_opcode, dns_aa, dns_tc, dns_rd, dns_ra, dns_rcode, dns_qdcount, dns_ancount, dns_nscount, dns_arcount, dns_qname, dns_qtype, dns_qclass, dns_sub, 
ssl_sni, ssl_cn, ssl_pinningst, ssl_intercept_state, ssl_server_side_latency, ssl_client_side_latency, ssl_server_side_version, ssl_client_side_version, ssl_cert_verify, ssl_error, ssl_con_latency_ms, ssl_ja3_hash, ssl_cert_issuer, ssl_cert_subject, quic_version, quic_sni, quic_user_agent, ftp_account, ftp_url, ftp_content, ftp_link_type, app_extra_info, sip_call_id, sip_originator_description, sip_responder_description, sip_user_agent, sip_server, sip_originator_sdp_connect_ip, sip_originator_sdp_media_port, sip_originator_sdp_media_type, sip_originator_sdp_content, sip_responder_sdp_connect_ip, sip_responder_sdp_media_port, sip_responder_sdp_media_type, sip_responder_sdp_content, sip_duration_s, sip_bye, rtp_payload_type_c2s, rtp_payload_type_s2c, rtp_pcap_path, rtp_originator_dir, ssh_version, ssh_auth_success, ssh_client_version, ssh_server_version, ssh_cipher_alg, ssh_mac_alg, ssh_compression_alg, ssh_kex_alg, ssh_host_key_alg, ssh_host_key, ssh_hassh, stratum_cryptocurrency, stratum_mining_pools, stratum_mining_program FROM tsg_galaxy_v3.interim_session_record AS interim_session_record WHERE common_recv_time >= toUnixTimestamp(@start) AND common_recv_time < toUnixTimestamp(@end) ORDER BY common_recv_time DESC LIMIT 43233, 20
--Q60.Function MAX_DURATION
SELECT destination_ip, IP_TO_GEO(destination_ip) AS destination_geo, MAX_DURATION(end_time,600) AS max_duration, any(destination_country) AS destination_country, groupUniqArray(arrayJoin(splitByString(',',source_country_list))) AS source_coutries,max(bit_rate) AS max_bit_rate,max(packet_rate) AS max_packet_rate,max(session_rate) AS max_session_rate,min(start_time) AS first_active_time,max(end_time) AS last_active_time,groupUniqArray(attack_type) AS attack_type,count(*) AS count from dos_event where start_time >= toUnixTimestamp(@start) AND start_time < toUnixTimestamp(@end) GROUP BY destination_ip ORDER BY count desc

View File

@@ -0,0 +1,15 @@
{
"version": "1.0",
"name": "hbase-Raw",
"namespace": "tsg",
"filters": [
{
"name":"@start",
"value": "'2021-10-19 10:00:00'"
},
{
"name":"@end",
"value": "'2021-10-20 11:00:00'"
}
]
}

View File

@@ -0,0 +1,4 @@
--Q01. Time-range scan: rows whose last_update_time falls in [@start, @end), using Phoenix TO_TIMESTAMP in the Asia/Shanghai zone cast to an epoch long
SELECT last_update_time FROM relation_account_framedip WHERE last_update_time>=CAST(TO_TIMESTAMP (@start,'yyyy-MM-dd HH:mm:ss','Asia/Shanghai') AS UNSIGNED_LONG) AND last_update_time<CAST(TO_TIMESTAMP (@end,'yyyy-MM-dd HH:mm:ss','Asia/Shanghai') AS UNSIGNED_LONG) LIMIT 30
--Q02. KV lookup: fetch a single row by its exact ROWKEY
select * from relation_account_framedip where ROWKEY = '0a771a381088e7d72ded13e998c06cbe' limit 1

View File

@@ -0,0 +1,42 @@
{
"type": "record",
"name": "job_result",
"namespace": "tsg_galaxy",
"fields": [
{
"name": "ROWKEY",
"label": "Row Key",
"type": "string"
},
{
"name": "is_done",
"label": "Done",
"type": "boolean"
},
{
"name": "is_canceled",
"label": "Canceled",
"type": "boolean"
},
{
"name": "done_progress",
"label": "Progress",
"type": "double"
},
{
"name": "last_query_time",
"label": "Last Query Time",
"type": "long"
},
{
"name": "duration_time",
"label": "Duration Time",
"type": "long"
},
{
"name": "discovery_field",
"label": "Discovery Field",
"type": "string"
}
]
}

View File

@@ -0,0 +1,133 @@
{
"type": "record",
"name": "proxy_event_hits_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "isp",
"label": "ISP",
"type": "string",
"doc": {
"visibility": "disabled"
}
},
{
"name": "entrance_id",
"label": "Entrance ID",
"type": "long",
"doc": {
"visibility": "disabled"
}
},
{
"name": "hits",
"label": "Hits",
"type": "long"
},
{
"name": "policy_id",
"label": "Policy ID",
"type": "long",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
}
}
},
{
"name": "action",
"label": "Action",
"type": "long"
},
{
"name": "sub_action",
"label": "Sub Action",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": [
{
"code": "allow",
"value": "Allow"
},
{
"code": "deny",
"value": "Deny"
},
{
"code": "monitor",
"value": "Monitor"
},
{
"code": "replace",
"value": "Replace"
},
{
"code": "redirect",
"value": "Redirect"
},
{
"code": "insert",
"value": "Insert"
},
{
"code": "hijack",
"value": "Hijack"
},
{
"code": "edit_element",
"value": "Edit Element"
}
]
}
},
{
"name": "ip_object",
"label": "IP Object",
"type": "string"
},
{
"name": "country",
"label": "Country",
"type": "string"
},
{
"name": "location",
"label": "Location",
"type": "string"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
}
]
}

View File

@@ -0,0 +1,27 @@
{
"type": "record",
"name": "recommendation_app_cip",
"namespace": "tsg_galaxy",
"fields": [
{
"name": "ROWKEY",
"label": "Row Key",
"type": "string"
},
{
"name": "app_label",
"label": "APP Label",
"type": "string"
},
{
"name": "last_update_time",
"label": "Last Update Time",
"type": "long"
},
{
"name": "client_ip_list",
"label": "Client IP List",
"type": "string"
}
]
}

View File

@@ -0,0 +1,37 @@
{
"type": "record",
"name": "relation_account_framedip",
"namespace": "tsg_galaxy",
"fields": [
{
"name":"ROWKEY",
"label":"Row Key",
"type":"string"
},
{
"name":"acct_status_type",
"label":"Acct Status Type",
"type":"string"
},
{
"name":"first_found_time",
"label":"First Found Time",
"type":"long"
},
{
"name":"last_update_time",
"label":"Last Update Time",
"type":"long"
},
{
"name":"framed_ip",
"label":"Framed IP",
"type":"string"
},
{
"name":"account",
"label":"Account",
"type":"string"
}
]
}

View File

@@ -0,0 +1,32 @@
{
"type": "record",
"name": "report_result",
"namespace": "tsg",
"fields": [
{
"name":"ROWKEY",
"label":"Row Key",
"type":"string"
},
{
"name":"excute_sql",
"label":"Execute SQL",
"type":"string"
},
{
"name":"read_rows",
"label":"Read Rows",
"type":"long"
},
{
"name":"result_id",
"label":"Result ID",
"type":"int"
},
{
"name":"result",
"label":"Result",
"type":"string"
}
]
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,97 @@
{
"type": "record",
"name": "security_event_hits_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "isp",
"label": "ISP",
"type": "string",
"doc": {
"visibility": "disabled"
}
},
{
"name": "entrance_id",
"label": "Entrance ID",
"type": "long",
"doc": {
"visibility": "disabled"
}
},
{
"name": "policy_id",
"label": "Policy ID",
"type": "long",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
}
}
},
{
"name": "action",
"label": "Action",
"type": "long",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": [
{
"code": "1",
"value": "Monitor"
},
{
"code": "2",
"value": "Intercept"
},
{
"code": "16",
"value": "Deny"
},
{
"code": "128",
"value": "Allow"
}
]
}
},
{
"name": "hits",
"label": "Hits",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
}
]
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,71 @@
{
"type": "record",
"name": "sys_storage_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"filters": [
"data_center"
],
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "log_type",
"label": "Log Type",
"type": "string"
},
{
"name": "max_size",
"label": "Max Size",
"type": "long"
},
{
"name": "used_size",
"label": "Used Size",
"type": "long"
},
{
"name": "aggregate_size",
"label": "Aggregate Size",
"type": "long"
},
{
"name": "last_storage",
"label": "Last Storage",
"type": "long"
}
]
}

View File

@@ -0,0 +1,93 @@
{
"type": "record",
"name": "top_client_ip_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "source",
"label": "Client IP",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "order_by",
"label": "Order By",
"type": "string"
}
]
}

View File

@@ -0,0 +1,93 @@
{
"type": "record",
"name": "top_external_host_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "destination",
"label": "External IP",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "order_by",
"label": "Order By",
"type": "string"
}
]
}

View File

@@ -0,0 +1,93 @@
{
"type": "record",
"name": "top_internal_host_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "source",
"label": "Internal IP",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "order_by",
"label": "Order By",
"type": "string"
}
]
}

View File

@@ -0,0 +1,93 @@
{
"type": "record",
"name": "top_server_ip_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "destination",
"label": "Server IP",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "order_by",
"label": "Order By",
"type": "string"
}
]
}

View File

@@ -0,0 +1,30 @@
{
"type": "record",
"name": "top_urls_log",
"namespace": "druid",
"doc": {
"partition_key": "__time"
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "url",
"label": "URL",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
}
]
}

View File

@@ -0,0 +1,93 @@
{
"type": "record",
"name": "top_user_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "subscriber_id",
"label": "Subscriber ID",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "order_by",
"label": "Order By",
"type": "string"
}
]
}

View File

@@ -0,0 +1,93 @@
{
"type": "record",
"name": "top_website_domain_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "domain",
"label": "Domain",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "order_by",
"label": "Order By",
"type": "string"
}
]
}

View File

@@ -0,0 +1,88 @@
{
"type": "record",
"name": "traffic_app_stat_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "app_name",
"label": "APP Name",
"type": "string"
},
{
"name": "session_num",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
}
]
}

View File

@@ -0,0 +1,286 @@
{
"type": "record",
"name": "traffic_metrics_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "device_id",
"label": "Device ID",
"type": "string"
},
{
"name": "entrance_id",
"label": "Entrance ID",
"type": "long",
"doc": {
"visibility": "disabled"
}
},
{
"name": "allow_conn_num",
"label": "Allow Sessions",
"type": "long"
},
{
"name": "allow_in_bytes",
"label": "Allow Bytes(Ingress)",
"type": "long"
},
{
"name": "allow_in_packets",
"label": "Allow Packets(Ingress)",
"type": "long"
},
{
"name": "allow_out_bytes",
"label": "Allow Bytes(Egress)",
"type": "long"
},
{
"name": "allow_out_packets",
"label": "Allow Packets(Egress)",
"type": "long"
},
{
"name": "close_conn_num",
"label": "Closed Sessions",
"type": "long"
},
{
"name": "default_conn_num",
"label": "Default Sessions",
"type": "long"
},
{
"name": "default_in_bytes",
"label": "Default Bytes(Ingress)",
"type": "long"
},
{
"name": "default_in_packets",
"label": "Default Packets(Ingress)",
"type": "long"
},
{
"name": "default_out_bytes",
"label": "Default Bytes(Egress)",
"type": "long"
},
{
"name": "default_out_packets",
"label": "Default Packets(Egress)",
"type": "long"
},
{
"name": "deny_conn_num",
"label": "Deny Sessions",
"type": "long"
},
{
"name": "deny_in_bytes",
"label": "Deny Bytes(Ingress)",
"type": "long"
},
{
"name": "deny_in_packets",
"label": "Deny Packets(Ingress)",
"type": "long"
},
{
"name": "deny_out_bytes",
"label": "Deny Bytes(Egress)",
"type": "long"
},
{
"name": "deny_out_packets",
"label": "Deny Packets(Egress)",
"type": "long"
},
{
"name": "intercept_conn_num",
"label": "Intercept Sessions",
"type": "long"
},
{
"name": "intercept_in_bytes",
"label": "Intercept Bytes(Ingress)",
"type": "long"
},
{
"name": "intercept_in_packets",
"label": "Intercept Packets(Ingress)",
"type": "long"
},
{
"name": "intercept_out_bytes",
"label": "Intercept Bytes(Egress)",
"type": "long"
},
{
"name": "intercept_out_packets",
"label": "Intercept Packets(Egress)",
"type": "long"
},
{
"name": "established_conn_num",
"label": "Established Sessions",
"type": "long"
},
{
"name": "monitor_conn_num",
"label": "Monitor Sessions",
"type": "long"
},
{
"name": "monitor_in_bytes",
"label": "Monitor Bytes(Ingress)",
"type": "long"
},
{
"name": "monitor_in_packets",
"label": "Monitor Packets(Ingress)",
"type": "long"
},
{
"name": "monitor_out_bytes",
"label": "Monitor Bytes(Egress)",
"type": "long"
},
{
"name": "monitor_out_packets",
"label": "Monitor Packets(Egress)",
"type": "long"
},
{
"name": "new_conn_num",
"label": "New Sessions",
"type": "long"
},
{
"name": "total_in_bytes",
"label": "Total Bytes(Ingress)",
"type": "long"
},
{
"name": "total_in_packets",
"label": "Total Packets(Ingress)",
"type": "long"
},
{
"name": "total_out_bytes",
"label": "Total Bytes(Egress)",
"type": "long"
},
{
"name": "total_out_packets",
"label": "Total Packets(Egress)",
"type": "long"
},
{
"name": "alert_bytes",
"label": "Alert Bytes",
"type": "long"
},
{
"name": "hijk_bytes",
"label": "Hijack Bytes",
"type": "long"
},
{
"name": "ins_bytes",
"label": "Insert Bytes",
"type": "long"
},
{
"name": "intcp_allow_num",
"label": "Intercept Allow Sessions",
"type": "long"
},
{
"name": "intcp_deny_num",
"label": "Intercept Deny Sessions",
"type": "long"
},
{
"name": "intcp_hijk_num",
"label": "Intercept Hijack Sessions",
"type": "long"
},
{
"name": "intcp_ins_num",
"label": "Intercept Insert Sessions",
"type": "long"
},
{
"name": "intcp_mon_num",
"label": "Intercept Monitor Sessions",
"type": "long"
},
{
"name": "intcp_rdirt_num",
"label": "Intercept Redirect Sessions",
"type": "long"
},
{
"name": "intcp_repl_num",
"label": "Intercept Replace Sessions",
"type": "long"
},
{
"name": "maybe_pinning_num",
"label": "Maybe Pinning Sessions",
"type": "long"
},
{
"name": "not_pinning_num",
"label": "Not Pinning Sessions",
"type": "long"
},
{
"name": "pinning_num",
"label": "Pinning Sessions",
"type": "long"
},
{
"name": "ad_cc_bytes",
"label": "AD CC Bytes",
"type": "long"
},
{
"name": "ad_flood_bytes",
"label": "AD Flood Bytes",
"type": "long"
},
{
"name": "ad_reflection_bytes",
"label": "AD Reflection Bytes",
"type": "long"
},
{
"name": "intcp_edit_elem_num",
"label": "Intercept Edit Element Sessions",
"type": "long"
}
]
}

View File

@@ -0,0 +1,132 @@
{
"type": "record",
"name": "traffic_protocol_stat_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"filters": [
"data_center",
"device_group"
],
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "protocol_id",
"label": "Protocol ID",
"type": "string"
},
{
"name": "isp",
"label": "ISP",
"type": "string"
},
{
"name": "entrance_id",
"label": "Entrance ID",
"type": "long"
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!="
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!="
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "sessions",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_ipfrag_num",
"label": "Fragmentation Packets(c2s)",
"type": "long"
},
{
"name": "s2c_ipfrag_num",
"label": "Fragmentation Packets(s2c)",
"type": "long"
},
{
"name": "c2s_tcp_lostlen",
"label": "Sequence Gap Loss(c2s)",
"type": "long"
},
{
"name": "s2c_tcp_lostlen",
"label": "Sequence Gap Loss(s2c)",
"type": "long"
},
{
"name": "c2s_tcp_unorder_num",
"label": "Unorder Packets(c2s)",
"type": "long"
},
{
"name": "s2c_tcp_unorder_num",
"label": "Unorder Packets(s2c)",
"type": "long"
}
]
}

View File

@@ -0,0 +1,174 @@
{
"type": "record",
"name": "traffic_summary_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "device_group",
"label": "Device Group",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagValue']",
"value": "$[?(@.tagType=='device_group')].subTags.[?(@.tagType=='device_group')]['tagName']"
}
}
},
{
"name": "isp",
"label": "ISP",
"type": "string",
"doc": {
"visibility": "disabled"
}
},
{
"name": "entrance_id",
"label": "Entrance ID",
"type": "long",
"doc": {
"visibility": "disabled"
}
},
{
"name": "schema_type",
"label": "Schema Type",
"type": "string",
"doc": {
"data": [
{
"code": "BASE",
"value": "BASE"
},
{
"code": "MAIL",
"value": "MAIL"
},
{
"code": "DNS",
"value": "DNS"
},
{
"code": "HTTP",
"value": "HTTP"
},
{
"code": "SSL",
"value": "SSL"
},
{
"code": "QUIC",
"value": "QUIC"
},
{
"code": "FTP",
"value": "FTP"
},
{
"code": "SSH",
"value": "SSH"
},
{
"code": "Stratum",
"value": "Stratum"
}
]
}
},
{
"name": "ip_object",
"label": "IP Object",
"type": "string"
},
{
"name": "sessions",
"label": "Sessions",
"type": "long"
},
{
"name": "c2s_byte_num",
"label": "Bytes Sent",
"type": "long"
},
{
"name": "s2c_byte_num",
"label": "Bytes Received",
"type": "long"
},
{
"name": "c2s_pkt_num",
"label": "Packets Sent",
"type": "long"
},
{
"name": "s2c_pkt_num",
"label": "Packets Received",
"type": "long"
},
{
"name": "one_sided_connections",
"label": "One Sided Connections",
"type": "long"
},
{
"name": "uncategorized_bytes",
"label": "Uncategorized Bytes",
"type": "long"
},
{
"name": "fragmentation_packets",
"label": "Fragmentation Packets",
"type": "long"
},
{
"name": "sequence_gap_loss",
"label": "Sequence Gap Loss",
"type": "long"
},
{
"name": "unorder_packets",
"label": "Unorder Packets",
"type": "long"
}
]
}

View File

@@ -0,0 +1,96 @@
{
"type": "record",
"name": "traffic_top_destination_ip_metrics_log",
"namespace": "druid",
"doc": {
"partition_key": "__time",
"functions": {
"$ref": "public_schema_info.json#/functions"
},
"schema_query": {
"filters": [
"common_data_center"
],
"references": {
"$ref": "public_schema_info.json#/schema_query/references"
}
}
},
"fields": [
{
"name": "__time",
"label": "Time",
"type": "string",
"doc": {
"constraints": {
"type": "timestamp"
}
}
},
{
"name": "common_data_center",
"label": "Data Center",
"type": "string",
"doc": {
"constraints": {
"operator_functions": "=,!=,in,not in"
},
"data": {
"$ref": "device_tag.json#",
"key": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagValue']",
"value": "$[?(@.tagType=='data_center')].subTags.[?(@.tagType=='data_center')]['tagName']"
}
}
},
{
"name": "common_sled_ip",
"label": "Sled IP",
"type": "string"
},
{
"name": "destination_ip",
"label": "Destination IP",
"type": "string"
},
{
"name": "attack_type",
"label": "Attack type",
"type": "string"
},
{
"name": "session_rate",
"label": "Sessions/s",
"type": "long",
"doc": {
"constraints": {
"type": "sessions/sec"
}
}
},
{
"name": "packet_rate",
"label": "Packets/s",
"type": "long",
"doc": {
"constraints": {
"type": "packets/sec"
}
}
},
{
"name": "bit_rate",
"label": "Bits/s",
"type": "long",
"doc": {
"constraints": {
"type": "bits/sec"
}
}
},
{
"name": "partition_num",
"label": "Partition Num",
"type": "long"
}
]
}