Add 24.09-related initialization SQL and template configuration files

doufenghu
2024-11-08 16:49:43 +08:00
parent f20d93b792
commit 446662f03d
61 changed files with 2807 additions and 256 deletions

View File

@@ -1,97 +0,0 @@
# Service port
server:
  port: 8186
  max-http-header-size: 20MB
  tomcat:
    max-threads: 400

# Tomcat cache size in KB; the system default is 10M, configured here as 10G
tomcat:
  cacheMaxSize: 1000000

# HBase parameters
hbase:
  zookeeperQuorum: 192.168.44.11:2181,192.168.44.14:2181,192.168.44.15:2181
  zookeeperPort: 2181
  zookeeperNodeParent: /hbase
  clientRetriesNumber: 9
  rpcTimeout: 100000
  connectPool: 10
  clientWriteBuffer: 10485760
  clientKeyValueMaxsize: 1073741824
  mobThreshold: 10485760
  # Maximum number of parts
  maxParts: 100000
  # Number of parts fetched per batch
  getPartBatch: 10
  # Prefixes of HBase index tables; any table with one of these prefixes is an index table
  timeIndexTablePrefix: index_time_
  filenameIndexTablePrefix: index_filename_
  partFileIndexTablePrefix: index_partfile_
  systemBucketMeta: system:bucket_meta
  # Number of regions when creating a table
  regionCount: 16
  filenameHead: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
  partHead: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
  # Directory used when calculating file sizes
  dataPath: /hbase
  # Hadoop cluster NameNode nodes: a single IP for standalone, ip1,ip2 for a cluster
  hadoopNameNodes: 192.168.44.10,192.168.44.11
  # Replication factor: 1 for standalone, 2 for a cluster
  hadoopReplication: 2
  # Hadoop port
  hadoopPort: 9000
  hadoopUser: root
  hadoopNameServices: ns1
  hadoopNameNodesNs1: nn1,nn2
  asyncPut: 0

# Whether authentication is enabled (0: enabled); when enabled, accessing the service requires S3 authentication or a token
auth:
  open: 0
  # Token used for HTTP access
  token: ENC(vknRT6U4I739rLIha9CvojM+4uFyXZLEYpO2HZayLnRak1HPW0K2yZ3vnQBA2foo)
  # S3 credentials
  s3:
    accesskey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
    secretkey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)

hos:
  # Maximum file size
  maxFileSize: 5073741800
  # Large-file threshold
  uploadThreshold: 104857600
  # Keep-alive timeout
  keepAliveTimeout: 60000
  # Maximum number of objects per batch delete
  deleteMultipleNumber: 1000
  # Maximum result count for list-objects and similar operations
  maxResultLimit: 100000
  # Maximum number of parts in a multipart upload
  maxPartNumber: 10000
  # Maximum number of append-upload operations
  maxAppendNumber: 100000
  # Whether quick upload is enabled
  isQuickUpload: 0
  # Whether quick file download is enabled (1: on); set to 0 on clusters with less than 20G of HBase memory
  isQuickDownloadFile: 0
  # User whitelist (HBase namespaces) used for storage quota lookups
  users: default
  # Whether rate limiting is enabled (0: off, 1: on)
  openRateLimiter: 0
  # Rate limit in requests per second
  rateLimiterQps: 20000

# Maximum upload file size
spring:
  servlet:
    multipart:
      max-file-size: 5GB
      max-request-size: 5GB
  # Prometheus parameters
  application:
    name: HosServiceApplication

# Prometheus parameters
management:
  endpoints:
    web:
      exposure:
        include: '*'
  metrics:
    tags:
      application: ${spring.application.name}
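
The ENC(...) values are opaque here; the notation matches Jasypt's convention for encrypted Spring properties. Purely as a sketch — assuming the service really uses Jasypt, that the jar name and PBEWithMD5AndDES algorithm below match its setup, and that the encryption password is supplied out of band — a replacement ciphertext could be produced with Jasypt's CLI:

# Hypothetical: encrypt a replacement token with the Jasypt CLI.
# Jar path, algorithm, and the JASYPT_PASSWORD variable are assumptions.
java -cp jasypt-1.9.3.jar org.jasypt.intf.cli.JasyptPBEStringEncryptionCLI \
    input="new-token-value" password="$JASYPT_PASSWORD" algorithm=PBEWithMD5AndDES
# Wrap the printed ciphertext as ENC(<ciphertext>) in the config above.

Given the management block, all actuator web endpoints are exposed; assuming micrometer-registry-prometheus is on the classpath (which the Prometheus comments suggest), the scrape endpoint can be smoke-tested on the service port:

# Every metric should carry application="HosServiceApplication" via spring.application.name.
curl -s http://localhost:8186/actuator/prometheus | head -n 20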

View File

@@ -1,21 +0,0 @@
qgw.serverAddr=http://{{ vrrp_instance.default.virtual_ipaddress }}:9999
hos.serverAddr=http://{{ vrrp_instance.oss.virtual_ipaddress }}:9098
hos.token={{ hos_token }}
kafka.server={{ groups.kafka[0] }}:9092
# Delay: verify files from this many seconds ago; unit is seconds
check.time.delay=180
hos.traffic.buckets=traffic_policy_capture_file_bucket,traffic_rtp_file_bucket,traffic_http_file_bucket,traffic_eml_file_bucket
kafka.traffic.topics=TRAFFIC-POLICY-CAPTURE-FILE-STREAM-RECORD,TRAFFIC-RTP-FILE-STREAM-RECORD,TRAFFIC-HTTP-FILE-STREAM-RECORD,TRAFFIC-EML-FILE-STREAM-RECORD
kafka.troubleshooting.topic=TROUBLESHOOTING-FILE-STREAM-RECORD
file.chunk.combiner.window.time=15000
traffic.file.count=10
threads=1
max.threads=10
print.out.interval=1000
http.max.total=100
http.default.max.per.route=100
http.connect.timeout=5000
http.connection.request.timeout=10000
http.socket.timeout=-1
hos.log.types=security_event,monitor_event,proxy_event,session_record,voip_record,assessment_event,transaction_record,troubleshooting
hos.log.types.file.types.url.fields=security_event:http-http_response_body&http_request_body,pcap-packet_capture_file&rtp_pcap_path,eml-mail_eml_file;proxy_event:http-http_response_body&http_request_body;session_record:http-http_response_body&http_request_body,pcap-packet_capture_file&rtp_pcap_path,eml-mail_eml_file;voip_record:pcap-rtp_pcap_path;assessment_event:other-assessment_file;transaction_record:http-http_response_body&http_request_body,eml-mail_eml_file;monitor_event:http-http_response_body&http_request_body,pcap-packet_capture_file&rtp_pcap_path,eml-mail_eml_file
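
The hos.log.types.file.types.url.fields value packs a three-level mapping into one string: ';' separates log types, ':' splits a log type from its file-type list, ',' separates file types, '-' splits a file type from its URL fields, and '&' separates multiple fields. A minimal bash sketch of that decomposition (variable names are illustrative, not part of the actual consumer):

# Decompose one log-type entry; prints "log_type / file_type -> fields".
value='security_event:http-http_response_body&http_request_body,pcap-packet_capture_file&rtp_pcap_path'
IFS=';' read -ra log_entries <<< "$value"
for entry in "${log_entries[@]}"; do
  log_type=${entry%%:*}                      # text before the first ':'
  IFS=',' read -ra file_entries <<< "${entry#*:}"
  for fe in "${file_entries[@]}"; do
    file_type=${fe%%-*}                      # text before the first '-'
    fields=${fe#*-}                          # remainder: '&'-separated URL fields
    echo "$log_type / $file_type -> ${fields//&/, }"
  done
done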

Binary file not shown.

View File

@@ -1,138 +0,0 @@
#!/bin/bash
version="1.4"
jar="galaxy-hos-util-$version.jar"
usage() {
cat <<EOF
Usage: ./hosutil.sh [command] [-h] [options...]
Available commands:
download Download individual or batch files
upload Upload individual or batch files
check Check file availability
combiner Verify if the file-chunk-combiner data stream is correct
version Print the version
Options for 'download' command:
-b, --bucket The bucket to access.
-d, --directory Directory to save files. If not exists, will be created. Default is ./download/.
-k, --keys Files to download. Can be a single or multiple files separated by commas.
-p, --prefix Prefix for batch downloading files based on file name.
-s, --start_time Start time in UTC format (yyyyMMdd, yyyy-MM-dd, yyyyMMddHHmmss). Default is the previous day's time.
-e, --end_time End time in UTC format (yyyyMMdd, yyyy-MM-dd, yyyyMMddHHmmss). Default is current time.
-c, --count Number of files to download. Default is 1000, maximum is 100000.
-t, --threads Number of threads. Default is 1, maximum is 10.
Options for 'upload' command:
-b, --bucket The bucket to access.
-d, --directory Directory where files to upload are located. Default is ./upload/.
-t, --threads Number of threads. Default is 1, maximum is 10.
Options for 'check' command:
-s, --start_time Start time in UTC format (yyyyMMdd, yyyy-MM-dd, yyyyMMddHHmmss). Default is the previous day's time.
-e, --end_time End time in UTC format (yyyyMMdd, yyyy-MM-dd, yyyyMMddHHmmss). Default is current time.
-c, --count Number of logs to evaluate. Default is 1000, maximum is 100000.
-d, --data_center Specify the data centers to evaluate, separated by commas. If not specified, all data centers are evaluated.
-l, --log_type Specify the logs to evaluate, separated by commas. If not specified, all logs are evaluated.
Supported logs: security_event, monitor_event, proxy_event, session_record, voip_record, assessment_event, transaction_record, troubleshooting.
-f, --file_type Specify file types. If not specified, all types are evaluated. Supported types: eml, http, pcap, other.
Only session_record, security_event, monitor_event, transaction_record support multiple types.
-t --threads Number of threads. Default is 1, maximum is 10.
Options for 'combiner' command:
-j, --job Job to verify. Options: traffic, troubleshooting. Default is traffic. (The troubleshooting job was removed in version 24.05.)
EOF
}
# Initialize default values
bucket=""
directory=""
keys=""
prefix=""
start_time=""
end_time=""
count=1000
threads=1
log_type=""
file_type=""
data_center=""
job_name="traffic"
# Check required parameters
check_required() {
case "$operation" in
download|upload)
if [ -z "$bucket" ]; then
echo "Error: bucket is required for $operation."
exit 1
fi
;;
*)
# Other operations do not require specific parameter checks
;;
esac
}
# Download function
download() {
directory=${directory:-"./download/"}
check_required
java -jar "$jar" download "$bucket" "$directory" "keys=$keys" "prefix=$prefix" "max_keys=$count" "time_range=$start_time/$end_time" "thread_num=$threads"
}
# Upload function
upload() {
directory=${directory:-"./upload/"}
check_required
java -jar "$jar" upload "$bucket" "$directory" "thread_num=$threads"
}
# Check function
check() {
java -jar "$jar" check "data_center=$data_center" "log_type=$log_type" "file_type=$file_type" "max_logs=$count" "time_range=$start_time/$end_time" "thread_num=$threads"
}
# Combiner function
combiner() {
java -jar "$jar" combiner "$job_name"
}
# Main flow
if [ $# -eq 0 ]; then
usage
exit 0
fi
operation=$1
shift
while getopts ":hb:d:k:p:s:e:c:t:l:f:j:" opt; do
case $opt in
h) usage; exit 0 ;;
b) bucket=$OPTARG ;;
d) if [ "$operation" == "check" ]; then data_center=$OPTARG; else directory=$OPTARG; fi ;;
k) keys=$OPTARG ;;
p) prefix=$OPTARG ;;
s) start_time=$OPTARG ;;
e) end_time=$OPTARG ;;
c) count=$OPTARG ;;
t) threads=$OPTARG ;;
l) log_type=$OPTARG ;;
f) file_type=$OPTARG ;;
j) job_name=$OPTARG ;;
\?) echo "Invalid option: -$OPTARG" >&2; usage; exit 1 ;;
:) echo "Option -$OPTARG requires an argument" >&2; usage; exit 1 ;;
esac
done
case "$operation" in
download) download ;;
upload) upload ;;
check) check ;;
combiner) combiner ;;
version) echo $version ;;
*) usage; exit 1 ;;
esac
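
For reference, invocations assembled from the options documented above (bucket and log-type names come from the template file earlier in this commit; times, counts, and thread counts are illustrative):

# Download up to 500 files from a one-day window with 4 threads:
./hosutil.sh download -b traffic_http_file_bucket -s 2024-11-01 -e 2024-11-02 -c 500 -t 4
# Spot-check pcap availability for two log types:
./hosutil.sh check -l security_event,voip_record -f pcap -c 1000 -t 2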