24.01 hbase、phoenix更新表

This commit is contained in:
houjinchuan
2024-01-16 16:47:01 +08:00
parent 591b449a76
commit 4fa4529b62
7 changed files with 120 additions and 4 deletions

View File

@@ -1,7 +1,6 @@
# HBase shell: initial namespace and table setup.
# Namespaces used by the platform (report results, DDoS baselines, galaxy queries).
create_namespace 'tsg'
create_namespace 'dos'
create_namespace 'tsg_galaxy'
# Result tables: the 'response' family stores large payloads as MOB (medium objects);
# MOB_THRESHOLD => 0 forces every cell of that family down the MOB path.
# Both families keep a single version and are GZ-compressed.
create 'tsg:report_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}
create 'tsg_galaxy:saved_query_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}
# Baseline table: one column family per attack type being tracked.
create 'dos:ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood','DNS Flood'

View File

@@ -1,7 +1,6 @@
-- Phoenix DDL: schemas plus read-only views over the HBase result tables.
-- Views map the HBase 'detail' and 'response' column families onto typed columns.
CREATE SCHEMA IF NOT EXISTS "tsg_galaxy";
CREATE SCHEMA IF NOT EXISTS "tsg";
-- Fix: added IF NOT EXISTS so this script is idempotent and consistent with the
-- saved_query_result view below (a rerun previously failed on "view exists").
-- NOTE: "excute_sql" (sic) matches the column qualifier stored in HBase — do not rename.
CREATE VIEW IF NOT EXISTS "tsg"."report_result"( ROWKEY VARCHAR PRIMARY KEY, "detail"."excute_sql" VARCHAR, "detail"."read_rows" UNSIGNED_LONG, "detail"."result_id" UNSIGNED_INT, "response"."result" VARCHAR);
CREATE VIEW IF NOT EXISTS "tsg_galaxy"."saved_query_result"( ROWKEY VARCHAR PRIMARY KEY, "detail"."excute_sql" VARCHAR, "detail"."read_rows" UNSIGNED_LONG, "detail"."result_id" UNSIGNED_INT, "response"."result" VARCHAR);
CREATE table IF NOT EXISTS "tsg_galaxy"."job_result"(
ROWKEY VARCHAR PRIMARY KEY,

View File

@@ -0,0 +1,6 @@
# HBase shell migration: retire the 'tsg' namespace in favor of 'tsg_galaxy'.
# Order matters: a table must be disabled before it can be dropped, and
# drop_namespace only succeeds once the namespace contains no tables.
disable 'tsg:report_result'
drop 'tsg:report_result'
drop_namespace 'tsg'
# Replacement table in 'tsg_galaxy'; same layout as the old report_result
# ('response' family as MOB with forced threshold 0, single version, GZ compression).
create 'tsg_galaxy:saved_query_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}

View File

@@ -0,0 +1,108 @@
# Server port
server:
port: 8186
max-http-header-size: 20MB
tomcat:
max-threads: 400
# Tomcat cache size in KB; system default is 10M, configured here as ~10G
tomcat:
cacheMaxSize: 1000000
# HBase connection parameters
hbase:
zookeeper_quorum: 192.168.44.11,192.168.44.14,192.168.44.15
zookeeper_port: 2181
zookeeper_znode_parent: /hbase
client_retries_number: 9
rpc_timeout: 100000
connect_pool: 10
client_write_buffer: 10485760
client_key_value_maxsize: 1073741824
mob_threshold: 10485760
# Maximum number of parts (multipart objects)
max_parts: 100000
# Number of parts fetched per batch
get_part_batch: 10
# HBase index table prefixes; any table whose name starts with one of these is an index table
time_index_table_prefix: index_time_
filename_index_table_prefix: index_filename_
partfile_index_table_prefix: index_partfile_
system_bucket_meta: system:bucket_meta
# Pre-split region keys used when creating tables; empty means no pre-splitting
region_start_key: 1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
filename_head: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
part_head: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
# Directory used to obtain file sizes
data_path: /hbase
# 1 = cluster mode, 0 = standalone
standalone: 1
# Hadoop cluster NameNode hosts
hadoop_name_nodes: 192.168.44.11,192.168.44.14
# Hadoop port
hadoop_port: 9000
hadoop_user: root
hadoop_default_fs: hdfs://ns1
hadoop_name_services: ns1
hadoop_name_nodes_ns1: nn1,nn2
hadoop_replication: 2
# TTL-related parameters (scan/delete batch sizes)
ttl_scan_batch: 1000
ttl_scan_caching: 1000
ttl_delete_batch: 1000
# Authentication toggle; when enabled, requests must use S3 credentials or the token below.
# NOTE(review): original comment reads "0打开" (0 = open) — polarity of `open` should be
# confirmed against the code that reads it.
auth:
open: 0
# Token used for HTTP access (Jasypt-encrypted)
token: ENC(vknRT6U4I739rLIha9CvojM+4uFyXZLEYpO2HZayLnRak1HPW0K2yZ3vnQBA2foo)
# S3 credentials (Jasypt-encrypted)
s3:
accesskey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
secretkey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
hos:
# Maximum file size threshold
maxFileSize: 5073741800
# Large-file threshold
uploadThreshold: 104857600
# Keep-alive timeout for persistent connections
keepAliveTimeout: 60000
# Maximum number of objects per bulk-delete request
deleteMultipleNumber: 1000
# Maximum result count for list-objects and similar operations
maxResultLimit: 100000
# Maximum number of parts in a multipart upload
maxPartNumber: 10000
# Maximum number of append-upload operations
maxAppendNumber: 100000
# Quick file download: 1 = enabled; set to 0 on clusters where HBase memory is below 20G
isQuickDownloadFile: 0
# User whitelist (HBase namespaces) used for storage-quota lookup
users: default
# Proportion of storage reserved for metadata
metaProportion: 0.01
# Rate limiting: 0 = off, 1 = on
openRateLimiter: 0
# Rate limit in requests per second
rateLimiterQps: 20000
# Manual TTL: 1 = enabled (default is 1)
manualTtl: 1
# Number of threads executing TTL work
ttlThread: 10
# Maximum upload file size (Spring multipart limits)
spring:
servlet:
multipart:
max-file-size: 5GB
max-request-size: 5GB
# Prometheus settings
application:
name: HosServiceApplication
# Prometheus settings
management:
endpoints:
web:
exposure:
include: '*'
metrics:
tags:
application: ${spring.application.name}
logging:
config: ./config/log4j2-dev.xml

View File

@@ -0,0 +1,4 @@
-- Phoenix cleanup: remove the deprecated "tsg" schema objects.
-- Order matters: the view must be dropped before its schema can be dropped.
DROP view IF EXISTS "tsg"."report_result";
DROP schema IF EXISTS "tsg";
-- Replacement view in "tsg_galaxy"; column layout mirrors the old report_result view.
-- NOTE: "excute_sql" (sic) matches the stored HBase column qualifier — do not rename.
CREATE view IF NOT EXISTS "tsg_galaxy"."saved_query_result"( ROWKEY VARCHAR PRIMARY KEY, "detail"."excute_sql" VARCHAR, "detail"."read_rows" UNSIGNED_LONG, "detail"."result_id" UNSIGNED_INT, "response"."result" VARCHAR);