23.12 增加hbase、hos配置文件,修改hbase、phoenix建表语句

This commit is contained in:
houjinchuan
2023-12-15 16:28:24 +08:00
parent 079980005a
commit e625c5f0dc
6 changed files with 145 additions and 178 deletions

View File

@@ -5,11 +5,3 @@ create_namespace 'tsg_galaxy'
create 'tsg:report_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}
create 'dos:ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood','DNS Flood'
create 'tsg_galaxy:relation_account_framedip', {NAME => 'radius', VERSIONS => 1,TTL=> '2592000'}, {NAME => 'common', VERSIONS => 1,TTL=> '2592000'}
create 'tsg_galaxy:relation_framedip_account', {NAME => 'radius', VERSIONS => 1,TTL=> '2592000'}, {NAME => 'common', VERSIONS => 1,TTL=> '2592000'}
create 'tsg_galaxy:recommendation_app_cip', {NAME => 'common', VERSIONS => 1}
create 'tsg_galaxy:relation_user_teid',{NAME=>'gtp',TTL=> '604800'}, {NAME => 'common',TTL=> '604800'}
create 'tsg_galaxy:gtpc_knowledge_base',{NAME => 'gtp',TTL=> '604800'}, {NAME => 'common',TTL=> '604800'},SPLITS => ['1','2','3']

View File

@@ -3,34 +3,6 @@ CREATE schema IF NOT EXISTS "tsg";
CREATE view "tsg"."report_result"( ROWKEY VARCHAR PRIMARY KEY, "detail"."excute_sql" VARCHAR, "detail"."read_rows" UNSIGNED_LONG, "detail"."result_id" UNSIGNED_INT, "response"."result" VARCHAR);
CREATE view IF NOT EXISTS "tsg_galaxy"."relation_account_framedip"(
ROWKEY VARCHAR PRIMARY KEY,
"common"."vsys_id" UNSIGNED_INT,
"radius"."account" VARCHAR,
"radius"."framed_ip" VARCHAR,
"radius"."first_found_time" UNSIGNED_LONG,
"radius"."last_update_time" UNSIGNED_LONG,
"radius"."acct_status_type" UNSIGNED_INT);
CREATE view "tsg_galaxy"."recommendation_app_cip"(
ROWKEY VARCHAR PRIMARY KEY,
"common"."app_label" VARCHAR,
"common"."client_ip_list" VARCHAR,
"common"."last_update_time" UNSIGNED_LONG);
CREATE view IF NOT EXISTS "tsg_galaxy"."gtpc_knowledge_base"(
ROWKEY VARCHAR PRIMARY KEY,
"common"."vsys_id" UNSIGNED_INT,
"gtp"."teid" UNSIGNED_LONG,
"gtp"."uplink_teid" UNSIGNED_LONG,
"gtp"."downlink_teid" UNSIGNED_LONG,
"gtp"."apn" VARCHAR,
"gtp"."phone_number" VARCHAR,
"gtp"."imsi" VARCHAR,
"gtp"."imei" VARCHAR,
"gtp"."msg_type" UNSIGNED_INT,
"gtp"."last_update_time" UNSIGNED_LONG);
CREATE table IF NOT EXISTS "tsg_galaxy"."job_result"(
ROWKEY VARCHAR PRIMARY KEY,
"detail"."is_error" BOOLEAN,

View File

@@ -48,7 +48,7 @@
<property>
<name>hbase.server.keyvalue.maxsize</name>
<value>1073741824</value>
<value>5368709120</value>
</property>
<property>
@@ -128,12 +128,16 @@
<property>
<name>hbase.hregion.memstore.flush.size</name>
<value>134217728</value>
<value>33554432</value>
<description>
memstore的大小超过该限制(单位:byte)后将被flush到磁盘。这个大小由一个线程间断性地检查,检查的间隔由
hbase.server.thread.wakefrequency决定
</description>
</property>
<property>
<name>hbase.hstore.flusher.count</name>
<value>4</value>
<value>2</value>
</property>
<property>
@@ -164,6 +168,12 @@
<property>
<name>hfile.block.cache.size</name>
<value>0.3</value>
<description>
Percentage of maximum heap (-Xmx setting) to allocate to block cache
used by a StoreFile. Default of 0.4 means allocate 40%.
Set to 0 to disable but it's not recommended; you need at least
enough cache to hold the storefile indices.
</description>
</property>
<property>
@@ -197,7 +207,7 @@
<property>
<name>hbase.hstore.blockingWaitTime</name>
<value>20000</value>
<value>30000</value>
<description>
The time an HRegion will block updates for after hitting the StoreFile
limit defined by hbase.hstore.blockingStoreFiles.
@@ -219,12 +229,17 @@
<property>
  <name>hbase.hregion.max.filesize</name>
  <value>10737418240</value>
  <value>3221225472</value>
</property>
<property>
<name>hbase.regionserver.regionSplitLimit</name>
<value>500</value>
<value>1000</value>
</property>
<property>
<name>hbase.replication</name>
<value>true</value>
</property>
<property>
@@ -253,50 +268,4 @@
  <value>true</value>
</property>
<!-- storefile数量大于该值执行compact -->
<property>
<name>hbase.hstore.compactionThreshold</name>
<value>5</value>
</property>
<property>
<name>hbase.hstore.compaction.min</name>
<value>5</value>
</property>
<!-- 最多选取多少个storefile进行compact -->
<property>
<name>hbase.hstore.compaction.max</name>
<value>20</value>
</property>
<property>
<name>hbase.hstore.compaction.min.size</name>
<value>134217728</value>
</property>
<property>
<name>hbase.hstore.compaction.max.size</name>
<value>10737418240</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.throttle</name>
<value>2684354560</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.small</name>
<value>5</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.large</name>
<value>5</value>
</property>
<property>
<name>hbase.hstore.compaction.kv.max</name>
<value>10</value>
</property>
<property>
<name>hbase.mob.compaction.threads.max</name>
<value>2</value>
</property>
<property>
<name>hbase.hregion.majorcompaction</name>
<value>604800000</value>
</property>
</configuration>

View File

@@ -1,4 +1,3 @@
#服务端口
server:
port: 8186
tomcat:
@@ -9,18 +8,21 @@ tomcat:
#hbase参数
hbase:
zookeeper_quorum: 192.168.44.11,192.168.44.14,192.168.44.15
zookeeper_port: 2181
zookeeper_property_clientPort: 2181
zookeeper_znode_parent: /hbase
client_retries_number: 9
rpc_timeout: 100000
connect_pool: 10
client_write_buffer: 10485760
client_key_value_maxsize: 1073741824
mob_threshold: 10485760
client_keyvalue_maxsize: 1024000000
#批量获取数量
get_batch: 10000
#part 最大数据量
max_parts: 100000
maxParts: 100000
#每次获取的part数
get_part_batch: 10
get_part_batch: 1000
#每次追加是否更新主文件
isUpdate: 1
#hbase索引表前缀,前缀为以下的都为索引表
time_index_table_prefix: index_time_
filename_index_table_prefix: index_filename_
@@ -31,22 +33,25 @@ hbase:
filename_head: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
part_head: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
#获取文件大小的目录
data_path: /hbase
#1是集群0是单机
standalone: 1
hbasePath: /hbase/hbase-2.2.3
#1是集群0是单机,主要针对存储配额获取方式
standone: 1
#hadoop集群namenode节点
hadoop_name_nodes: 192.168.44.11,192.168.44.14
namenodes: 192.168.44.11,192.168.44.14
#hadoop端口
hadoop_port: 9000
hadoop_user: root
hadoop_default_fs: hdfs://ns1
hadoop_name_services: ns1
hadoop_name_nodes_ns1: nn1,nn2
hadoop_defaultFS: hdfs://ns1
hadoop_nameservices: ns1
hadoop_namenodes_ns1: nn1,nn2
hadoop_replication: 2
#建表时是否打开hbase wal1打开0关闭
openWal: 0
#ttl相关参数
ttl_scan_batch: 1000
ttl_scan_batch: 500
ttl_scan_caching: 1000
ttl_delete_batch: 1000
#是否打开验证,0打开;打开后需要使用S3身份验证或者token访问服务
auth:
open: 0
@@ -54,13 +59,9 @@ auth:
token: ENC(vknRT6U4I739rLIha9CvojM+4uFyXZLEYpO2HZayLnRak1HPW0K2yZ3vnQBA2foo)
#s3验证
s3:
accesskey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
secretkey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
accessKey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
secretKey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
hos:
#文件大小阈值
maxFileSize: 5073741800
#大文件阈值
uploadThreshold: 104857600
#长连接超时时间
keepAliveTimeout: 60000
#批量删除对象的最大数量
@@ -70,21 +71,33 @@ hos:
#分块上传的最大分块数
maxPartNumber: 10000
#追加上传的最大次数
maxAppendNumber: 100000
maxPosition: 100000
#存放对象的用户自定义元数据的请求头
metaHeader: x-hos-meta-message
#存放对象信息的请求头
objectInfo: x-hos-object-info
#是否快速下载文件1打开hbase内存小于20G的集群设为0
isQuickDownloadFile: 0
#是否打开对象列表查询功能1打开
simple: 1
#用户白名单hbase的namespace获取存储配额
users: default
#元数据存储占比
metaProportion: 0.02
metaProportion: 0.03
#是否打开限流,0:关闭1:打开
openRateLimiter: 0
openRateLimiter: 1
#限流每秒请求数
rateLimiterQps: 20000
#是否打开手动ttl1打开默认为1
manualTtl: 1
#展示追加文件丢失块的最大数量
lostPartsCount: 10
#执行ttl的线程数
ttlThread: 10
thread: 10
#是否打开手动ttl1打开默认为1
manualTtl: 0
#文件最大值
maxFileSize: 5368709000
#小文件阈值
uploadThreshold: 10485760
#设置上传文件大小的最大值
spring:
servlet:

View File

@@ -87,7 +87,7 @@
<property>
<name>hbase.mob.file.cache.size</name>
<value>10000</value>
<value>5000</value>
<description>
Number of opened file handlers to cache.
A larger value will benefit reads by providing more file handlers per mob
@@ -128,16 +128,12 @@
<property>
<name>hbase.hregion.memstore.flush.size</name>
<value>33554432</value>
<description>
memstore的大小超过该限制(单位:byte)后将被flush到磁盘。这个大小由一个线程间断性地检查,检查的间隔由
hbase.server.thread.wakefrequency决定
</description>
<value>134217728</value>
</property>
<property>
<name>hbase.hstore.flusher.count</name>
<value>2</value>
<value>4</value>
</property>
<property>
@@ -168,21 +164,30 @@
<property>
<name>hfile.block.cache.size</name>
<value>0.3</value>
<description>
Percentage of maximum heap (-Xmx setting) to allocate to block cache
used by a StoreFile. Default of 0.4 means allocate 40%.
Set to 0 to disable but it's not recommended; you need at least
enough cache to hold the storefile indices.
</description>
</property>
<property>
<name>hbase.bucketcache.ioengine</name>
<value>offheap</value>
</property>
<property>
<name>hbase.bucketcache.size</name>
<value>512</value>
</property>
<property>
<name>hbase.hregion.memstore.block.multiplier</name>
<value>2</value>
<value>4</value>
</property>
<property>
<name>hbase.ipc.server.max.callqueue.length</name>
<value>10000</value>
</property>
<property>
<name>hbase.ipc.server.max.callqueue.size</name>
<value>1073741824</value>
</property>
@@ -207,7 +212,7 @@
<property>
<name>hbase.hstore.blockingWaitTime</name>
<value>30000</value>
<value>20000</value>
<description>
The time an HRegion will block updates for after hitting the StoreFile
limit defined by hbase.hstore.blockingStoreFiles.
@@ -229,17 +234,12 @@
<property>
  <name>hbase.hregion.max.filesize</name>
  <value>3221225472</value>
  <value>10737418240</value>
</property>
<property>
<name>hbase.regionserver.regionSplitLimit</name>
<value>1000</value>
</property>
<property>
<name>hbase.replication</name>
<value>true</value>
<value>500</value>
</property>
<property>
@@ -268,4 +268,38 @@
  <value>true</value>
</property>
<!-- storefile数量大于该值执行compact -->
<property>
<name>hbase.hstore.compactionThreshold</name>
<value>5</value>
</property>
<property>
<name>hbase.hstore.compaction.min</name>
<value>5</value>
</property>
<!-- 最多选取多少个storefile进行compact -->
<property>
<name>hbase.hstore.compaction.max</name>
<value>20</value>
</property>
<property>
<name>hbase.hstore.compaction.min.size</name>
<value>134217728</value>
</property>
<property>
<name>hbase.hstore.compaction.max.size</name>
<value>10737418240</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.small</name>
<value>5</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.large</name>
<value>5</value>
</property>
<property>
<name>hbase.hregion.majorcompaction</name>
<value>604800000</value>
</property>
</configuration>

View File

@@ -1,3 +1,4 @@
#服务端口
server:
port: 8186
tomcat:
@@ -8,21 +9,18 @@ tomcat:
#hbase参数
hbase:
zookeeper_quorum: 192.168.44.11,192.168.44.14,192.168.44.15
zookeeper_property_clientPort: 2181
zookeeper_port: 2181
zookeeper_znode_parent: /hbase
client_retries_number: 9
rpc_timeout: 100000
connect_pool: 10
client_write_buffer: 10485760
client_keyvalue_maxsize: 1024000000
#批量获取数量
get_batch: 10000
#part 最大数据量
maxParts: 100000
client_key_value_maxsize: 1073741824
mob_threshold: 10485760
#part的最大数量
max_parts: 100000
#每次获取的part数
get_part_batch: 1000
#每次追加是否更新主文件
isUpdate: 1
get_part_batch: 10
#hbase索引表前缀,前缀为以下的都为索引表
time_index_table_prefix: index_time_
filename_index_table_prefix: index_filename_
@@ -33,25 +31,22 @@ hbase:
filename_head: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
part_head: 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
#获取文件大小的目录
hbasePath: /hbase/hbase-2.2.3
#1是集群0是单机,主要针对存储配额获取方式
standone: 1
data_path: /hbase
#1是集群0是单机
standalone: 1
#hadoop集群namenode节点
namenodes: 192.168.44.11,192.168.44.14
hadoop_name_nodes: 192.168.44.11,192.168.44.14
#hadoop端口
hadoop_port: 9000
hadoop_user: root
hadoop_defaultFS: hdfs://ns1
hadoop_nameservices: ns1
hadoop_namenodes_ns1: nn1,nn2
hadoop_default_fs: hdfs://ns1
hadoop_name_services: ns1
hadoop_name_nodes_ns1: nn1,nn2
hadoop_replication: 2
#建表时是否打开hbase wal1打开0关闭
openWal: 0
#ttl相关参数
ttl_scan_batch: 500
ttl_scan_batch: 1000
ttl_scan_caching: 1000
ttl_delete_batch: 1000
#是否打开验证,0打开;打开后需要使用S3身份验证或者token访问服务
auth:
open: 0
@@ -59,9 +54,13 @@ auth:
token: ENC(vknRT6U4I739rLIha9CvojM+4uFyXZLEYpO2HZayLnRak1HPW0K2yZ3vnQBA2foo)
#s3验证
s3:
accessKey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
secretKey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
accesskey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
secretkey: ENC(FUQDvVP+zqCiwHQhXcRvbw==)
hos:
#文件大小阈值
maxFileSize: 5073741800
#大文件阈值
uploadThreshold: 104857600
#长连接超时时间
keepAliveTimeout: 60000
#批量删除对象的最大数量
@@ -71,39 +70,27 @@ hos:
#分块上传的最大分块数
maxPartNumber: 10000
#追加上传的最大次数
maxPosition: 100000
#存放对象的用户自定义元数据的请求头
metaHeader: x-hos-meta-message
#存放对象信息的请求头
objectInfo: x-hos-object-info
maxAppendNumber: 100000
#是否快速下载文件1打开hbase内存小于20G的集群设为0
isQuickDownloadFile: 0
#是否打开对象列表查询功能1打开
simple: 1
#用户白名单hbase的namespace获取存储配额
users: default
#元数据存储占比
metaProportion: 0.03
metaProportion: 0.01
#是否打开限流,0:关闭1:打开
openRateLimiter: 1
openRateLimiter: 0
#限流每秒请求数
rateLimiterQps: 20000
#展示追加文件丢失块的最大数量
lostPartsCount: 10
#执行ttl的线程数
thread: 10
#是否打开手动ttl1打开默认为1
manualTtl: 0
#文件最大值
maxFileSize: 5368709000
#小文件阈值
uploadThreshold: 10485760
manualTtl: 1
#执行ttl的线程数
ttlThread: 10
#设置上传文件大小的最大值
spring:
servlet:
multipart:
max-file-size: 1024MB
max-request-size: 1024MB
max-file-size: 5GB
max-request-size: 5GB
#Prometheus参数
application:
name: HosServiceApplication