Manage the global business configuration file; default parameters for Flink and Druid jobs can be configured here.
tsg_olap/installation/configuration/tsg-olap.yml (new file, 181 lines)
@@ -0,0 +1,181 @@
config_namespace: "prod"

# Name of the data center
data_center_name: xxg
data_center_id_num: 2

galaxy_qgw_service:
  # Running memory of the Galaxy-qgw-service.
  java_opts: "-Xms1024m -Xmx3120m -XX:+ExitOnOutOfMemoryError"

galaxy_job_service:
  # Running memory of the Galaxy-job-admin.
  admin_java_opts: "-Xms512m -Xmx740m"
  # Running memory of the Galaxy-job-executor.
  executor_java_opts: "-Xms512m -Xmx1024m"

saved_query_scheduler:
  # Running memory of the saved-query-scheduler.
  java_opts: "-Xms512m -Xmx1024m"

druid:
  # Druid job parallelism
  index_kafka_statistics_rule:
    taskCount: 1
    druid.indexer.runner.javaOpts: "-server -Xms1g -Xmx2g -XX:MaxDirectMemorySize=1g"
  index_kafka_application_protocol_stat:
    taskCount: 1
  index_kafka_dos_protection_rule_hits:
    taskCount: 1
  index_kafka_dos_protection_rule_metric:
    taskCount: 1
  index_kafka_dos_sketch_top_server_ip:
    taskCount: 1
  index_kafka_monitor_rule_hits:
    taskCount: 1
  index_kafka_object_statistics:
    taskCount: 1
  index_kafka_proxy_rule_hits:
    taskCount: 1
  index_kafka_security_rule_hits:
    taskCount: 1
  index_kafka_service_chaining_rule_hits:
    taskCount: 1
  index_kafka_service_function_status:
    taskCount: 1
  index_kafka_statistics_rule_hits:
    taskCount: 1
  index_kafka_top_client_countries:
    taskCount: 1
  index_kafka_top_client_ips:
    taskCount: 1
  index_kafka_top_external_ips:
    taskCount: 1
  index_kafka_top_internal_ips:
    taskCount: 1
  index_kafka_top_server_countries:
    taskCount: 1
  index_kafka_top_server_domains:
    taskCount: 1
  index_kafka_top_server_fqdns:
    taskCount: 1
  index_kafka_top_server_ips:
    taskCount: 1
  index_kafka_traffic_general_stat:
    taskCount: 1
  index_kafka_traffic_shaping_rule_hits:
    taskCount: 1

# default value
default_init:
  flink:
    env:
      parallelism: 1
      taskmanager.memory.process.size: 2048m
      taskmanager.memory.jvm-metaspace.size: 256m
      taskmanager.numberOfTaskSlots: 1
      taskmanager.memory.framework.off-heap.size: 512m
    config:
      topology: |
        topology:
          - name: kafka_source
            downstream: [etl_processor]
          - name: etl_processor
            downstream: [clickhouse_sink]
          - name: clickhouse_sink

flink:
  app-protocol-stat-traffic-merge:
    agg_app_protocol_traffic:
      env: "{{ default_init.flink.env }}"
      config:
        template: agg_app_protocol_traffic

  dos-detection:
    detection_dos_attack:
      env: "{{ default_init.flink.env }}"
      config:
        template: detection_dos_attack

  file-chunk-combiner:
    agg_traffic_file_chunk_combine:
      env: "{{ default_init.flink.env }}"
      config:
        template: agg_traffic_file_chunk_combine

  sip-rtp-correlation:
    correlation_sip_rtp_session:
      env: "{{ default_init.flink.env }}"
      config:
        template: correlation_sip_rtp_session

  groot-stream:
    dos_event_kafka_to_clickhouse:
      env: "{{ default_init.flink.env }}"
      config:
        template: dos_event.yaml.j2
        kafka_source_topic: DOS-EVENT
        kafka_sink_topic: DOS-EVENT
        topology: |
          topology:
            - name: kafka_source
              downstream: [clickhouse_sink]
            - name: clickhouse_sink

    etl_proxy_event_kafka_to_clickhouse:
      env: "{{ default_init.flink.env }}"
      config:
        template: proxy_event.yaml.j2
        kafka_source_topic: PROXY-EVENT
        kafka_sink_topic: PROXY-EVENT
        topology: "{{ default_init.flink.config.topology }}"

    etl_session_record_kafka_to_clickhouse:
      env:
        parallelism: 1
        taskmanager.memory.process.size: 3072m
        taskmanager.memory.jvm-metaspace.size: 128m
        taskmanager.numberOfTaskSlots: 1
        taskmanager.memory.framework.off-heap.size: 512m
      config:
        template: session_record.yaml.j2
        kafka_source_topic: SESSION-RECORD
        kafka_sink_topic: SESSION-RECORD
        topology: "{{ default_init.flink.config.topology }}"

    etl_transaction_record_kafka_to_clickhouse:
      env:
        parallelism: 1
        taskmanager.memory.process.size: 3072m
        taskmanager.memory.jvm-metaspace.size: 128m
        taskmanager.numberOfTaskSlots: 1
        taskmanager.memory.framework.off-heap.size: 512m
      config:
        template: transaction_record.yaml.j2
        kafka_source_topic: TRANSACTION-RECORD
        kafka_sink_topic: TRANSACTION-RECORD
        topology: "{{ default_init.flink.config.topology }}"

    etl_voip_record_kafka_to_clickhouse:
      env: "{{ default_init.flink.env }}"
      config:
        template: voip_record.yaml.j2
        kafka_source_topic: VOIP-CONVERSATION-RECORD
        kafka_sink_topic: VOIP-CONVERSATION-RECORD
        topology: "{{ default_init.flink.config.topology }}"

    etl_datapath_telemetry_record_kafka_to_clickhouse:
      env: "{{ default_init.flink.env }}"
      config:
        template: datapath_telemetry_record.yaml.j2
        kafka_source_topic: DATAPATH-TELEMETRY-RECORD
        kafka_sink_topic: DATAPATH-TELEMETRY-RECORD
        topology: "{{ default_init.flink.config.topology }}"

    etl_traffic_sketch_metric_kafka_to_clickhouse:
      env: "{{ default_init.flink.env }}"
      config:
        template: traffic_sketch_metric.yaml.j2
        kafka_source_topic: TRAFFIC-SKETCH-METRIC
        kafka_sink_topic: TRAFFIC-SKETCH-METRIC
        topology: "{{ default_init.flink.config.topology }}"
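Note on the "{{ ... }}" values: entries such as env: "{{ default_init.flink.env }}" and topology: "{{ default_init.flink.config.topology }}" are template-style references back to the default_init block, so the default Flink memory settings and the default topology are declared once and reused per job. This diff does not show how the references are resolved at deploy time; the sketch below is a minimal, hypothetical resolver in Python (the function names, the use of PyYAML, and the exact resolution mechanism are assumptions, not part of this repository) that treats each "{{ path }}" string as a dotted lookup into the same document.

# Hypothetical sketch: expand "{{ default_init... }}" references in tsg-olap.yml.
# Assumes the file is loaded as plain YAML and every reference is a simple dotted path.
import re
import yaml  # PyYAML

REF = re.compile(r"^\{\{\s*([\w.\-]+)\s*\}\}$")

def lookup(root, dotted):
    """Walk a dotted path such as 'default_init.flink.env' through nested dicts."""
    node = root
    for part in dotted.split("."):
        node = node[part]
    return node

def resolve(node, root):
    """Recursively replace '{{ path }}' string values with the referenced value."""
    if isinstance(node, dict):
        return {key: resolve(value, root) for key, value in node.items()}
    if isinstance(node, list):
        return [resolve(item, root) for item in node]
    if isinstance(node, str):
        match = REF.match(node.strip())
        if match:
            return lookup(root, match.group(1))
    return node

with open("tsg_olap/installation/configuration/tsg-olap.yml") as handle:
    cfg = yaml.safe_load(handle)

resolved = resolve(cfg, cfg)
# Jobs that referenced the defaults now carry concrete values, e.g. the default parallelism:
print(resolved["flink"]["dos-detection"]["detection_dos_attack"]["env"]["parallelism"])

Jobs that need different resources, such as etl_session_record_kafka_to_clickhouse and etl_transaction_record_kafka_to_clickhouse, spell out their own env block instead of referencing the default.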