Add global configuration for the 24.09 Druid tasks and installation package
config-templates/configuration/config.yml (new file, 39 lines)
@@ -0,0 +1,39 @@
#============================Galaxy OLAP Configuration===============================#
#The default installation location
deploy_dir: /opt/tsg/olap

#The default data storage location, used for storing application data, logs and configuration files
data_dir: /data/tsg/olap

#Use commas (,) to separate the network segments that the firewall allows to access
allowed_ips: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16

#============================Keepalived Configuration===============================#
#Specify the interface virtual IP address. It is used to configure high availability for the OLAP query engine
#Only one IP address can be configured on the same network segment
#The following three IP addresses must be unique
vrrp_instance:
  default:
    virtual_ipaddress: 192.168.45.102
    interface: eth0
    virtual_router_id: 61
  oss:
    virtual_ipaddress: 192.168.45.102
    interface: eth0
    virtual_router_id: 62

#============================OSS Configuration===============================#
#HOS token
hos_token: fea0ee76be8147bd9b53ad995b7ef603

#HOS token (encrypted)
encrypted_hos_token: M+0IMYS1+XENltUe585ahhqwY+QZJmnvtxML1vXExB9aO+CPT3GfsP4rtbVuWXpf

#===========================Central Management Settings==============================#
#Central Management Server IP, used by the DoS detection task to fetch its policy.
cm_api: 192.168.44.3

#Central Management Server Token; each environment's token is different.
#Requires communication with CM developers.
cm_api_token: aa2bdec5518ad131f71944b13ce5c298&1&
config-templates/configuration/galaxy-data-platform.yml (new file, 141 lines)
@@ -0,0 +1,141 @@
zookeeper:
  #Running memory of the Zookeeper.
  java_opts: -Xmx2024m -Xms1024m

mariadb:
  #Used to cache data and index data from tables in the InnoDB storage engine.
  innodb_buffer_pool_size: 512M

nacos:
  #Running memory of the Nacos.
  java_opt: '-Xmx1024m -Xms1024m -Xmn256m'

druid:
  broker:
    #Running memory of the Druid-Broker.
    java_opts: -Xmx1024m -Xms1024m
    #Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
    MaxDirectMemorySize: 512m
    #This specifies a buffer size (less than 2GiB) for the storage of intermediate results
    druid.processing.buffer.sizeBytes: 50000000
    #The number of direct memory buffers available for merging query results.
    druid.processing.numMergeBuffers: 4
    #The number of processing threads to have available for parallel processing of segments.
    druid.processing.numThreads: 5
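    # A worked example of the sizing rule above, using this file's broker values:
    #   (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
    #   = (5 + 1) * 50000000 bytes = 300000000 bytes (~286 MiB),
    # which fits inside the broker's 512m MaxDirectMemorySize. The historical
    # settings below satisfy the same rule against their 1024m limit.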
  coordinator:
    #Running memory of the Druid-Coordinator.
    java_opts: -Xmx1024m -Xms1024m
  historical:
    #Running memory of the Druid-Historical.
    java_opts: -Xmx1024m -Xms1024m
    #The size of the process's temporary cache data on disk
    druid.segmentCache.locations: 300000000000
    #Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
    MaxDirectMemorySize: 1024m
    #This specifies a buffer size (less than 2GiB) for the storage of intermediate results
    druid.processing.buffer.sizeBytes: 50000000
    #The number of direct memory buffers available for merging query results.
    druid.processing.numMergeBuffers: 4
    #The number of processing threads to have available for parallel processing of segments.
    druid.processing.numThreads: 5
  middlemanager:
    #Running memory of the Druid-Middlemanager.
    java_opts: -Xmx1024m -Xms1024m
    druid.indexer.fork.property.druid.processing.numMergeBuffers: 2
    druid.indexer.fork.property.druid.processing.buffer.sizeBytes: 20000000
    druid.indexer.fork.property.druid.processing.numThreads: 1

hadoop:
  namenode:
    #Running memory of the Hadoop Namenode.
    java_opt: '-Xmx1024m -Xms1024m'
    #The number of Namenode RPC server threads that listen to requests from clients.
    dfs.namenode.handler.count: 30
  datanode:
    #Running memory of the Hadoop Datanode.
    java_opt: '-Xmx1024m -Xms1024m'
    #The number of server threads for the datanode.
    dfs.datanode.handler.count: 40
  journalnode:
    #Running memory of the Hadoop JournalNode.
    java_opt: '-Xmx1024m -Xms1024m'
  zkfc:
    #Running memory of the Hadoop DFSZKFailoverController.
    java_opt: '-Xmx1024m -Xms1024m'
  secondarynamenode:
    #Running memory of the standalone Hadoop SecondaryNamenode.
    java_opt: '-Xmx512m -Xms512m'
  yarn:
    resourcemanager:
      #Running memory of the Hadoop ResourceManager.
      java_opt: '-Xmx1024m -Xms1024m'
    nodemanager:
      #Running memory of the Hadoop NodeManager.
      java_opt: '-Xmx1024m -Xms1024m'
      #Amount of physical memory, in MB, that can be allocated for containers.
      yarn.nodemanager.resource.memory-mb: 16384
      #The maximum allocation for every container request at the RM, in MBs.
      yarn.scheduler.maximum-allocation-mb: 16384
      #Number of vcores that can be allocated for containers. This is used by the RM scheduler when allocating resources for containers.
      yarn.nodemanager.resource.cpu-vcores: 48
      #The maximum allocation for every container request at the RM in terms of virtual CPU cores.
      yarn.scheduler.maximum-allocation-vcores: 48
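      # A worked example with the values above: a NodeManager advertising 16384 MB
      # can host at most floor(16384 / 2048) = 8 concurrent 2 GB containers
      # (2048 MB is an illustrative container size, not a setting in this file),
      # and no single container request may exceed 16384 MB or 48 vcores.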

flink:
  #Total Process Memory size for the JobManager.
  jobmanager.memory.process.size: "{{ '1024M' if groups.hadoop|length > 1 else '2048M' }}"
  #Total Process Memory size for the TaskExecutors.
  taskmanager.memory.process.size: "{{ '1024M' if groups.hadoop|length > 1 else '12288M' }}"
  #This is the size of off-heap memory managed for sorting, hash tables, caching of intermediate results and the state backend.
  taskmanager.memory.managed.size: "{{ '128M' if groups.hadoop|length > 1 else '512M' }}"
  #Framework Off-Heap Memory size for TaskExecutors. This is the size of off-heap memory reserved for the TaskExecutor framework.
  taskmanager.memory.framework.off-heap.size: "{{ '128M' if groups.hadoop|length > 1 else '256M' }}"
  #JVM Metaspace Size for the TaskExecutors.
  taskmanager.memory.jvm-metaspace.size: "{{ '256M' if groups.hadoop|length > 1 else '1024M' }}"
  #Max Network Memory size for TaskExecutors. Network Memory is off-heap memory reserved for the ShuffleEnvironment.
  taskmanager.memory.network.max: 256M
  #The number of parallel operator or user function instances that a single TaskManager can run.
  #This value is typically proportional to the number of physical CPU cores that the TaskManager's machine has (e.g., equal to the number of cores, or half the number of cores).
  taskmanager.numberOfTaskSlots: "{{ '1' if groups.hadoop|length > 1 else '48' }}"
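  # The quoted values above are Jinja2 expressions evaluated by the deployment
  # tooling against the inventory: groups.hadoop|length is the host count of the
  # [hadoop] group in the hosts file. With more than one hadoop node the block
  # renders the per-node cluster profile (e.g. process.size 1024M, one task
  # slot); with a single node it renders the standalone profile (12288M, 48 slots).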

hbase:
  common:
    #The HBase resource isolation function is used to group tables for storage.
    enable_rsgroup: false
  hmaster:
    #Running memory of the HBase HMaster.
    java_opt: '-Xmx1024m -Xms1024m'
  regionserver:
    #Running memory of the HBase HRegionserver.
    java_opt: '-Xmx1024m -Xms1024m'
    #This defines the number of threads the region server keeps open to serve requests to tables. It should generally be set to (number of cores - 1).
    hbase.regionserver.handler.count: 40
    #If any one of a column family's HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.
    hbase.hregion.max.filesize: 107374182400
    #Indicates the memory used by all read caches. The value can be the actual memory value, expressed in MB.
    hbase.bucketcache.size: 100
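    # For reference: 107374182400 bytes is 100 GiB for the region split
    # threshold, and hbase.bucketcache.size: 100 means a 100 MB read cache.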

kafka:
  #Running memory of the Kafka.
  java_opt: '-Xmx1024m -Xms1024m'
  #The minimum age of a log file to be eligible for deletion due to age.
  log.retention.hours: 168
  #A size-based retention policy for logs, in bytes.
  log.retention.bytes: 10737418240
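  # For reference: 168 hours is 7 days, and 10737418240 bytes is 10 GiB;
  # whichever retention limit is reached first makes a log segment eligible
  # for deletion.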

clickhouse:
  #Limit on total memory usage. Zero means unlimited.
  max_server_memory_usage: 30000000000
  #Sets the number of threads performing background merges and mutations for tables with MergeTree engines.
  background_pool_size: 16

hos:
  #Running memory of the HOS service.
  java_opt: '-Xmx1024m -Xms1024m'
  #Quick file download. Used for HBase with memory larger than 20GB. open: 1, close: 0
  isQuickDownloadFile: 0
  #Whether to enable SSL. open: 1, close: 0
  enable_ssl: 0
  #The Nacos namespace where the configuration is stored
  nacos.config.namespace: prod
config-templates/configuration/hosts (new file, 96 lines)
@@ -0,0 +1,96 @@
#==============================================================================
# Basic Components
#
# Orchestration & Coordinator & Configuration & Cold Storage
#==============================================================================

#The cluster uses master-master replication mode; maximum 2 servers.
[mariadb]
192.168.45.102

#Apache Zookeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services.
#Cluster mode requires at least 3 servers; the number of nodes must be odd, e.g. 3 or 5 nodes.
[zookeeper]
192.168.45.102
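
#For a clustered deployment, this group would instead list an odd number of
#nodes, e.g. (illustrative addresses):
#  192.168.45.101
#  192.168.45.102
#  192.168.45.103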

#Alibaba Nacos is an easy-to-use dynamic service discovery, configuration and service management platform.
#Cluster mode requires at least 3 servers; multi-node HA mode.
[nacos]
192.168.45.102

# Apache Hadoop: at least 3 servers are required in a cluster.
# ## HDFS (Hadoop Distributed File System): The cluster setup includes two NameNodes for high availability and a specified number of DataNodes, which handle the storage and retrieval of data across the distributed system.
# ## Hadoop YARN: This is the resource management and job scheduling component of the Hadoop framework. A YARN cluster is composed of two ResourceManagers (RMs) for high availability and a specified number of NodeManagers (NMs). YARN facilitates the execution of distributed processing tasks and provides the runtime environment for Apache Flink and Groot-Stream.
# ## Apache Flink: Flink integrates with the Hadoop ecosystem, leveraging YARN for resource allocation and providing real-time data processing capabilities.
[hadoop]
192.168.45.102

#==============================================================================
# BigData Processing Components
#
# Big data is a term that refers to the massive volume, variety, and velocity of data that is generated from various sources and needs to be stored, processed, and analyzed efficiently.
# The Big Data processing component is used to provide a platform for fast and efficient processing.
#==============================================================================

#Apache Kafka is a distributed event streaming platform, used for high-performance data pipelines and streaming analytics.
#Cluster mode requires at least 3 servers; by default, CMAK (a management tool) is installed on the first server.
[kafka]
192.168.45.102

#==============================================================================
# Analytic Storage Components
#
# This is a data storage solution designed to support large-scale data analysis and data mining workloads.
# The Analytic Storage component offers high performance, scalability, and flexibility to meet the demands of processing vast amounts of structured and unstructured data.
#==============================================================================

#Apache HBase hosts very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware.
#Cluster mode requires at least 3 servers; an HBase cluster consists of three HMasters and a certain number of HRegionServer nodes.
[hbase]
192.168.45.102

#Apache Druid is a high performance, real-time analytics database that delivers sub-second queries on streaming and batch data at scale and under load.
#Cluster mode requires at least 3 servers; a Druid cluster consists of two master/query nodes and a certain number of worker nodes.
[druid]
192.168.45.102

#Yandex ClickHouse is the fastest and most resource efficient open-source database for real-time apps and analytics.
#Cluster mode requires at least 3 servers; a ClickHouse cluster consists of two query nodes and a certain number of data nodes.
[clickhouse]
192.168.45.102

#ArangoDB is a scalable graph database system to drive value from connected data, faster.
#Only single-server deployment is supported.
[arangodb]
192.168.45.102

#==============================================================================
# OLAP Self-developed Services
#==============================================================================

#The default proxy, including Nginx/Keepalived; in standalone mode, only Nginx.
#A maximum of two nodes.
[loadbalancer]
192.168.45.102

#Galaxy-hos-service is a distributed object storage service.
#Included components: Keepalived/Nginx/Galaxy-hos-service; in standalone mode, only Galaxy-hos-service/Nginx.
#Cluster mode requires at least 2 servers; the keepalived and nginx services are deployed on the first two nodes by default.
[galaxy_hos_service]
192.168.45.102

#The query gateway; provides a unified query entry.
[galaxy_qgw_service]
192.168.45.102

#A lightweight distributed task scheduling framework.
#Included components: Galaxy-job-admin/Galaxy-job-executor
[galaxy_job_service]
192.168.45.102

#The report execution service.
[saved_query_scheduler]
192.168.45.102
config-templates/configuration/tsg-olap.yml (new file, 215 lines)
@@ -0,0 +1,215 @@
config_namespace: "prod"

# Name of the data center
data_center_name: xxg
data_center_id_num: 1

galaxy_qgw_service:
  # Running memory of the Galaxy-qgw-service.
  java_opts: "-Xms1024m -Xmx3120m -XX:+ExitOnOutOfMemoryError"

galaxy_job_service:
  # Running memory of the Galaxy-job-admin.
  admin_java_opts: "-Xms512m -Xmx1024m"
  # Running memory of the Galaxy-job-executor.
  executor_java_opts: "-Xms512m -Xmx1024m"

saved_query_scheduler:
  # Running memory of the saved-query-scheduler.
  java_opts: "-Xms512m -Xmx1024m"

druid:
  # Druid job parallelism
  index_kafka_statistics_rule:
    taskCount: 1
    druid.indexer.runner.javaOpts: "-server -Xms1g -Xmx2g -XX:MaxDirectMemorySize=1g"
  index_kafka_application_protocol_stat:
    taskCount: 1
  index_kafka_dos_protection_rule_hits:
    taskCount: 1
  index_kafka_dos_protection_rule_metric:
    taskCount: 1
  index_kafka_dos_sketch_top_server_ip:
    taskCount: 1
  index_kafka_monitor_rule_hits:
    taskCount: 1
  index_kafka_object_statistics:
    taskCount: 1
  index_kafka_proxy_rule_hits:
    taskCount: 1
  index_kafka_security_rule_hits:
    taskCount: 1
  index_kafka_service_chaining_rule_hits:
    taskCount: 1
  index_kafka_service_function_status:
    taskCount: 1
  index_kafka_statistics_rule_hits:
    taskCount: 1
  index_kafka_top_client_countries:
    taskCount: 1
  index_kafka_top_client_ips:
    taskCount: 1
  index_kafka_top_external_ips:
    taskCount: 1
  index_kafka_top_internal_ips:
    taskCount: 1
  index_kafka_top_server_countries:
    taskCount: 1
  index_kafka_top_server_domains:
    taskCount: 1
  index_kafka_top_server_fqdns:
    taskCount: 1
  index_kafka_top_server_ips:
    taskCount: 1
  index_kafka_traffic_general_stat:
    taskCount: 1
  index_kafka_traffic_shaping_rule_hits:
    taskCount: 1
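  # taskCount is the number of parallel Kafka ingestion tasks per supervisor;
  # to scale a busy datasource, raise its value, e.g. (illustrative only):
  #   index_kafka_statistics_rule:
  #     taskCount: 2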

# default value
default_init:
  flink:
    env:
      parallelism: 1
      taskmanager.memory.process.size: 2048m
      taskmanager.memory.jvm-metaspace.size: 256m
      taskmanager.numberOfTaskSlots: 1
      taskmanager.memory.framework.off-heap.size: 512m
    config:
      topology: |
        topology:
          - name: kafka_source
            downstream: [etl_processor]
          - name: etl_processor
            downstream: [clickhouse_sink]
          - name: clickhouse_sink
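  # Jobs below reuse this block via Jinja2 references such as
  # env: "{{ default_init.flink.env }}" and
  # topology: "{{ default_init.flink.config.topology }}",
  # so a job only restates the keys that differ from these defaults.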

flink:
  app-protocol-stat-traffic-merge:
    agg_app_protocol_traffic:
      env:
        parallelism: 1
        taskmanager.memory.process.size: 2048m
        taskmanager.memory.jvm-metaspace.size: 256m
        taskmanager.numberOfTaskSlots: 1
        taskmanager.memory.framework.off-heap.size: 512m
      config:
        template: agg_app_protocol_traffic

  dos-detection:
    detection_dos_attack:
      env: "{{ default_init.flink.env }}"
      config:
        template: detection_dos_attack

  #--------------------------------------------- file chunk
  file-chunk-combiner:
    agg_traffic_eml_file_chunk_combiner:
      env: "{{ default_init.flink.env }}"
      config:
        template: agg_traffic_file_chunk_combiner
        kafka_source_topic: TRAFFIC-EML-FILE-STREAM-RECORD
        hos_sink_bucket: traffic_eml_file_bucket
        combiner_window_parallelism: 1
        hos_sink_parallelism: 1
    agg_traffic_http_file_chunk_combiner:
      env: "{{ default_init.flink.env }}"
      config:
        template: agg_traffic_file_chunk_combiner
        kafka_source_topic: TRAFFIC-HTTP-FILE-STREAM-RECORD
        hos_sink_bucket: traffic_http_file_bucket
        combiner_window_parallelism: 1
        hos_sink_parallelism: 1
    agg_traffic_policy_capture_file_chunk_combiner:
      env: "{{ default_init.flink.env }}"
      config:
        template: agg_traffic_file_chunk_combiner
        kafka_source_topic: TRAFFIC-POLICY-CAPTURE-FILE-STREAM-RECORD
        hos_sink_bucket: traffic_policy_capture_file_bucket
        combiner_window_parallelism: 1
        hos_sink_parallelism: 1
    agg_traffic_rtp_file_chunk_combiner:
      env: "{{ default_init.flink.env }}"
      config:
        template: agg_traffic_file_chunk_combiner
        kafka_source_topic: TRAFFIC-RTP-FILE-STREAM-RECORD
        hos_sink_bucket: traffic_rtp_file_bucket
        combiner_window_parallelism: 1
        hos_sink_parallelism: 1

  sip-rtp-correlation:
    correlation_sip_rtp_session:
      env: "{{ default_init.flink.env }}"
      config:
        template: correlation_sip_rtp_session

#--------------------------------------------------------------- grootstream
groot-stream:
  dos_event_kafka_to_clickhouse:
    env: "{{ default_init.flink.env }}"
    config:
      template: dos_event.yaml.j2
      kafka_source_topic: DOS-EVENT
      kafka_sink_topic: DOS-EVENT
      topology: |
        topology:
          - name: kafka_source
            downstream: [clickhouse_sink]
          - name: clickhouse_sink

  etl_proxy_event_kafka_to_clickhouse:
    env: "{{ default_init.flink.env }}"
    config:
      template: proxy_event.yaml.j2
      kafka_source_topic: PROXY-EVENT
      kafka_sink_topic: PROXY-EVENT
      topology: "{{ default_init.flink.config.topology }}"

  etl_session_record_kafka_to_clickhouse:
    env:
      parallelism: 1
      taskmanager.memory.process.size: 3072m
      taskmanager.memory.jvm-metaspace.size: 128m
      taskmanager.numberOfTaskSlots: 1
      taskmanager.memory.framework.off-heap.size: 512m
    config:
      template: session_record.yaml.j2
      kafka_source_topic: SESSION-RECORD
      kafka_sink_topic: SESSION-RECORD
      topology: "{{ default_init.flink.config.topology }}"

  etl_transaction_record_kafka_to_clickhouse:
    env:
      parallelism: 1
      taskmanager.memory.process.size: 3072m
      taskmanager.memory.jvm-metaspace.size: 128m
      taskmanager.numberOfTaskSlots: 1
      taskmanager.memory.framework.off-heap.size: 512m
    config:
      template: transaction_record.yaml.j2
      kafka_source_topic: TRANSACTION-RECORD
      kafka_sink_topic: TRANSACTION-RECORD
      topology: "{{ default_init.flink.config.topology }}"

  etl_voip_record_kafka_to_clickhouse:
    env: "{{ default_init.flink.env }}"
    config:
      template: voip_record.yaml.j2
      kafka_source_topic: VOIP-CONVERSATION-RECORD
      kafka_sink_topic: VOIP-CONVERSATION-RECORD
      topology: "{{ default_init.flink.config.topology }}"

  etl_datapath_telemetry_record_kafka_to_clickhouse:
    env: "{{ default_init.flink.env }}"
    config:
      template: datapath_telemetry_record.yaml.j2
      kafka_source_topic: DATAPATH-TELEMETRY-RECORD
      kafka_sink_topic: DATAPATH-TELEMETRY-RECORD
      topology: "{{ default_init.flink.config.topology }}"

  etl_traffic_sketch_metric_kafka_to_clickhouse:
    env: "{{ default_init.flink.env }}"
    config:
      template: traffic_sketch_metric.yaml.j2
      kafka_source_topic: TRAFFIC-SKETCH-METRIC
      kafka_sink_topic: TRAFFIC-SKETCH-METRIC
      topology: "{{ default_init.flink.config.topology }}"