Initial version of the Ansible playbooks for deploying each component
11
Apache Druid/26.0.0/druid/hosts
Normal file
@@ -0,0 +1,11 @@
[zookeeper]
192.168.45.102

[mariadb]
192.168.45.102

[hdfs]

[druid]
192.168.45.102
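
Note: with a single host in [druid] as above, role/tasks/main.yml takes the standalone install path; the cluster path requires at least min_cluster_num (3) druid nodes and a populated [hdfs] group, since the cluster tasks delegate checks to groups.hdfs[0]. A cluster inventory would look like the following sketch (the extra IPs are placeholders, not part of this commit):

[druid]
192.168.45.101
192.168.45.102
192.168.45.103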
6
Apache Druid/26.0.0/druid/install.yml
Normal file
@@ -0,0 +1,6 @@
- hosts: druid
  remote_user: root
  roles:
    - role
  vars_files:
    - role/vars/main.yml
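
Note: role/tasks/main.yml dispatches on an `operation` variable, so a plausible invocation (an assumption, not shown in this commit) is `ansible-playbook -i hosts install.yml -e operation=install`.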
44
Apache Druid/26.0.0/druid/role/defaults/main.yml
Normal file
@@ -0,0 +1,44 @@
# The default installation location
deploy_dir: /data/olap

# The default data storage location, used for storing application data, logs and configuration files
data_dir: /data/olap

druid:
  common:
    druid.zk.service.host: '{% for dev_info in groups.zookeeper -%}{% if loop.last -%}{{dev_info}}:2181{%- else %}{{dev_info}}:2181,{%- endif %}{%- endfor %}'
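    # The loop above renders a comma-separated connect string from the [zookeeper] group, e.g. "zk1:2181,zk2:2181,zk3:2181".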
    druid.metadata.storage.connector.connectURI: 'jdbc:mysql://{{ vrrp_instance.default.virtual_ipaddress }}:3306/druid'
    druid.metadata.storage.connector.password: '{{ mariadb_default_pin }}'
  broker:
    # Running memory of the Druid Broker.
    java_opts: -Xmx1024m -Xms1024m
    # Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
    MaxDirectMemorySize: 512m
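    # With the values below, the formula above gives (5 + 1) * 50000000 bytes = 300 MB, so 512m leaves headroom.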
    # This specifies a buffer size (less than 2GiB) for the storage of intermediate results
    druid.processing.buffer.sizeBytes: 50000000
    # The number of direct memory buffers available for merging query results.
    druid.processing.numMergeBuffers: 4
    # The number of processing threads to have available for parallel processing of segments.
    druid.processing.numThreads: 5
  coordinator:
    # Running memory of the Druid Coordinator.
    java_opts: -Xmx1024m -Xms1024m
  historical:
    # Running memory of the Druid Historical.
    java_opts: -Xmx1024m -Xms1024m
    # The maximum size of the process's segment cache on disk, in bytes
    druid.segmentCache.locations: 300000000000
    # Worker tasks also use off-heap ("direct") memory. Set the amount of direct memory available (-XX:MaxDirectMemorySize) to at least (druid.processing.numThreads + 1) * druid.processing.buffer.sizeBytes
    MaxDirectMemorySize: 512m
    # This specifies a buffer size (less than 2GiB) for the storage of intermediate results
    druid.processing.buffer.sizeBytes: 50000000
    # The number of direct memory buffers available for merging query results.
    druid.processing.numMergeBuffers: 4
    # The number of processing threads to have available for parallel processing of segments.
    druid.processing.numThreads: 5
  middlemanager:
    # Running memory of the Druid MiddleManager.
    java_opts: -Xmx1024m -Xms1024m
    druid.indexer.fork.property.druid.processing.numMergeBuffers: 2
    druid.indexer.fork.property.druid.processing.buffer.sizeBytes: 20000000
    druid.indexer.fork.property.druid.processing.numThreads: 1
BIN
Apache Druid/26.0.0/druid/role/files/conf.zip
Normal file
Binary file not shown.
BIN
Apache Druid/26.0.0/druid/role/files/mysql
Normal file
Binary file not shown.
38
Apache Druid/26.0.0/druid/role/handlers/main.yml
Normal file
@@ -0,0 +1,38 @@
- name: Loading Image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
    source: load
    force_tag: yes
    force_source: yes
    timeout: 300

- name: Stop Container
  docker_container:
    name: '{{ container_name }}'
    state: absent

- name: Start Container
  docker_compose:
    project_src: '{{ deploy_dir }}/{{ container_name }}/'

- name: Removing Image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    state: absent

- name: Loading Exporter Image
  docker_image:
    name: 'druid_exporter'
    tag: '1.0.0'
    load_path: '{{ deploy_dir }}/{{ container_name }}/monitor/druid_exporter-1.0.0.tar'
    source: load
    force_tag: yes
    force_source: yes
    timeout: 300

- name: Start Exporter Container
  docker_compose:
    project_src: '{{ deploy_dir }}/{{ container_name }}/monitor/'
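
Note: the short module names docker_image, docker_container and docker_compose resolve to the community.docker collection on Ansible 2.10+, and docker_compose additionally expects the docker-compose Python package on the managed hosts (an environmental assumption, not declared in this commit).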
156
Apache Druid/26.0.0/druid/role/tasks/deploy.yml
Normal file
@@ -0,0 +1,156 @@
- block:
    - name: To terminate execution
      fail:
        msg: "Druid cluster mode requires at least 3 nodes, please check configurations/hosts -> druid"
      when: node_nums < (min_cluster_num)

    - name: Check the Zookeeper status
      shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
      register: port_out
      delegate_to: "{{ groups.zookeeper[0] }}"

    - name: To terminate execution
      fail:
        msg: "Port 2181 on the ZooKeeper node is not listening. The service status may be abnormal"
      run_once: true
      delegate_to: 127.0.0.1
      when: port_out.stdout != '1'

    - name: Checking Hadoop DataNode status
      shell: source /etc/profile && hadoop dfsadmin -report | grep "Live datanodes" | grep -E -o "[0-9]"
      async: 10
      register: datanode_out
      run_once: true
      delegate_to: "{{ groups.hdfs[0] }}"

    - name: Checking Hadoop NameNode status
      shell: source /etc/profile && hadoop dfsadmin -report | grep 50010 | wc -l
      async: 10
      register: namenode_out
      run_once: true
      delegate_to: "{{ groups.hdfs[0] }}"

    - name: To terminate execution
      fail:
        msg: "The dependency check failed; check whether the Hadoop cluster is healthy"
      when: datanode_out.stdout <= '1' and namenode_out.stdout <= '1'

- name: Creating directory
  file:
    state: directory
    path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
  with_items:
    - { dir: 'var' }
    - { dir: 'log' }
    - { dir: 'monitor' }

- name: Copying config
  unarchive:
    src: 'files/conf.zip'
    dest: '{{ deploy_dir }}/{{ container_name }}/'

- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: '{{ role_path }}/../../../software-packages/{{ image_name }}-{{ image_tag }}.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
  notify:
    - Loading Image

- name: Copying druid config files
  template:
    src: '{{ item.src }}'
    dest: '{{ deploy_dir }}/{{ container_name }}/conf/druid/cluster/{{ item.dest }}'
    backup: false
  with_items:
    - { src: 'common.runtime.properties.j2', dest: '_common/common.runtime.properties' }
    - { src: 'broker_runtime.j2', dest: 'query/broker/runtime.properties' }
    - { src: 'broker_jvm.j2', dest: 'query/broker/jvm.config' }
    - { src: 'historical_runtime.j2', dest: 'data/historical/runtime.properties' }
    - { src: 'historical_jvm.j2', dest: 'data/historical/jvm.config' }
    - { src: 'middleManager_jvm.j2', dest: 'data/middleManager/jvm.config' }
    - { src: 'middleManager_runtime.properties.j2', dest: 'data/middleManager/runtime.properties' }
    - { src: 'coordinator_jvm.j2', dest: 'master/coordinator-overlord/jvm.config' }
    - { src: 'router_runtime.properties.j2', dest: 'query/router/runtime.properties' }

- name: Fetching Hadoop config files to /tmp
  ansible.builtin.fetch:
    src: "{{ deploy_dir }}/hadoop-2.7.1/etc/hadoop/{{ item.filename }}"
    dest: "/tmp/"
    flat: yes
  loop: "{{ hadoop_config_files }}"
  run_once: true
  delegate_to: "{{ groups.hdfs[0] }}"

- name: Copying Hadoop config files to other nodes
  ansible.builtin.copy:
    src: "/tmp/{{ item.filename }}"
    dest: "{{ deploy_dir }}/{{ container_name }}/conf/druid/cluster/_common/"
  loop: "{{ hadoop_config_files }}"

- name: Create a new database with name {{ druid_database }}
  shell: mysql -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "create database {{ druid_database }} default character set utf8mb4 collate utf8mb4_general_ci;"
  run_once: true
  delegate_to: "{{ groups.mariadb[0] }}"

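# Node role assignment: with exactly min_cluster_num druid nodes every host starts
# cluster-all-server; with more, the first two hosts in groups['druid'] start
# cluster-query-server and the remaining hosts start cluster-data-server.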
- block:
    - name: Setting startup_mode variable
      set_fact: startup_mode="cluster-all-server"

    - name: Copying Druid docker-compose
      template:
        src: 'docker-compose.yml.j2'
        dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
        mode: 0644
      notify:
        - Loading Image
        - Start Container
  when: node_nums <= (min_cluster_num)

- block:
    - name: Setting startup_mode variable
      set_fact: startup_mode="cluster-query-server"

    - name: Copying Druid docker-compose
      template:
        src: 'docker-compose.yml.j2'
        dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
        mode: 0644
      notify:
        - Loading Image
        - Start Container
  when: node_nums > (min_cluster_num) and inventory_hostname in groups['druid'][:2]

- block:
    - name: Setting startup_mode variable
      set_fact: startup_mode="cluster-data-server"

    - name: Copying Druid docker-compose
      template:
        src: 'docker-compose.yml.j2'
        dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
        mode: 0644
      notify:
        - Loading Image
        - Start Container
  when: node_nums > (min_cluster_num) and inventory_hostname not in groups['druid'][:2]

- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
  copy:
    src: '{{ role_path }}/../../../software-packages/druid_exporter-1.0.0.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
    force: true
  notify:
    - Loading Exporter Image

- name: Configure exporter config files
  template:
    src: 'docker-compose_exporter.yml.j2'
    dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml'
    mode: 0644
  notify:
    - Start Exporter Container

- meta: flush_handlers
19
Apache Druid/26.0.0/druid/role/tasks/main.yml
Normal file
@@ -0,0 +1,19 @@
- block:
    - include: uninstall.yml
    - include: deploy.yml
    - include: status-check.yml
  when: (operation) == "install" and (groups.druid|length) > 1

- block:
    - include: uninstall.yml
  when: (operation) == "uninstall" and (groups.druid|length) > 1

- block:
    - include: standalone/uninstall.yml
    - include: standalone/deploy.yml
    - include: status-check.yml
  when: (operation) == "install" and (groups.druid|length) == 1

- block:
    - include: standalone/uninstall.yml
  when: (operation) == "uninstall" and (groups.druid|length) == 1
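
Note: bare `include` has been deprecated since Ansible 2.4; on current releases these entries would be written as `include_tasks: deploy.yml` and so on.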
93
Apache Druid/26.0.0/druid/role/tasks/standalone/deploy.yml
Normal file
@@ -0,0 +1,93 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{ groups.druid|length }}"

- block:
    - name: To terminate execution
      fail:
        msg: "Druid standalone mode allows at most 1 node, please check configurations/hosts -> druid"
      when: node_nums != '1'

    - name: Check the Zookeeper status
      shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
      register: port_out
      delegate_to: "{{ groups.zookeeper[0] }}"

    - name: To terminate execution
      fail:
        msg: "Port 2181 on the ZooKeeper node is not listening. The service status may be abnormal"
      run_once: true
      delegate_to: 127.0.0.1
      when: port_out.stdout != '1'

- name: Creating directory
  file:
    state: directory
    path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
  with_items:
    - { dir: 'var' }
    - { dir: 'log' }
    - { dir: 'monitor' }

- name: Copying config
  unarchive:
    src: 'files/conf.zip'
    dest: '{{ deploy_dir }}/{{ container_name }}/'

- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: 'files/{{ image_name }}-{{ image_tag }}.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
  notify:
    - Loading Image

- name: Copying druid config files
  template:
    src: '{{ item.src }}'
    dest: '{{ deploy_dir }}/{{ container_name }}/conf/druid/single-server/medium/{{ item.dest }}'
    backup: false
  with_items:
    - { src: 'common.runtime.properties.j2', dest: '_common/common.runtime.properties' }
    - { src: 'broker_runtime.j2', dest: 'broker/runtime.properties' }
    - { src: 'broker_jvm.j2', dest: 'broker/jvm.config' }
    - { src: 'historical_runtime.j2', dest: 'historical/runtime.properties' }
    - { src: 'historical_jvm.j2', dest: 'historical/jvm.config' }
    - { src: 'middleManager_jvm.j2', dest: 'middleManager/jvm.config' }
    - { src: 'middleManager_runtime.properties.j2', dest: 'middleManager/runtime.properties' }
    - { src: 'coordinator_jvm.j2', dest: 'coordinator-overlord/jvm.config' }
    - { src: 'router_runtime.properties.j2', dest: 'router/runtime.properties' }

- name: Create a new database with name {{ druid_database }}
  shell: mysql -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "create database {{ druid_database }} default character set utf8mb4 collate utf8mb4_general_ci;"
  run_once: true
  delegate_to: "{{ groups.mariadb[0] }}"

- name: Setting startup_mode variable
  set_fact: startup_mode="single-server-medium"

- name: Copying Druid docker-compose
  template:
    src: 'docker-compose.yml.j2'
    dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml'
    mode: 0644
  notify:
    - Loading Image
    - Start Container

- name: Copying image to {{ deploy_dir }}/{{ container_name }}/monitor
  copy:
    src: 'files/druid_exporter-1.0.0.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
    force: true
  notify:
    - Loading Exporter Image

- name: Configure exporter config files
  template:
    src: 'docker-compose_exporter.yml.j2'
    dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml'
    mode: 0644
  notify:
    - Start Exporter Container

- meta: flush_handlers
50
Apache Druid/26.0.0/druid/role/tasks/standalone/uninstall.yml
Normal file
@@ -0,0 +1,50 @@
- name: copy mysql to /usr/bin/
  copy:
    src: 'files/mysql'
    dest: '/usr/bin/'
    force: true
    mode: 0755

- name: Stopping and removing {{ container_name }} container
  docker_container:
    name: '{{ container_name }}'
    state: absent

- name: Removing old {{ image_name }} image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    state: absent

- name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
  file:
    path: '{{ deploy_dir }}/{{ container_name }}'
    state: absent

- name: Drop the {{ druid_database }} database if it exists
  shell: mysql -s -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "DROP DATABASE IF EXISTS {{ druid_database }};"
  run_once: true
  delegate_to: "{{ groups.druid[0] }}"

- name: Checking ZooKeeper has druid nodes
  shell: "docker exec -it zookeeper zkCli.sh ls / | grep druid | wc -l"
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  register: has_zknode

- name: Delete druid nodes in ZooKeeper
  shell: "docker exec -it zookeeper zkCli.sh rmr /druid"
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  when: has_zknode.stdout >= '1'

- name: Check if the Druid service already exists
  shell: ps -ef | grep "org.apache.druid.cli.Main server" | grep -v grep | grep -v json | wc -l
  register: check_out

- name: To terminate execution
  fail:
    msg: "Uninstall failed, the Druid process is still running, please check!"
  run_once: true
  delegate_to: 127.0.0.1
  when: check_out.stdout >= '1'
41
Apache Druid/26.0.0/druid/role/tasks/status-check.yml
Normal file
@@ -0,0 +1,41 @@
- name: Waiting for Druid to start, 60s
  shell: sleep 60

- block:
    - name: Check whether the Druid processes are running
      shell: ps -ef | grep -v grep | grep "org.apache.druid.cli.Main server" | wc -l
      register: process_out

    - name: To terminate execution
      fail:
        msg: "Druid on node {{ inventory_hostname }} is not started. Please check"
      run_once: true
      delegate_to: 127.0.0.1
      when: process_out.stdout != '5'
  when: node_nums <= (min_cluster_num)

- block:
    - name: Check whether the Druid processes are running
      shell: ps -ef | grep -v grep | grep "org.apache.druid.cli.Main server" | wc -l
      register: process_out

    - name: To terminate execution
      fail:
        msg: "Druid on node {{ inventory_hostname }} is not started. Please check"
      run_once: true
      delegate_to: 127.0.0.1
      when: process_out.stdout != '3'
  when: node_nums > (min_cluster_num) and inventory_hostname in groups['druid'][:2]

- block:
    - name: Check whether the Druid processes are running
      shell: ps -ef | grep -v grep | grep "org.apache.druid.cli.Main server" | wc -l
      register: process_out

    - name: To terminate execution
      fail:
        msg: "Druid on node {{ inventory_hostname }} is not started. Please check"
      run_once: true
      delegate_to: 127.0.0.1
      when: process_out.stdout != '2'
  when: node_nums > (min_cluster_num) and inventory_hostname not in groups['druid'][:2]
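
Note: the expected counts (5, 3 and 2 "org.apache.druid.cli.Main server" processes) correspond to how many Druid services the image launches for the cluster-all-server, cluster-query-server and cluster-data-server modes respectively; stdout is a string in Ansible, hence the quoted comparisons.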
64
Apache Druid/26.0.0/druid/role/tasks/uninstall.yml
Normal file
@@ -0,0 +1,64 @@
- name: copy mysql to /usr/bin/
  copy:
    src: 'files/mysql'
    dest: '/usr/bin/'
    force: true
    mode: 0755

- block:
    - name: Stopping and removing {{ container_name }} container
      docker_container:
        name: '{{ container_name }}'
        state: absent

    - name: Removing old {{ image_name }} image
      docker_image:
        name: '{{ image_name }}'
        tag: '{{ image_tag }}'
        state: absent

    - name: Ansible delete old {{ deploy_dir }}/{{ container_name }}
      file:
        path: '{{ deploy_dir }}/{{ container_name }}'
        state: absent

- block:
    - name: Drop the {{ druid_database }} database if it exists
      shell: mysql -s -uroot -p{{ mariadb_default_pin }} -P3306 -h{{ groups.mariadb[0] }} -e "DROP DATABASE IF EXISTS {{ druid_database }};"
      run_once: true
      delegate_to: "{{ groups.druid[0] }}"

    - name: Checking ZooKeeper has druid nodes
      shell: "docker exec -it zookeeper zkCli.sh ls / | grep druid | wc -l"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      register: has_zknode

    - name: Delete druid nodes in ZooKeeper
      shell: "docker exec -it zookeeper zkCli.sh rmr /druid"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      when: has_zknode.stdout >= '1'

    - name: Checking HDFS has Druid folder
      shell: source /etc/profile && hdfs dfs -ls / | grep druid | wc -l
      register: folder_exists
      run_once: true
      delegate_to: "{{ groups.hdfs[0] }}"

    - name: Delete Druid data folder in HDFS
      shell: source /etc/profile && hadoop fs -rm -r /druid
      run_once: true
      delegate_to: "{{ groups.hdfs[0] }}"
      when: folder_exists.stdout >= '1'

- name: Check if the Druid service already exists
  shell: ps -ef | grep "org.apache.druid.cli.Main server" | grep -v grep | grep -v json | wc -l
  register: check_out

- name: To terminate execution
  fail:
    msg: "Uninstall failed, the Druid process is still running, please check!"
  run_once: true
  delegate_to: 127.0.0.1
  when: check_out.stdout >= '1'
9
Apache Druid/26.0.0/druid/role/templates/broker_jvm.j2
Normal file
@@ -0,0 +1,9 @@
-server
{{ druid.broker.java_opts }}
-XX:MaxDirectMemorySize={{ druid.broker.MaxDirectMemorySize }}
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Dlogfile.name=broker
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
41
Apache Druid/26.0.0/druid/role/templates/broker_runtime.j2
Normal file
@@ -0,0 +1,41 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

druid.service=druid/broker
druid.plaintextPort=8082

# HTTP server settings
druid.server.http.numThreads=60

# HTTP client settings
druid.broker.http.numConnections=50
druid.broker.http.maxQueuedBytes=10000000

# Processing threads and buffers
druid.processing.buffer.sizeBytes={{ druid.broker['druid.processing.buffer.sizeBytes'] }}
druid.processing.numMergeBuffers={{ druid.broker['druid.processing.numMergeBuffers'] }}
druid.processing.numThreads={{ druid.broker['druid.processing.numThreads'] }}
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead
druid.broker.cache.useCache=false
druid.broker.cache.populateCache=false

druid.query.groupBy.maxMergingDictionarySize=10000000000
druid.query.groupBy.maxOnDiskStorage=10000000000
169
Apache Druid/26.0.0/druid/role/templates/common.runtime.properties.j2
Normal file
@@ -0,0 +1,169 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Extensions specified in the load list will be loaded by Druid
# We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
# We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead

# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
# More info: https://druid.apache.org/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches", "druid-multi-stage-query","mysql-metadata-storage","druid-hlld", "druid-hdrhistogram"]

# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies


#
# Hostname
#
druid.host={{ inventory_hostname }}

#
# Logging
#

# Log all runtime properties on startup. Disable to avoid logging properties on startup:
druid.startup.logging.logProperties=true

#
# Zookeeper
#

druid.zk.service.host={{ druid.common['druid.zk.service.host'] }}

druid.zk.paths.base=/druid

#
# Metadata storage
#

# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
#druid.metadata.storage.type=derby
#druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
#druid.metadata.storage.connector.host=localhost
#druid.metadata.storage.connector.port=1527

# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
druid.metadata.storage.type=mysql
druid.metadata.storage.connector.connectURI={{ druid.common['druid.metadata.storage.connector.connectURI'] }}
druid.metadata.storage.connector.user=root
druid.metadata.storage.connector.password={{ druid.common['druid.metadata.storage.connector.password'] }}

# For PostgreSQL:
#druid.metadata.storage.type=postgresql
#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...

#
# Deep storage
#

# For local disk (only viable in a cluster if this is a network mount):
{% if groups.druid | length == 1 %}
druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments
{% elif groups.druid | length >= 3 %}
# For HDFS:
druid.storage.type=hdfs
druid.storage.storageDirectory=/druid/segments
{% endif %}

# For S3:
#druid.storage.type=s3
#druid.storage.bucket=your-bucket
#druid.storage.baseKey=druid/segments
#druid.s3.accessKey=...
#druid.s3.secretKey=...

#
# Indexing service logs
#

# For local disk (only viable in a cluster if this is a network mount):
{% if groups.druid | length == 1 %}
druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs
{% elif groups.druid | length >= 3 %}
# For HDFS:
druid.indexer.logs.type=hdfs
druid.indexer.logs.directory=/druid/indexing-logs
{% endif %}

druid.indexer.logs.kill.enabled=true
druid.indexer.logs.kill.durationToRetain=604800000
druid.indexer.logs.kill.delay=21600000

# For S3:
#druid.indexer.logs.type=s3
#druid.indexer.logs.s3Bucket=your-bucket
#druid.indexer.logs.s3Prefix=druid/indexing-logs


#
# Service discovery
#

druid.selectors.indexing.serviceName=druid/overlord
druid.selectors.coordinator.serviceName=druid/coordinator

#
# Monitoring
#

druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid.emitter=http
druid.emitter.logging.logLevel=info
druid.emitter.http.recipientBaseUrl=http://{{ inventory_hostname }}:9903

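# (The http emitter above targets the druid_exporter container, which publishes port 9903; see the exporter docker-compose template.)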
# Storage type of double columns
# omitting this will lead to index double as float at the storage layer

druid.indexing.doubleStorage=double

#
# Security
#
druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password", "password", "key", "token", "pwd"]


#
# SQL
#
druid.sql.enable=true

#
# Lookups
#
druid.lookup.enableLookupSyncOnStartup=false

# Planning SQL query when there is aggregate distinct in the statement
druid.sql.planner.useGroupingSetForExactDistinct=true

# Expression processing config
druid.expressions.useStrictBooleans=true

# Http client
druid.global.http.eagerInitialization=false

# Set to false to store and query data in SQL compatible mode. When set to true (legacy mode), null values will be stored as '' for string columns and 0 for numeric columns.
druid.generic.useDefaultValueForNull=false
10
Apache Druid/26.0.0/druid/role/templates/coordinator_jvm.j2
Normal file
@@ -0,0 +1,10 @@
-server
{{ druid.coordinator.java_opts }}
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Dlogfile.name=coordinator
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-Dderby.stream.error.file=var/druid/derby.log
18
Apache Druid/26.0.0/druid/role/templates/docker-compose.yml.j2
Normal file
@@ -0,0 +1,18 @@
version: '3'

services:
  druid-master:
    image: {{ image_name }}:{{ image_tag }}
    restart: always
    container_name: {{ container_name }}
    privileged: true
    user: root
    environment:
      # cluster-data-server,cluster-query-server,cluster-all-server,single-server-small,single-server-medium,single-server-large,single-server-xlarge
      MODE: {{ startup_mode }}
    volumes:
      - "{{ deploy_dir }}/{{ container_name }}/conf:/{{ component_version }}/conf"
      - "{{ deploy_dir }}/{{ container_name }}/var:/{{ component_version }}/var"
      - "{{ deploy_dir }}/{{ container_name }}/log:/{{ component_version }}/log"
    network_mode: "host"
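
For reference, with the defaults from role/vars/main.yml and role/defaults/main.yml in a standalone install, the template above renders roughly as follows (a sketch, not a file in this commit):

version: '3'

services:
  druid-master:
    image: druid:26.0.0
    restart: always
    container_name: druid
    privileged: true
    user: root
    environment:
      MODE: single-server-medium
    volumes:
      - "/data/olap/druid/conf:/apache-druid-26.0.0/conf"
      - "/data/olap/druid/var:/apache-druid-26.0.0/var"
      - "/data/olap/druid/log:/apache-druid-26.0.0/log"
    network_mode: "host"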
17
Apache Druid/26.0.0/druid/role/templates/docker-compose_exporter.yml.j2
Normal file
@@ -0,0 +1,17 @@
version: '3.3'

services:
  druid_exporter:
    image: druid_exporter:1.0.0
    container_name: druid_exporter
    restart: always
    ports:
      - 9903:9903
    environment:
      JVM_MEM: "-Xmx1024m -Xms128m"
    networks:
      olap:
        ipv4_address: 172.20.88.11
networks:
  olap:
    external: true
9
Apache Druid/26.0.0/druid/role/templates/historical_jvm.j2
Normal file
@@ -0,0 +1,9 @@
-server
{{ druid.historical.java_opts }}
-XX:MaxDirectMemorySize={{ druid.historical.MaxDirectMemorySize }}
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Dlogfile.name=historical
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
42
Apache Druid/26.0.0/druid/role/templates/historical_runtime.j2
Normal file
@@ -0,0 +1,42 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

druid.service=druid/historical
druid.plaintextPort=8083

# HTTP server threads
druid.server.http.numThreads=60

# Processing threads and buffers
druid.processing.buffer.sizeBytes={{ druid.historical['druid.processing.buffer.sizeBytes'] }}
druid.processing.numMergeBuffers={{ druid.historical['druid.processing.numMergeBuffers'] }}
druid.processing.numThreads={{ druid.historical['druid.processing.numThreads'] }}
druid.processing.tmpDir=var/druid/processing

# Segment storage
druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":{{ druid.historical['druid.segmentCache.locations'] }}}]

# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
druid.cache.sizeInBytes=256000000

druid.query.groupBy.maxMergingDictionarySize=10000000000
druid.query.groupBy.maxOnDiskStorage=10000000000
7
Apache Druid/26.0.0/druid/role/templates/middleManager_jvm.j2
Normal file
@@ -0,0 +1,7 @@
-server
{{ druid.middlemanager.java_opts }}
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Dlogfile.name=middleManager
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
43
Apache Druid/26.0.0/druid/role/templates/middleManager_runtime.properties.j2
Normal file
@@ -0,0 +1,43 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

druid.service=druid/middleManager
druid.plaintextPort=8091

# Number of tasks per middleManager
druid.worker.capacity=200

# Task launch parameters
druid.worker.baseTaskDirs=[\"var/druid/task\"]
druid.indexer.runner.javaOptsArray=["-server","-Xms1024m","-Xmx1024m","-XX:MaxDirectMemorySize=1024m","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager","-Dlog4j.configurationFile=conf/druid/cluster/_common/log4j2-task.xml"]

# HTTP server threads
druid.server.http.numThreads=60

# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers={{ druid.middlemanager['druid.indexer.fork.property.druid.processing.numMergeBuffers'] }}
druid.indexer.fork.property.druid.processing.buffer.sizeBytes={{ druid.middlemanager['druid.indexer.fork.property.druid.processing.buffer.sizeBytes'] }}
druid.indexer.fork.property.druid.processing.numThreads={{ druid.middlemanager['druid.indexer.fork.property.druid.processing.numThreads'] }}

# Hadoop indexing
druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp

druid.query.groupBy.maxMergingDictionarySize=10000000000
druid.query.groupBy.maxOnDiskStorage=10000000000
34
Apache Druid/26.0.0/druid/role/templates/router_runtime.properties.j2
Normal file
@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

druid.service=druid/router
druid.plaintextPort=8088

# HTTP proxy
druid.router.http.numConnections=50
druid.router.http.readTimeout=PT5M
druid.router.http.numMaxThreads=100
druid.server.http.numThreads=100

# Service discovery
druid.router.defaultBrokerServiceName=druid/broker
druid.router.coordinatorServiceName=druid/coordinator

# Management proxy to coordinator / overlord: required for unified web console.
druid.router.managementProxy.enabled=true
23
Apache Druid/26.0.0/druid/role/vars/main.yml
Normal file
@@ -0,0 +1,23 @@
# Image name
image_name: druid

# Image tag
image_tag: 26.0.0

# Container name
container_name: druid

# Component version
component_version: apache-druid-26.0.0

# Minimum cluster size
min_cluster_num: '3'

# MySQL database name
druid_database: druid

# HDFS config files required in cluster mode
hadoop_config_files:
  - { filename: 'hdfs-site.xml' }
  - { filename: 'core-site.xml' }