diff --git a/configurations/components.yml b/configurations/components.yml
index 6865007..6c9e3e7 100644
--- a/configurations/components.yml
+++ b/configurations/components.yml
@@ -122,6 +122,12 @@ topic_file_bytes: 1073741824
#Buffer pool size, unit: MB
mariadb_innodb_buffer_pool_size: 1024
+#mariadb port
+galaxy_mariadb_port: 3306
+
+#mariadb user
+galaxy_mariadb_username: root
+
#galaxy mariadb key
galaxy_mariadb_pin: galaxy2019
@@ -134,7 +140,10 @@ spark_worker_cores: 30
#===========================Nacos===============================#
#Nacos memory configuration
-nacos_java_opt: '-Xms1024m -Xmx1024m -Xmn1024m'
+nacos_java_opt: '-Xms256m -Xmx256m -Xmn256m'
+
+#Galaxy Nacos pin (default account: nacos), used by galaxy-hos-service, galaxy-job-service, galaxy-qgw-service and galaxy-report-service to fetch configuration from nacos
+nacos_pin: nacos
#===========================Storm===============================#
#Number of workers available to a single supervisor, usually half the CPU count.
@@ -149,25 +158,18 @@ storm_worker_min_mem: 1024
#===========================Flink================================#
#Network buffer size
taskmanager_memory_network_min: 512m
-taskmanager_memory_network_max: 1280m
+taskmanager_memory_network_max: 512m
#TaskManager off-heap (managed) memory
taskmanager_memory_managed_size: 256m
#Total Flink-related memory used by the TaskManager process
-taskmanager_memory_flink_size: 5120m
+taskmanager_memory_flink_size: 3072m
#Total Flink-related memory used by the JobManager process
-jobmanager_memory_flink_size: 1024m
+jobmanager_memory_flink_size: 512m
#===========================Zookeeper===============================#
#Zookeeper process startup memory size, unit: MB
zookeeper_max_mem: 1024
-#===========================Mariadb===============================#
-#mariadb port
-galaxy_mariadb_port: 3306
-
-#mariadb user
-galaxy_mariadb_username: root
-
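The MariaDB port and user now live next to galaxy_mariadb_pin instead of in their own section further down. A minimal sketch of how the three values could be sanity-checked from a shell on the control host, assuming the mysql client is installed (host and defaults below are illustrative, not part of the patch):

    MARIADB_HOST="192.0.2.42"      # a host from the [mariadb] group (example value)
    MARIADB_PORT="3306"            # galaxy_mariadb_port
    MARIADB_USER="root"            # galaxy_mariadb_username
    MARIADB_PIN="galaxy2019"       # galaxy_mariadb_pin

    # A plain SELECT 1 proves the port is open and the credentials work.
    if mysql -h "${MARIADB_HOST}" -P "${MARIADB_PORT}" -u "${MARIADB_USER}" \
             -p"${MARIADB_PIN}" -e 'SELECT 1' >/dev/null 2>&1; then
        echo "MariaDB reachable on ${MARIADB_HOST}:${MARIADB_PORT}"
    else
        echo "MariaDB connection failed; check the values in components.yml" >&2
        exit 1
    fi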
diff --git a/configurations/config.yml b/configurations/config.yml
index b16f4fa..6674a93 100644
--- a/configurations/config.yml
+++ b/configurations/config.yml
@@ -22,7 +22,7 @@ package_path: "{{ playbook_dir | dirname }}/software_packages"
#============================bifang-Mariadb===============================#
#Bifang MariaDB address, used by galaxy-qgw-service, galaxy-report-service and flink-dos.
-bifang_mariadb_host: 192.168.40.153
+bifang_mariadb_host: 127.0.0.1
#Bifang mariadb port
bifang_mariadb_port: 3306
@@ -31,14 +31,14 @@ bifang_mariadb_port: 3306
bifang_mariadb_database: tsg-bifang
#Bifang mariadb root's pin
-bifang_mariadb_pin: 111111
+bifang_mariadb_pin: ******
#============================Virtual IP===============================#
#-------------apps------------------#
#Keepalive (virtual) IP address for the Galaxy application components; the IP must be in the same subnet as the IPs configured under hosts [services] and must not collide with any existing IP in that subnet.
-gateway_keepalive_host: 192.168.45.252
+gateway_keepalive_host: 127.0.0.*
#Network interface name that the Galaxy application-component keepalive binds to; two servers with different interface names are not supported, in which case the configuration file must be edited manually after installation.
gateway_keepalive_interface: eth0
@@ -53,7 +53,7 @@ hos_keepalive_need: "yes"
#Keepalive IP address for galaxy-hos-service; if hos_keepalive_need is "no", fill in the IP corresponding to gateway_keepalive_host here.
#The IP must be in the same subnet as the IPs configured under hosts [hoskeepalive] and must not collide with any existing IP in that subnet.
-hos_keepalive_host: 192.168.45.253
+hos_keepalive_host: 127.0.0.*
#Network interface name that the galaxy-hos-service keepalive binds to; two servers with different interface names are not supported, in which case the configuration file must be edited manually after installation.
#If hos_keepalive_need is set to "no", this value can be left unchanged.
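Both virtual IPs are masked to 127.0.0.* placeholders in this patch; whatever real addresses are filled in must, per the comments above, share the subnet of the backing hosts and be otherwise unused. A small, purely illustrative pre-flight check along those lines (example address and interface name):

    VIP="192.0.2.250"     # candidate gateway_keepalive_host / hos_keepalive_host (example value)
    IFACE="eth0"          # gateway_keepalive_interface

    # If anything answers, the address is already taken.
    if ping -c 3 -W 1 "${VIP}" >/dev/null 2>&1; then
        echo "WARNING: ${VIP} is already in use" >&2
    fi

    # Show the local addresses on the interface to eyeball the subnet.
    ip -4 addr show "${IFACE}" | grep -w inet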
@@ -65,18 +65,15 @@ hos_keepalive_router_id: 62
#============================Kafka===============================#
#Kafka address that tasks consume from; unless there are special requirements this is the Kafka address of the cluster being installed, format: kafkaip1:9092,kafkaip2:9092,kafkaip3:9092.....
-kafka_source_servers: 192.168.45.37:9092,192.168.45.38:9092,192.168.45.39:9092
+kafka_source_servers: 127.0.0.1:9092
#Kafka address that task results are written to; unless there are special requirements this is the Kafka address of the cluster being installed, format: kafkaip1:9092,kafkaip2:9092,kafkaip3:9092.....
-kafka_sink_servers: 192.168.45.37:9092,192.168.45.38:9092,192.168.45.39:9092
+kafka_sink_servers: 127.0.0.1:9092
#============================Default configuration===============================#
#Nacos namespace name used by the in-house service components
services_config_namespace: prod
-#Galaxy Nacos pin
-nacos_pin: nacos
-
#Name of the mariadb database used by Nacos
mariadb_nacos_database: nacos
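kafka_source_servers and kafka_sink_servers are collapsed to a single local broker here. Whichever list is used, each host:port entry can be probed before installation; a sketch, assuming nc (netcat) is available on the control host:

    KAFKA_SERVERS="192.0.2.37:9092,192.0.2.38:9092"   # comma-separated host:port list (example)

    IFS=',' read -ra BROKERS <<< "${KAFKA_SERVERS}"
    for broker in "${BROKERS[@]}"; do
        host="${broker%%:*}"
        port="${broker##*:}"
        # nc -z only checks that the TCP port is open; it does not validate Kafka itself.
        if nc -z -w 3 "${host}" "${port}"; then
            echo "OK   ${broker}"
        else
            echo "FAIL ${broker}" >&2
        fi
    done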
diff --git a/configurations/hosts b/configurations/hosts
index 5065ea5..5582338 100644
--- a/configurations/hosts
+++ b/configurations/hosts
@@ -1,97 +1,54 @@
#IPs under this label are used to install: chproxy, galaxy-app-nginx, galaxy-job-service, galaxy-qgw-service, galaxy-report-service and galaxy-app-keepalive
#At least two hosts must be specified.
[services]
-192.168.45.42
-192.168.45.43
#IPs under this label are used to install: galaxy-hos-service
[hos]
-192.168.45.40
-192.168.45.41
#IPs under this label are used to install: galaxy-hos-keepalive and galaxy-hos-nginx
#If hos_keepalive_need in inventories/pro/group_vars/all is set to "no", do not list any IP here; if "yes", list the servers that run keepalive for galaxy-hos-service.
#The IPs must not be the same as those under [services], and at most two may be listed.
[hoskeepalive]
-192.168.45.40
-192.168.45.41
#IPs under this label are used to install: ArangoDB
[arangodb]
-192.168.45.42
#IPs under this label are used to install: Mariadb; in the current version only two IPs may be configured, and they must match the first two IPs under [services].
#There is no load-balancing requirement at the moment; this is reserved for future load-balancing support.
[mariadb]
-192.168.45.42
-192.168.45.43
#IPs under this label are used to install: Nacos
[nacos]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Apache Zookeeper
[zookeeper]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Apache Kafka; the first host in the cluster also runs the Kafka-Manager service.
[kafka]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Apache Storm; by internal convention, if the cluster has fewer than 3 hosts the first host is the nimbus node; with more than three hosts the first three are nimbus nodes.
#[storm]
-#192.168.45.37
-#192.168.45.38
-#192.168.45.39
#IPs under this label are used to install: Apache Flink; by internal convention, the first two hosts are master nodes and all hosts are worker nodes.
[flink]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Apache Hadoop
[hadoop]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Apache HBase
[hbase]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Apache Spark; by internal convention, if the cluster has fewer than 3 hosts the workers run on all hosts; with more than 3 hosts the first host is the master node.
[spark]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Galaxy-Gohangout
[gohangout]
-192.168.45.40
#IPs under this label are used to install: Apache Druid; by internal convention, if the cluster has fewer than 4 hosts every host runs all node types; with more than 4 hosts the first two default to query nodes.
[druid]
-192.168.45.37
-192.168.45.38
-192.168.45.39
#IPs under this label are used to install: Clickhouse; by internal convention, the first two hosts default to query nodes.
[clickhouse]
-192.168.45.40
-192.168.45.41
-192.168.45.42
-192.168.45.43
#Offline generate-baseline program for DoS detection
[dos_baseline]
-192.168.45.43
-
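All example IPs have been removed from the inventory, leaving only the group labels. Once real addresses are filled in, basic reachability can be confirmed with an ad-hoc run before starting install.sh (a sketch, assuming Ansible and SSH access are already set up on the control host):

    # Exercise every host in configurations/hosts from the repository root.
    ansible -i configurations/hosts all -m ping

    # Or limit the check to a single group, e.g. the ClickHouse nodes:
    ansible -i configurations/hosts clickhouse -m ping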
diff --git a/configurations/services.yml b/configurations/services.yml
index 39cf4ad..e7c5fc4 100644
--- a/configurations/services.yml
+++ b/configurations/services.yml
@@ -11,8 +11,8 @@ qgw_java_opts: "-Xmx512m -Xms512m"
#=======================galaxy-job-service==========================#
#galaxy-job-service memory settings
-job_java_opts: "-Xmx512m -Xms512m"
+job_java_opts: "-Xmx128m -Xms128m"
#======================galaxy-report-service=========================#
#galaxy-report-service memory settings
-report_java_opts: "-Xmx512m -Xms512m"
+report_java_opts: "-Xmx128m -Xms128m"
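The heap for galaxy-job-service and galaxy-report-service drops from 512m to 128m. After a restart, one way to confirm a running service picked up the new opts (illustrative; the process-name match is an assumption about how the service shows up in ps):

    # Show the heap flags of the running galaxy-job-service JVM, if any.
    ps -ef | grep galaxy-job-service | grep -v grep | grep -o -- '-Xm[sx][0-9]*[mg]' | sort -u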
diff --git a/install.sh b/install.sh
index b1e42d2..9cf328f 100755
--- a/install.sh
+++ b/install.sh
@@ -1,6 +1,5 @@
#!/bin/bash
-
#######################Base variables######################
#Script directory
bin_path=$(cd `dirname $0`; pwd)
@@ -195,7 +194,7 @@ clear
i=0
#ins_names=(Zookeeper Mariadb galaxy-gateway-keepalive Nacos Kafka Storm Hadoop HBase Clickhouse Druid Spark Arangodb)
#Replace Storm with Flink
-ins_names=(Zookeeper Mariadb galaxy-gateway-keepalive Nacos Kafka Flink Hadoop HBase Clickhouse Druid Spark Arangodb)
+ins_names=(Zookeeper Mariadb galaxy-gateway-keepalive Nacos Kafka Hadoop HBase Flink Clickhouse Druid Spark Arangodb)
echo ${ins_names[@]} > $NUM_DIR/common_list
#Install the components in order according to the selected combination
@@ -338,9 +337,9 @@ clear
i=0
hoskeep=`cat ../configurations/config.yml | grep -vE "^#|^$" | grep "hos_keepalive_need" | grep yes | wc -l`
if [[ $hoskeep -eq "1" ]]; then
- ins_names=(galaxy-gateway-nginx galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-service galaxy-chproxy galaxy-hos-nginx galaxy-hos-keepalive galaxy-gohangout)
+ ins_names=(galaxy-gateway-nginx galaxy-chproxy galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-nginx galaxy-hos-keepalive galaxy-hos-service galaxy-gohangout)
else
- ins_names=(galaxy-gateway-nginx galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-service galaxy-chproxy galaxy-gohangout)
+ ins_names=(galaxy-gateway-nginx galaxy-chproxy galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-service galaxy-gohangout)
fi
echo ${ins_names[@]} > $NUM_DIR/app_list
@@ -648,9 +647,31 @@ fi
clear
cat ../parcels/menu/homePage
+if [ -f $NUM_DIR/stepNum ]; then
+ step_num=`cat $NUM_DIR/stepNum`
+ next_step_num=`expr ${step_num} + 1`
+ #if [ ${next_step_num} -eq '5' ]; then
+ # echo -e "\033[33m You have successfully installed, exiting ! ! \033[0m"
+ # break;
+ #fi
+ #if [ ${next_step_num} -ne ${yn_main} ]; then
+ # #echo "Then next step should be ${next_step_num}"
+ # echo -e "\033[33m Then next step should be ${next_step_num} \033[0m"
+ # sleep 3s
+ #continue
+ #fi
+ echo -e "\033[33m Then next step should be ${next_step_num}, but you can chose other number of step if you want ! ! \033[0m"
+ else
+ echo -e "\033[33m Then next step should be 1, but you can chose other number of step if you want ! ! \033[0m"
+ #sleep 3s
+ #continue
+fi
+
read -p "
- Selection(1-4)? " yn
-case $yn in
+ Selection(1-4)? " yn_main
+
+
+case $yn_main in
[1])
while true; do
clear
@@ -664,39 +685,42 @@ echo " *
*********************************************************************************************************
"
-read -p "Enter [Y] or [N]: " yn
+read -p "Enter [yY] or [nN]: " yn
case $yn in
[Yy]* )
installcombination
+ echo ${yn_main} > $NUM_DIR/stepNum
break;;
[Nn]*)
break;;
* )
- echo "Please Enter [Y] or [N].";;
+ echo "Please Enter [yY] or [nN].";;
esac
done;;
+
[2] )
while true; do
clear
-#installApps
-
cat ../parcels/menu/appMenu
-read -p "Enter [Y] or [N]: " yn
+read -p "Enter [yY] or [nN]: " yn
case $yn in
[Yy]* )
installApps
+ echo ${yn_main} > $NUM_DIR/stepNum
break;;
[Nn]* )
break;;
* )
- echo "Please Enter [Y] or [N].";;
+ echo "Please Enter [yY] or [nN].";;
esac
done;;
[3] )
installInit
+ echo ${yn_main} > $NUM_DIR/stepNum
;;
[4] )
installCheck
+ echo ${yn_main} > $NUM_DIR/stepNum
;;
* )
echo "Please Enter (1-4)."
diff --git a/parcels/init-galaxy-hos-service.yml b/parcels/init-galaxy-hos-service.yml
index 814ebb7..a24652f 100644
--- a/parcels/init-galaxy-hos-service.yml
+++ b/parcels/init-galaxy-hos-service.yml
@@ -1,4 +1,5 @@
-- hosts: services
+- hosts:
+ - hos
remote_user: root
roles:
- init/galaxy-hos-service
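The init play now targets the hos group rather than services, which matches where galaxy-hos-service is installed according to configurations/hosts. If the step has to be re-run by hand, it can be pointed at the same inventory (an illustrative invocation; install.sh normally drives this):

    # Re-run only the hos-service initialization against the [hos] hosts.
    ansible-playbook -i configurations/hosts parcels/init-galaxy-hos-service.yml

    # Narrow it to a single host while debugging:
    ansible-playbook -i configurations/hosts parcels/init-galaxy-hos-service.yml --limit 'hos[0]'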
diff --git a/parcels/menu/appMenu b/parcels/menu/appMenu
index 4deace8..0dfe422 100644
--- a/parcels/menu/appMenu
+++ b/parcels/menu/appMenu
@@ -1,17 +1,16 @@
Service Components:
- ┌──────────────────────────────────────┐
- ├ galaxy-qgw-service ┤
- ├ galaxy-job-service ┤
- ├ galaxy-report-service ┤
- ├ galaxy-hos-service ┤
- ├ galaxy-chproxy ┤
- ├ galaxy-gohangout ┤
- ├ galaxy-gateway-nginx ┤
- ├ galaxy-nginx-hos ┤
- ├ galaxy-gateway-keepalive ┤
- ├ galaxy-hos-keepalive ┤
- └──────────────────────────────────────┘
+ ┌───────────────────────────────────┐
+ ├ [*] galaxy-gateway-nginx ┤
+ ├ [*] galaxy-chproxy ┤
+ ├ [*] galaxy-qgw-service ┤
+ ├ [*] galaxy-job-service ┤
+ ├ [*] galaxy-report-service ┤
+ ├ [*] galaxy-hos-nginx ┤
+ ├ [*] galaxy-hos-keepalive ┤
+ ├ [*] galaxy-hos-service ┤
+ ├ [*] galaxy-gohangout ┤
+ └───────────────────────────────────┘
*********************************************************
* Press Ctrl+C or N to exit, Enter or Y to continue. *
diff --git a/parcels/menu/commonMenu b/parcels/menu/commonMenu
index 8bf158b..72e21c8 100644
--- a/parcels/menu/commonMenu
+++ b/parcels/menu/commonMenu
@@ -5,9 +5,9 @@ Open Source Software:
├ [*] Mariadb ┤
├ [*] Nacos ┤
├ [*] Apache Kafka ┤
- ├ [*] Apache Flink ┤
├ [*] Apache Hadoop ┤
├ [*] Apache HBase ┤
+ ├ [*] Apache Flink ┤
├ [*] Clickhouse ┤
├ [*] Apache Druid ┤
├ [*] Apache Spark ┤
diff --git a/parcels/roles/check/check-services/tasks/main.yml b/parcels/roles/check/check-services/tasks/main.yml
index 4e5f0ad..4ac4af6 100644
--- a/parcels/roles/check/check-services/tasks/main.yml
+++ b/parcels/roles/check/check-services/tasks/main.yml
@@ -1,51 +1,51 @@
- name: Checking Components Heartbeats
- shell: "curl -s http://{{ inventory_hostname }}:8183/monitor/health | grep DOWN | grep -v grep | wc -l"
+ shell: "curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/monitor/health | grep DOWN | grep -v grep | wc -l"
register: health
- name: Checking Components Heartbeats
fail:
- msg: "组件心跳检测异常,请通过接口 curl -s http://{{ inventory_hostname }}:8183/monitor/health 检查具体DOWN组件。"
+ msg: "组件心跳检测异常,请通过接口 curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/monitor/health 检查具体DOWN组件。"
when: health.stdout != '0'
- name: Checking Metadata
- shell: "curl -s http://{{ inventory_hostname }}:8183/diagnosis/metadata | grep '\"status\":200' | grep -v grep | wc -l"
+ shell: "curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/metadata | grep '\"status\":200' | grep -v grep | wc -l"
register: metadata
- name: Checking Metadata
fail:
- msg: "元数据验证异常,请通过接口 curl -s http://{{ inventory_hostname }}:8183/diagnosis/metadata 检查具体异常信息。"
+ msg: "元数据验证异常,请通过接口 curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/metadata 检查具体异常信息。"
when: metadata.stdout != '1'
- name: Checking SQL
- shell: "curl -s http://{{ inventory_hostname }}:8183/diagnosis/runSql | grep '\"status\":200' | grep -v grep | wc -l"
+ shell: "curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/runSql | grep '\"status\":200' | grep -v grep | wc -l"
register: sql
- name: Checking SQL
fail:
- msg: "系统内置的SQL数据集执行异常,请通过接口 curl -s http://{{ inventory_hostname }}:8183/diagnosis/runSql 检查具体异常信息。"
+ msg: "系统内置的SQL数据集执行异常,请通过接口 curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/runSql 检查具体异常信息。"
when: sql.stdout != '1'
#TODO Due to service version constraints, enable this verification in the 21.07 release
#- name: Checking {{ groups.hos[0] }} galaxy-hos-service
-# shell: "curl --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor | grep 200 | wc -l"
+# shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor | grep 200 | wc -l"
# register: monitor_out
# run_once: true
# delegate_facts: true
# delegate_to: '{{ groups.hos[0] }}'
#
#- fail:
-# msg: "HOS Monitor检测异常,请通过接口 curl --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor 检查具体异常信息"
+# msg: "HOS Monitor检测异常,请通过接口 curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor 检查具体异常信息"
# when: monitor_out.stdout != '1'
#
#- name: Checking {{ groups.hos[1] }} galaxy-hos-service
-# shell: "curl --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor | grep 200 | wc -l"
+# shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor | grep 200 | wc -l"
# register: monitor_out
# run_once: true
# delegate_facts: true
# delegate_to: '{{ groups.hos[1] }}'
#
#- fail:
-# msg: "HOS Monitor检测异常,请通过接口 curl --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor 检查具体异常信息"
+# msg: "HOS Monitor检测异常,请通过接口 curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor 检查具体异常信息"
# when: monitor_out.stdout != '1'
- name: wait for gohangout to start, sleep 30s
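Every probe now carries --retry-delay 3 --retry 3 so a service that is still warming up gets a few chances before the check fails. curl only retries what it considers transient errors; connection-refused needs the extra --retry-connrefused flag, which the tasks do not use. A quick manual probe against one node (illustrative host):

    # Probe one node by hand: up to 3 retries, 3 seconds apart.
    curl --retry-delay 3 --retry 3 -s "http://192.0.2.42:8183/monitor/health" | grep -c DOWN

    # -v makes the individual retry attempts visible while debugging:
    curl --retry-delay 3 --retry 3 --retry-connrefused -v "http://192.0.2.42:8183/monitor/health"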
diff --git a/parcels/roles/components/clickhouse/common/templates/config.xml.j2 b/parcels/roles/components/clickhouse/common/templates/config.xml.j2
index eeaaeb5..40449e5 100644
--- a/parcels/roles/components/clickhouse/common/templates/config.xml.j2
+++ b/parcels/roles/components/clickhouse/common/templates/config.xml.j2
@@ -368,7 +368,7 @@
-->
{{ install_path }}/clickhouse/format_schemas/
diff --git a/parcels/roles/components/druid/templates/bak/middleManager_runtime.properties.j2 b/parcels/roles/components/druid/templates/bak/middleManager_runtime.properties.j2
new file mode 100644
index 0000000..b953c5e
--- /dev/null
+++ b/parcels/roles/components/druid/templates/bak/middleManager_runtime.properties.j2
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/middleManager
+druid.plaintextPort=8091
+
+# Number of tasks per middleManager
+druid.worker.capacity=200
+
+# Task launch parameters
+druid.indexer.runner.javaOpts=-server {{ middlemanager_runner_javaOpts }} -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dlog4j.configurationFile=conf/druid/cluster/_common/log4j2-task.xml
+druid.indexer.task.baseTaskDir=var/druid/task
+
+# HTTP server threads
+druid.server.http.numThreads=60
+
+# Processing threads and buffers on Peons
+druid.indexer.fork.property.druid.processing.numMergeBuffers={{ middlemanager_numMergeBuffers }}
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes={{ middlemanager_buffer_sizeBytes }}
+druid.indexer.fork.property.druid.processing.numThreads={{ middlemanager_numThreads }}
+
+# Hadoop indexing
+druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
+
diff --git a/parcels/roles/components/druid/templates/bak/set_druid_env.sh.j2 b/parcels/roles/components/druid/templates/bak/set_druid_env.sh.j2
new file mode 100644
index 0000000..21a4f0a
--- /dev/null
+++ b/parcels/roles/components/druid/templates/bak/set_druid_env.sh.j2
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+
+#num=`cat /etc/profile | grep druid | wc -l`
+
+#if [ $num -eq "0" ];then
+# echo -e "\n#druid" >> /etc/profile
+# echo -e "export DRUID_HOME={{ install_path }}/{{ druid_version }}" >> /etc/profile
+# echo -e "export PATH=\$DRUID_HOME/bin:\$PATH" >> /etc/profile
+# source /etc/profile
+#fi
+
+keeppath='/etc/init.d/keepdruidall'
+ if [ -x $keeppath ];then
+ chkconfig --add keepdruidall
+ chkconfig keepdruidall on
+ fi
+
+keeppath='/etc/init.d/keepdruiddata'
+ if [ -x $keeppath ];then
+ chkconfig --add keepdruiddata
+ chkconfig keepdruiddata on
+ fi
+
+keeppath='/etc/init.d/keepdruidquery'
+ if [ -x $keeppath ];then
+ chkconfig --add keepdruidquery
+ chkconfig keepdruidquery on
+ fi
+
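This backup copy of set_druid_env.sh.j2 keeps only the chkconfig registration of the keepdruid* init scripts. Whether registration took effect can be verified afterwards (illustrative, assuming the SysV-style chkconfig used throughout these templates):

    # List the druid keep-alive init scripts and their runlevel settings, if any.
    chkconfig --list | grep keepdruid || echo "no keepdruid services registered"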
diff --git a/parcels/roles/components/druid/templates/bak/unload_druid.sh.j2 b/parcels/roles/components/druid/templates/bak/unload_druid.sh.j2
new file mode 100644
index 0000000..173167e
--- /dev/null
+++ b/parcels/roles/components/druid/templates/bak/unload_druid.sh.j2
@@ -0,0 +1,43 @@
+#!/bin/bash
+source /etc/profile
+
+function killService(){
+keeppath='/etc/init.d/keepdruidall'
+if [ -x $keeppath ];then
+service keepdruidall stop
+chkconfig keepdruidall off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepdruidall
+fi
+
+keeppath='/etc/init.d/keepdruiddata'
+if [ -x $keeppath ];then
+service keepdruiddata stop
+chkconfig keepdruiddata off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepdruiddata
+fi
+
+keeppath='/etc/init.d/keepdruidquery'
+if [ -x $keeppath ];then
+service keepdruidquery stop
+chkconfig keepdruidquery off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepdruidquery
+fi
+
+}
+
+function killPid(){
+
+livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+if [ $livenum -ne 0 ];then
+ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+fi
+
+}
+
+killService
+sleep 15
+killPid
+rm -rf {{ install_path }}/{{ druid_version }}
diff --git a/parcels/roles/components/druid/templates/keepdruidall.j2 b/parcels/roles/components/druid/templates/keepdruidall.j2
index 0892183..063f556 100644
--- a/parcels/roles/components/druid/templates/keepdruidall.j2
+++ b/parcels/roles/components/druid/templates/keepdruidall.j2
@@ -20,16 +20,18 @@ fi
;;
stop)
-ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}' | xargs kill -9 2>/dev/null
-#keeppid=`ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}' | xargs kill -9 2>/dev/null`
-#echo "Daemon PID: $keeppid"
-#kill -9 $keeppid
+keeppid=`ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$keeppid" ]
+then
+echo "Daemon PID: $keeppid"
+kill -9 $keeppid
echo "Stopping all druid processes"
#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
-livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
-if [ $livenum -ne 0 ];then
-ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9 2>/dev/null
fi
+#livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+#if [ $livenum -ne 0 ];then
+#ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+#fi
;;
status)
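The stop branch above now kills the dae-druid-all.sh watchdog only when a PID was actually captured. Testing the captured string with -n avoids the pitfalls of a numeric comparison, which this small illustration shows (not part of the template):

    keeppid=""                                  # what the backticks return when nothing matches
    [ "$keeppid" -ne 0 ] && echo "kill"         # numeric test: "integer expression expected" error
    [ -n "$keeppid" ] && echo "kill"            # string test: quietly skips, as intended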
diff --git a/parcels/roles/components/druid/templates/keepdruiddata.j2 b/parcels/roles/components/druid/templates/keepdruiddata.j2
index 99cfd40..5fa71b6 100644
--- a/parcels/roles/components/druid/templates/keepdruiddata.j2
+++ b/parcels/roles/components/druid/templates/keepdruiddata.j2
@@ -21,14 +21,17 @@ fi
stop)
keeppid=`ps -ef | grep dae-druid-data.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$keeppid" ]
+then
echo "Daemon PID: $keeppid"
kill -9 $keeppid
echo "Stopping all druid processes"
-#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
-livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
-if [ $livenum -ne 0 ];then
-ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
fi
+
+#livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+#if [ $livenum -ne 0 ];then
+#ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+#fi
;;
status)
diff --git a/parcels/roles/components/druid/templates/keepdruidquery.j2 b/parcels/roles/components/druid/templates/keepdruidquery.j2
index 5d63a00..4960ad2 100644
--- a/parcels/roles/components/druid/templates/keepdruidquery.j2
+++ b/parcels/roles/components/druid/templates/keepdruidquery.j2
@@ -21,14 +21,18 @@ fi
stop)
keeppid=`ps -ef | grep dae-druid-query.sh | grep -v grep | awk '{print $2}'`
+
+if [ -n "$keeppid" ]
+then
echo "Daemon PID: $keeppid"
kill -9 $keeppid
echo "Stopping all druid processes"
-#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
-livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
-if [ $livenum -ne 0 ];then
-ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
fi
+
+#livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+#if [ $livenum -ne 0 ];then
+#ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+#fi
;;
status)
diff --git a/parcels/roles/components/druid/templates/unload_druid.sh.j2 b/parcels/roles/components/druid/templates/unload_druid.sh.j2
index 173167e..bda3a29 100644
--- a/parcels/roles/components/druid/templates/unload_druid.sh.j2
+++ b/parcels/roles/components/druid/templates/unload_druid.sh.j2
@@ -38,6 +38,6 @@ fi
}
killService
-sleep 15
-killPid
+#sleep 15
+#killPid
rm -rf {{ install_path }}/{{ druid_version }}
diff --git a/parcels/roles/components/flink/tasks/install_flink.yml b/parcels/roles/components/flink/tasks/install_flink.yml
index 4c2331b..36cf851 100644
--- a/parcels/roles/components/flink/tasks/install_flink.yml
+++ b/parcels/roles/components/flink/tasks/install_flink.yml
@@ -120,7 +120,7 @@
#启动flink守护
- name: start keepflinkalive service
- shell: source /etc/profile && sh /etc/init.d/keepflinkalive start
+ shell: source /etc/profile && /bin/bash /etc/init.d/keepflinkalive start
when: hostvars[inventory_hostname]['ansible_env'].SSH_CONNECTION.split(' ')[2] == groups.flink[0]
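Several start tasks switch from `sh /etc/init.d/...` to `/bin/bash /etc/init.d/...`. On systems where /bin/sh is not bash, init scripts that rely on bashisms can fail under sh; calling bash explicitly removes that dependency. A minimal illustration of the kind of construct that differs:

    # [[ ... ]] is a bashism; dash, a common /bin/sh, rejects it.
    bash -c '[[ 1 -eq 1 ]] && echo "ok under bash"'
    sh   -c '[[ 1 -eq 1 ]] && echo "ok under sh"' || echo "this /bin/sh is not bash-compatible"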
diff --git a/parcels/roles/components/flink/templates/set_flink_env.sh.j2 b/parcels/roles/components/flink/templates/set_flink_env.sh.j2
index 336fd1b..d861ec7 100755
--- a/parcels/roles/components/flink/templates/set_flink_env.sh.j2
+++ b/parcels/roles/components/flink/templates/set_flink_env.sh.j2
@@ -1,13 +1,9 @@
#!/bin/bash
-num=`cat /etc/profile | grep flink | wc -l`
-if [ $num -eq "0" ];then
- echo -e "\n#flink" >> /etc/profile
- echo -e "export FLINK_HOME={{ install_path }}/{{ flink_version }}" >> /etc/profile
- echo -e "export PATH=\$FLINK_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+echo -e "\n#flink\nexport FLINK_HOME={{ install_path }}/{{ flink_version }}\nexport PATH=\$FLINK_HOME/bin:\$PATH" >> /etc/profile.d/flink.sh
+chmod +x /etc/profile.d/flink.sh
+source /etc/profile
keeppath='/etc/init.d/keepflinkalive'
if [ -x $keeppath ];then
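Rather than appending to /etc/profile behind a grep guard, the template now writes a dedicated /etc/profile.d/flink.sh, and the matching unload script removes it again, so installs and uninstalls stay symmetric. For an example install path and version, the generated file would look roughly like this (values are placeholders for the Jinja2 variables):

    # /etc/profile.d/flink.sh as written by set_flink_env.sh (example rendering)
    #flink
    export FLINK_HOME=/opt/galaxy/flink-1.12.2   # {{ install_path }}/{{ flink_version }} (placeholder)
    export PATH=$FLINK_HOME/bin:$PATH

    # New login shells pick this up automatically; an open shell can do:
    # source /etc/profile.d/flink.sh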
diff --git a/parcels/roles/components/flink/templates/unload_flink.sh.j2 b/parcels/roles/components/flink/templates/unload_flink.sh.j2
index 78ddbf2..c64ea67 100755
--- a/parcels/roles/components/flink/templates/unload_flink.sh.j2
+++ b/parcels/roles/components/flink/templates/unload_flink.sh.j2
@@ -22,4 +22,5 @@ killService
sleep 5
killPid
rm -rf {{ install_path }}/{{ flink_version }}
+rm -rf /etc/profile.d/flink.sh
sleep 5
diff --git a/parcels/roles/components/hadoop/tasks/install_hadoop.yml b/parcels/roles/components/hadoop/tasks/install_hadoop.yml
index 46c9f55..122abba 100644
--- a/parcels/roles/components/hadoop/tasks/install_hadoop.yml
+++ b/parcels/roles/components/hadoop/tasks/install_hadoop.yml
@@ -202,7 +202,7 @@
shell: cd {{ install_path }}/{{ hadoop_version }}/bin/ && ./set_hadoop_env.sh
- name: start hadoop journal
- shell: source /etc/profile && sh /etc/init.d/keephdfsjournal start
+ shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsjournal start
run_once: true
delegate_facts: true
delegate_to: "{{ item.ip }}"
@@ -283,7 +283,7 @@
delegate_to: "{{ master_ip }}"
- name: start hadoop-master
- shell: service keephdfsmaster start
+ shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsmaster start
run_once: true
delegate_facts: true
delegate_to: "{{ master_ip }}"
@@ -298,13 +298,13 @@
delegate_to: "{{ slave1_ip }}"
- name: start hadoop-slave
- shell: service keephdfsslave start
+ shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsslave start
run_once: true
delegate_facts: true
delegate_to: "{{ slave1_ip }}"
- name: start hadoop-worker
- shell: service keephdfsworker start
+ shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsworker start
- name: deleted hadoop.tar.gz
shell: rm -rf {{ install_path }}/hadoop-2.7.1.tar.gz
diff --git a/parcels/roles/components/hadoop/templates/set_hadoop_env.sh.j2 b/parcels/roles/components/hadoop/templates/set_hadoop_env.sh.j2
index b37c117..32e1b2f 100644
--- a/parcels/roles/components/hadoop/templates/set_hadoop_env.sh.j2
+++ b/parcels/roles/components/hadoop/templates/set_hadoop_env.sh.j2
@@ -1,15 +1,12 @@
#!/bin/bash
-num=`cat /etc/profile | grep hadoop | wc -l`
-if [ $num -eq "0" ];then
- echo -e "\n#hadoop" >> /etc/profile
- echo -e "export HADOOP_HOME={{ install_path }}/{{ hadoop_version }}" >> /etc/profile
- echo -e "export PATH=\$HADOOP_HOME/sbin:\$PATH" >> /etc/profile
- echo -e "export PATH=\$HADOOP_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+
+echo -e "\n#hadoop\nexport HADOOP_HOME={{ install_path }}/{{ hadoop_version }}\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+
+source /etc/profile
keeppath='/etc/init.d/keephdfsmaster'
if [ -x $keeppath ];then
diff --git a/parcels/roles/components/hadoop/templates/unload_hadoop.sh.j2 b/parcels/roles/components/hadoop/templates/unload_hadoop.sh.j2
index 37c4c0c..b066253 100644
--- a/parcels/roles/components/hadoop/templates/unload_hadoop.sh.j2
+++ b/parcels/roles/components/hadoop/templates/unload_hadoop.sh.j2
@@ -67,4 +67,4 @@ sleep 15
killPid
rm -rf {{ install_path }}/{{ hadoop_version }}
rm -rf {{ install_path }}/hadoop
-
+rm -rf /etc/profile.d/hadoop.sh
diff --git a/parcels/roles/components/hbase/tasks/install_hbase.yml b/parcels/roles/components/hbase/tasks/install_hbase.yml
index 6c9a18f..fc133b0 100644
--- a/parcels/roles/components/hbase/tasks/install_hbase.yml
+++ b/parcels/roles/components/hbase/tasks/install_hbase.yml
@@ -210,7 +210,7 @@
shell: cd {{ install_path }}/{{ hbase_version }}/bin/ && sh set_hbase_env.sh
- name: start hbase master
- shell: source /etc/profile && sh /etc/init.d/keephbasemaster 'start'
+ shell: source /etc/profile && /bin/bash /etc/init.d/keephbasemaster 'start'
run_once: true
delegate_facts: true
delegate_to: "{{ item.ip }}"
@@ -220,7 +220,7 @@
- { ip: '{{ slave2_ip }}' }
- name: start hbase region
- shell: source /etc/profile && sh /etc/init.d/keephbaseregion 'start'
+ shell: source /etc/profile && /bin/bash /etc/init.d/keephbaseregion 'start'
- name: Ansible delete {{ install_path }}/{{ hbase_version }}.tar.gz
file:
diff --git a/parcels/roles/components/hbase/templates/set_hbase_env.sh.j2 b/parcels/roles/components/hbase/templates/set_hbase_env.sh.j2
index c07e48b..ba47125 100644
--- a/parcels/roles/components/hbase/templates/set_hbase_env.sh.j2
+++ b/parcels/roles/components/hbase/templates/set_hbase_env.sh.j2
@@ -1,14 +1,8 @@
#!/bin/bash
-
-num=`cat /etc/profile | grep hbase | wc -l`
-
-if [ $num -eq "0" ];then
- echo -e "\n#hbase" >> /etc/profile
- echo -e "export HBASE_HOME={{ data_path }}/{{ hbase_version }}" >> /etc/profile
- echo -e "export PATH=\$HBASE_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+echo -e "\n#hbase\nexport HBASE_HOME={{ data_path }}/{{ hbase_version }}\nexport PATH=\$HBASE_HOME/bin:\$PATH" >> /etc/profile.d/hbase.sh
+chmod +x /etc/profile.d/hbase.sh
+source /etc/profile
keeppath='/etc/init.d/keephbasemaster'
if [ -x $keeppath ];then
diff --git a/parcels/roles/components/hbase/templates/unload_hbase.sh.j2 b/parcels/roles/components/hbase/templates/unload_hbase.sh.j2
index ed8ae9d..0b8fac9 100644
--- a/parcels/roles/components/hbase/templates/unload_hbase.sh.j2
+++ b/parcels/roles/components/hbase/templates/unload_hbase.sh.j2
@@ -37,4 +37,4 @@ killService
sleep 15
killPid
rm -rf {{ install_path }}/{{ hbase_version }}
-
+rm -rf /etc/profile.d/hbase.sh
diff --git a/parcels/roles/components/kafka/tasks/install_kafka.yml b/parcels/roles/components/kafka/tasks/install_kafka.yml
index 64a3815..9f5ac75 100644
--- a/parcels/roles/components/kafka/tasks/install_kafka.yml
+++ b/parcels/roles/components/kafka/tasks/install_kafka.yml
@@ -209,7 +209,7 @@
#启动kafka守护
- name: start keepkafalive service
- shell: source /etc/profile && sh /etc/init.d/keepkafalive 'start'
+ shell: source /etc/profile && /bin/bash /etc/init.d/keepkafalive 'start'
- name: Ansible delete {{ kafka_version }}.tar.gz
file:
diff --git a/parcels/roles/components/kafka/templates/set_kafka_env.sh.j2 b/parcels/roles/components/kafka/templates/set_kafka_env.sh.j2
index 755b2dc..2014103 100755
--- a/parcels/roles/components/kafka/templates/set_kafka_env.sh.j2
+++ b/parcels/roles/components/kafka/templates/set_kafka_env.sh.j2
@@ -1,13 +1,8 @@
#!/bin/bash
-num=`cat /etc/profile | grep kafka | wc -l`
-
-if [ $num -eq "0" ];then
- echo -e "\n#kafka" >> /etc/profile
- echo -e "export KAFKA_HOME={{ install_path }}/{{ kafka_version }}" >> /etc/profile
- echo -e "export PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+echo -e "\n#kafka\nexport KAFKA_HOME={{ install_path }}/{{ kafka_version }}\nexport PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile.d/kafka.sh
+chmod +x /etc/profile.d/kafka.sh
+source /etc/profile
keeppath='/etc/init.d/keepkafalive'
if [ -x $keeppath ];then
diff --git a/parcels/roles/components/kafka/templates/unload_kafka.sh.j2 b/parcels/roles/components/kafka/templates/unload_kafka.sh.j2
index 1800a96..a6a512d 100755
--- a/parcels/roles/components/kafka/templates/unload_kafka.sh.j2
+++ b/parcels/roles/components/kafka/templates/unload_kafka.sh.j2
@@ -24,4 +24,5 @@ sleep 5
killPid
rm -rf {{ install_path }}/{{ kafka_version }}
rm -rf {{ data_path }}/{{ kafka_version }}
+rm -rf /etc/profile.d/kafka.sh
sleep 5
diff --git a/parcels/roles/components/mariadb/install-mariadb-cluster/tasks/install-mariadb.yml b/parcels/roles/components/mariadb/install-mariadb-cluster/tasks/install-mariadb.yml
index 73d5687..43562bb 100644
--- a/parcels/roles/components/mariadb/install-mariadb-cluster/tasks/install-mariadb.yml
+++ b/parcels/roles/components/mariadb/install-mariadb-cluster/tasks/install-mariadb.yml
@@ -45,39 +45,39 @@
when: (allowed_unload) == "no"
- name: check whether mariadb is running; if it does not exist an error may be printed, don't worry
- shell: mysql -uroot
- ignore_errors: True
+ #shell: mysql -uroot
+ shell: ps aux | grep mysqld | grep -v grep | wc -l
register: result
- name: create directory path:/data/mariadb/logs
file:
state: directory
path: '/data/mariadb/logs'
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: create directory path:/data/mariadb/sql
file:
state: directory
path: '/data/mariadb/sql'
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: create directory path:{{ install_path }}/mariadb/sql
file:
state: directory
path: '{{ install_path }}/mariadb'
- when: result.stderr is defined and result.stderr != ''
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: copy
copy: src=files/slow_query.log dest=/data/mariadb/logs force=true backup=yes
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: mariadb service not exist; copy -rpm.zip and unzip mariadb.zip to {{ install_path }}/mariadb
unarchive: src={{ package_path }}/mariadb.zip dest={{ install_path }}/mariadb copy=yes
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: mariadb service not exist; install mariadb
shell: rpm -ivh {{ install_path }}/mariadb/*.rpm --force --nodeps && systemctl start mysql && systemctl enable mysql
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
register: install_mariadb_result
- name: copy my.cnf
@@ -87,7 +87,7 @@
force: yes
run_once: true
delegate_to: '{{ groups.mariadb[0] }}'
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: copy my.cnf
template:
@@ -96,15 +96,15 @@
force: yes
run_once: true
delegate_to: '{{ groups.mariadb[1] }}'
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: change mariadb root password
shell: mysql -uroot -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '{{ galaxy_mariadb_pin }}';"
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: change mariadb remote authority
shell: mysql -uroot -p{{ galaxy_mariadb_pin }} -e"use mysql;grant all privileges on *.* to 'root'@'%' identified by '{{ galaxy_mariadb_pin }}' with grant option;FLUSH PRIVILEGES;"
- when: result.stderr is defined and result.stderr != '' #error output means the mariadb service does not exist
+ when: result.stdout == '0' #a zero count means the mariadb service does not exist
- name: restart mariadb
shell: systemctl restart mysql
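The existence check no longer depends on `mysql -uroot` producing stderr; it counts mysqld processes and treats 0 as "not installed". The same test extracted into plain shell, for clarity only (the tasks use the registered stdout, not a script like this):

    # 0 means no mysqld process is running, so the install tasks should run.
    running=$(ps aux | grep mysqld | grep -v grep | wc -l)
    if [ "${running}" -eq 0 ]; then
        echo "mariadb not detected; installation tasks will run"
    else
        echo "mariadb already running (${running} process(es)); installation tasks are skipped"
    fi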
diff --git a/parcels/roles/components/nacos/files/mysql b/parcels/roles/components/nacos/files/mysql
index 66af1a1..eb26146 100755
Binary files a/parcels/roles/components/nacos/files/mysql and b/parcels/roles/components/nacos/files/mysql differ
diff --git a/parcels/roles/components/nacos/tasks/install_nacos.yml b/parcels/roles/components/nacos/tasks/install_nacos.yml
index 8f7e334..f418366 100644
--- a/parcels/roles/components/nacos/tasks/install_nacos.yml
+++ b/parcels/roles/components/nacos/tasks/install_nacos.yml
@@ -156,7 +156,7 @@
#启动守护
- name: start keepnacosalive service
- shell: source /etc/profile && sh /etc/init.d/keepnacosalive 'start'
+ shell: source /etc/profile && /bin/bash /etc/init.d/keepnacosalive 'start'
- name: Ansible delete {{ nacos_version }}.tar.gz
file:
diff --git a/parcels/roles/components/nacos/templates/set-nacos-env.sh.j2 b/parcels/roles/components/nacos/templates/set-nacos-env.sh.j2
index 55e3068..dd69dd7 100755
--- a/parcels/roles/components/nacos/templates/set-nacos-env.sh.j2
+++ b/parcels/roles/components/nacos/templates/set-nacos-env.sh.j2
@@ -1,13 +1,9 @@
#!/bin/bash
-num=`cat /etc/profile | grep nacos | wc -l`
-
-if [ $num -eq "0" ];then
- echo -e "\n#nacos" >> /etc/profile
- echo -e "export NACOS_HOME={{ install_path }}/{{ nacos_version }}" >> /etc/profile
- echo -e "export PATH=\$NACOS_HOME/bin:\$PATH" >> /etc/profile
-fi
+echo -e "\n#nacos\nexport NACOS_HOME={{ install_path }}/{{ nacos_version }}\nexport PATH=\$NACOS_HOME/bin:\$PATH" >> /etc/profile.d/nacos.sh
+chmod +x /etc/profile.d/nacos.sh
source /etc/profile
+
keeppath='/etc/init.d/keepnacosalive'
if [ -x $keeppath ];then
chkconfig --add keepnacosalive
diff --git a/parcels/roles/components/nacos/templates/unload_nacos.sh.j2 b/parcels/roles/components/nacos/templates/unload_nacos.sh.j2
index 2740588..91a0064 100755
--- a/parcels/roles/components/nacos/templates/unload_nacos.sh.j2
+++ b/parcels/roles/components/nacos/templates/unload_nacos.sh.j2
@@ -23,4 +23,6 @@ killService
sleep 10
killPid
rm -rf {{ install_path }}/{{ nacos_version }}
+rm -rf /etc/profile.d/nacos.sh
sleep 5
+
diff --git a/parcels/roles/components/spark/templates/set_spark_env.sh.j2 b/parcels/roles/components/spark/templates/set_spark_env.sh.j2
index e035754..daf7488 100755
--- a/parcels/roles/components/spark/templates/set_spark_env.sh.j2
+++ b/parcels/roles/components/spark/templates/set_spark_env.sh.j2
@@ -1,14 +1,8 @@
#!/bin/bash
-num=`cat /etc/profile | grep spark | wc -l`
-
-if [ $num -eq "0" ];then
- echo -e "\n#spark" >> /etc/profile
- echo -e "export SPARK_HOME={{ install_path }}/{{ spark_version }}" >> /etc/profile
- echo -e "export PATH=\$SPARK_HOME/sbin:\$PATH" >> /etc/profile
- echo -e "export PATH=\$SPARK_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+echo -e "\n#spark\nexport SPARK_HOME={{ install_path }}/{{ spark_version }}\nexport PATH=\$SPARK_HOME/sbin:\$PATH\nexport PATH=\$SPARK_HOME/bin:\$PATH" >> /etc/profile.d/spark.sh
+chmod +x /etc/profile.d/spark.sh
+source /etc/profile
keeppath='/etc/init.d/keepsparkall'
if [ -x $keeppath ];then
diff --git a/parcels/roles/components/storm/templates/set_storm_env.sh.j2 b/parcels/roles/components/storm/templates/set_storm_env.sh.j2
index ce28c15..3e16ecd 100755
--- a/parcels/roles/components/storm/templates/set_storm_env.sh.j2
+++ b/parcels/roles/components/storm/templates/set_storm_env.sh.j2
@@ -1,12 +1,8 @@
#!/bin/bash
-num=`cat /etc/profile | grep storm | wc -l`
-if [ $num -eq "0" ];then
- echo -e "\n#storm" >> /etc/profile
- echo -e "export STORM_HOME={{ install_path }}/{{ storm_version }}" >> /etc/profile
- echo -e "export PATH=\$STORM_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+echo -e "\n#storm\nexport STORM_HOME={{ install_path }}/{{ storm_version }}\nexport PATH=\$STORM_HOME/bin:\$PATH" >> /etc/profile.d/storm.sh
+chmod +x /etc/profile.d/storm.sh
+source /etc/profile
keeppath='/etc/init.d/keepnimalive'
if [ -x $keeppath ];then
diff --git a/parcels/roles/components/storm/templates/unload_storm.sh.j2 b/parcels/roles/components/storm/templates/unload_storm.sh.j2
index 422f08f..e566695 100755
--- a/parcels/roles/components/storm/templates/unload_storm.sh.j2
+++ b/parcels/roles/components/storm/templates/unload_storm.sh.j2
@@ -37,3 +37,4 @@ killService
sleep 5
killPid
rm -rf {{ install_path }}/{{ storm_version }}
+rm -rf /etc/profile.d/storm.sh
diff --git a/parcels/roles/components/zookeeper/tasks/install_zk.yml b/parcels/roles/components/zookeeper/tasks/install_zk.yml
index 623d16e..8166100 100644
--- a/parcels/roles/components/zookeeper/tasks/install_zk.yml
+++ b/parcels/roles/components/zookeeper/tasks/install_zk.yml
@@ -157,7 +157,7 @@
force_source: yes
- name: start keepzkalive service
- shell: source /etc/profile && sh /etc/init.d/keepzkalive 'start'
+ shell: source /etc/profile && /bin/bash /etc/init.d/keepzkalive 'start'
- name: waiting zookeeper start sleep 10 s
shell: sleep 10
@@ -167,7 +167,7 @@
register: zkstatus_out
- name: start keepzkalive service again
- shell: source /etc/profile && sh /etc/init.d/keepzkalive 'start'
+ shell: source /etc/profile && /bin/bash /etc/init.d/keepzkalive 'start'
when: zkstatus_out.stdout != '1'
- name: start zookeeper_exporter
diff --git a/parcels/roles/components/zookeeper/templates/keepzkalive.j2 b/parcels/roles/components/zookeeper/templates/keepzkalive.j2
index 65de403..773425b 100644
--- a/parcels/roles/components/zookeeper/templates/keepzkalive.j2
+++ b/parcels/roles/components/zookeeper/templates/keepzkalive.j2
@@ -12,7 +12,7 @@ start)
keepnum=`ps -ef | grep dae-zookeeper.sh | grep -v grep | wc -l`
if [ $keepnum -eq "0" ];then
-nohup {{ install_path }}/{{ zookeeper_version }}/bin/dae-zookeeper.sh /home/bigdata > /dev/null 2>&1 &
+nohup {{ install_path }}/{{ zookeeper_version }}/bin/dae-zookeeper.sh > /dev/null 2>&1 &
fi
;;
diff --git a/parcels/roles/components/zookeeper/templates/set_zk_env.sh.j2 b/parcels/roles/components/zookeeper/templates/set_zk_env.sh.j2
index 305ccd2..048e868 100755
--- a/parcels/roles/components/zookeeper/templates/set_zk_env.sh.j2
+++ b/parcels/roles/components/zookeeper/templates/set_zk_env.sh.j2
@@ -1,13 +1,7 @@
#!/bin/bash
-num=`cat /etc/profile | grep zookeeper | wc -l`
-
-if [ $num -eq "0" ];then
- echo -e "\n#zookeeper" >> /etc/profile
- echo -e "export ZOOKEEPER_HOME={{ install_path }}/{{ zookeeper_version }}" >> /etc/profile
- echo -e "export PATH=\$ZOOKEEPER_HOME/bin:\$PATH" >> /etc/profile
- source /etc/profile
-fi
+echo -e "\n#zookeeper\nexport ZOOKEEPER_HOME={{ install_path }}/{{ zookeeper_version }}\nexport PATH=\$ZOOKEEPER_HOME/bin:\$PATH" >> /etc/profile.d/zookeeper.sh
+chmod +x /etc/profile.d/zookeeper.sh
keeppsth='/etc/init.d/keepzkalive'
if [ -x $keeppsth ];then
diff --git a/parcels/roles/components/zookeeper/templates/unload_zk.sh.j2 b/parcels/roles/components/zookeeper/templates/unload_zk.sh.j2
index 86548fb..b561d41 100755
--- a/parcels/roles/components/zookeeper/templates/unload_zk.sh.j2
+++ b/parcels/roles/components/zookeeper/templates/unload_zk.sh.j2
@@ -8,7 +8,7 @@ livenum=`jps -l | grep "org.apache.zookeeper.server.quorum.QuorumPeerMain" | wc
if [ $livenum -ne 0 ];then
keeppid=`jps -l |grep zookeeper | awk '{print $1}'`
kill -9 $keeppid
-
+rm -rf /etc/profile.d/zookeeper.sh
rm -rf {{ install_path }}/{{ zookeeper_version }}
rm -rf {{ data_path }}/{{ zookeeper_version }}
chkconfig keepzkalive off
diff --git a/parcels/roles/dependencies/environments/install_docker/docker/containerd b/parcels/roles/dependencies/environments/install_docker/docker/containerd
index 7a67143..cbe3092 100755
Binary files a/parcels/roles/dependencies/environments/install_docker/docker/containerd and b/parcels/roles/dependencies/environments/install_docker/docker/containerd differ
diff --git a/parcels/roles/dependencies/environments/install_docker/docker/ctr b/parcels/roles/dependencies/environments/install_docker/docker/ctr
index cf81207..5cd0cfa 100755
Binary files a/parcels/roles/dependencies/environments/install_docker/docker/ctr and b/parcels/roles/dependencies/environments/install_docker/docker/ctr differ
diff --git a/parcels/roles/dependencies/environments/install_docker/docker/docker b/parcels/roles/dependencies/environments/install_docker/docker/docker
index 9487cde..a123128 100755
Binary files a/parcels/roles/dependencies/environments/install_docker/docker/docker and b/parcels/roles/dependencies/environments/install_docker/docker/docker differ
diff --git a/parcels/roles/dependencies/environments/install_docker/docker/dockerd b/parcels/roles/dependencies/environments/install_docker/docker/dockerd
index 307836c..df43703 100755
Binary files a/parcels/roles/dependencies/environments/install_docker/docker/dockerd and b/parcels/roles/dependencies/environments/install_docker/docker/dockerd differ
diff --git a/parcels/roles/init/clickhouse/tasks/init_clickhouse.yml b/parcels/roles/init/clickhouse/tasks/init_clickhouse.yml
index 05a29f5..c31a15a 100644
--- a/parcels/roles/init/clickhouse/tasks/init_clickhouse.yml
+++ b/parcels/roles/init/clickhouse/tasks/init_clickhouse.yml
@@ -12,6 +12,12 @@
delegate_to: "{{ query_ip }}"
- name: create clickhouse tables
+ shell: clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u default --password {{ root_key }} --multiquery < {{ install_path }}/create_ck_table.sql 2>/dev/null
+ run_once: true
+ ignore_errors: True
+ delegate_to: "{{ query_ip }}"
+
+- name: check clickhouse tables
shell: clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u default --password {{ root_key }} --multiquery < {{ install_path }}/create_ck_table.sql
run_once: true
delegate_to: "{{ query_ip }}"
diff --git a/parcels/roles/init/dos-baseline/tasks/init_dos.yml b/parcels/roles/init/dos-baseline/tasks/init_dos.yml
index dad8736..504b6d4 100644
--- a/parcels/roles/init/dos-baseline/tasks/init_dos.yml
+++ b/parcels/roles/init/dos-baseline/tasks/init_dos.yml
@@ -51,7 +51,8 @@
cron:
name: 'dos-baseline'
hour: "3"
- weekday: "1"
job: '/bin/sh {{ install_path }}/dos-baseline/start.sh'
user: root
delegate_to: "{{ master_ip }}"
+
+
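Removing weekday: "1" turns the dos-baseline job from weekly into daily. One thing to keep in mind is that the Ansible cron module fills unspecified fields with `*`, so with only hour: "3" the generated entry runs every minute between 03:00 and 03:59 unless a minute is also pinned (illustrative; verify against the actual crontab on the master node):

    # Entry generated with only hour: "3" set (minute defaults to *):
    #   * 3 * * * /bin/sh {{ install_path }}/dos-baseline/start.sh
    # Inspect what actually landed in root's crontab on the master node:
    crontab -l -u root | grep -A1 dos-baseline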
diff --git a/parcels/roles/init/druid/files/druid_topology/rule/supervisor-manger b/parcels/roles/init/druid/files/druid_topology/rule/supervisor-manger
index 33673cf..c51a86f 100644
--- a/parcels/roles/init/druid/files/druid_topology/rule/supervisor-manger
+++ b/parcels/roles/init/druid/files/druid_topology/rule/supervisor-manger
@@ -18,31 +18,31 @@ case $1 in
resetAll)
for var in ${common_task_name[@]};
do
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
sleep 2
done
for var in ${hot_task_name[@]};
do
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
sleep 2
done
;;
terminateAll)
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/terminateAll
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/terminateAll
;;
shutdownAllTasks)
for var in ${common_task_name[@]};
do
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
sleep 2
done
for var in ${hot_task_name[@]};
do
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
sleep 2
done
;;
@@ -52,7 +52,7 @@ case $1 in
echo "Usage: supervisor-manger reset "
exit 1
fi
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/reset
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/reset
;;
terminate)
@@ -60,7 +60,7 @@ case $1 in
echo "Usage: supervisor-manger terminate "
exit 1
fi
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/terminate
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/terminate
;;
shutdownTasks)
@@ -68,7 +68,7 @@ case $1 in
echo "Usage: supervisor-manger shutdownTasks "
exit 1
fi
- curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/$2/shutdownAllTasks
+ curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/$2/shutdownAllTasks
;;
*)
echo "Usage: supervisor-manger {resetAll|terminateAll|shutdownAllTasks}"
diff --git a/parcels/roles/init/druid/tasks/main.yml b/parcels/roles/init/druid/tasks/main.yml
index 7d2dbb5..db90a94 100644
--- a/parcels/roles/init/druid/tasks/main.yml
+++ b/parcels/roles/init/druid/tasks/main.yml
@@ -38,11 +38,13 @@
run_once: true
delegate_to: '{{ hadoop_ip }}'
-- name: check if {{ mariadb_druid_database }} exist
- shell: mysql -s -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} -e "SELECT COUNT(1) FROM druid.druid_segments WHERE start = '3000-01-02T00:00:00.000Z'"
- register: segments_nums
- run_once: true
- delegate_to: "127.0.0.1"
+#- name: check if {{ mariadb_druid_database }} exists; if a 'FAILED' message appears, please ignore it
+# shell: mysql -s -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} -e "SELECT COUNT(1) FROM druid.druid_segments WHERE start = '3000-01-02T00:00:00.000Z'"
+# register: segments_nums
+# run_once: true
+# delegate_to: "127.0.0.1"
+# ignore_errors: True
+# failed_when: "'FAILED' in segments_nums.stderr or segments_nums.stdout != '20'"
- name: copy segments.sql to ~
copy:
@@ -51,13 +53,13 @@
force: true
run_once: true
delegate_to: '127.0.0.1'
- when: segments_nums.stdout != '20'
+ #when: segments_nums is defined
- name: insert segments to mariadb
shell: 'mysql -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} {{ mariadb_druid_database }} < ~/druid_segments-tsg3.0.sql'
run_once: true
delegate_to: '127.0.0.1'
- when: segments_nums.stdout != '20'
+ #when: segments_nums is defined
- name: copy druid_topology to {{ install_path }}
copy:
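The druid.druid_segments pre-check is commented out, so the segments SQL is now loaded on every run. The disabled query can still be executed by hand when debugging (illustrative; the host and pin stand in for gateway_keepalive_host and galaxy_mariadb_pin):

    # Count the sentinel segments; the old task expected exactly 20 rows.
    MARIADB_VIP="192.0.2.10"          # gateway_keepalive_host (placeholder)
    MARIADB_PIN="galaxy2019"          # galaxy_mariadb_pin (placeholder)
    mysql -s -uroot -p"${MARIADB_PIN}" -P3306 -h "${MARIADB_VIP}" \
      -e "SELECT COUNT(1) FROM druid.druid_segments WHERE start = '3000-01-02T00:00:00.000Z'"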
diff --git a/parcels/roles/init/druid/templates/ingestionTask.sh.j2 b/parcels/roles/init/druid/templates/ingestionTask.sh.j2
index 27117d0..406f97e 100644
--- a/parcels/roles/init/druid/templates/ingestionTask.sh.j2
+++ b/parcels/roles/init/druid/templates/ingestionTask.sh.j2
@@ -6,11 +6,11 @@ task_name=`cat topology_list.txt`
kafka_host={{ kafka_source_servers }}
druid_host={{ groups.druid[0] }}
-curl -i -XGET 'http://'$druid_host':8081/druid/indexer/v1/leader'
+curl --retry-delay 3 --retry 3 -i -XGET 'http://'$druid_host':8088/druid/indexer/v1/leader'
num=$?
while [ $num -gt "0" ];do
sleep 5
- curl -s -XGET 'http://'$druid_host':8081/druid/indexer/v1/leader'
+ curl --retry-delay 3 --retry 3 -s -XGET 'http://'$druid_host':8088/druid/indexer/v1/leader'
num=$?
done
cp ./tasks/* ./
@@ -18,7 +18,7 @@ cp ./tasks/* ./
for var in ${task_name[@]};
do
sed -i 's/kafkabootstrap/'$kafka_host'/' ${var}
-curl -X 'POST' -H 'Content-Type:application/json' -d @${var} http://$druid_host:8081/druid/indexer/v1/supervisor
+curl --retry-delay 3 --retry 3 -X 'POST' -H 'Content-Type:application/json' -d @${var} http://$druid_host:8088/druid/indexer/v1/supervisor
echo "'${var}' 任务启动成功"
sleep 2
done
diff --git a/parcels/roles/init/druid/templates/post_rule.sh.j2 b/parcels/roles/init/druid/templates/post_rule.sh.j2
index f97c1b4..f9064f2 100755
--- a/parcels/roles/init/druid/templates/post_rule.sh.j2
+++ b/parcels/roles/init/druid/templates/post_rule.sh.j2
@@ -14,14 +14,14 @@ hot_rule_file="$base_dir/hot_data_rule.json"
for var in ${common_task_name[@]};
do
-curl -X 'POST' -H 'Content-Type:application/json' -d @$common_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
+curl --retry-delay 3 --retry 3 -X 'POST' -H 'Content-Type:application/json' -d @$common_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
echo "'${var}' 任务启动成功"
sleep 2
done
for var in ${hot_task_name[@]};
do
-curl -X 'POST' -H 'Content-Type:application/json' -d @$hot_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
+curl --retry-delay 3 --retry 3 -X 'POST' -H 'Content-Type:application/json' -d @$hot_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
echo "'${var}' 任务启动成功"
sleep 2
done
diff --git a/parcels/roles/init/flink/tasks/main.yml b/parcels/roles/init/flink/tasks/main.yml
index 9d04154..ff2118e 100644
--- a/parcels/roles/init/flink/tasks/main.yml
+++ b/parcels/roles/init/flink/tasks/main.yml
@@ -15,7 +15,7 @@
src: '{{ package_path }}/topology'
dest: '{{ install_path }}/'
force: true
- backup: yes
+ backup: false
#Packages on the other servers are kept as backups
#delegate_to: "{{ master_ip }}"
diff --git a/parcels/roles/init/galaxy-hos-service/tasks/main.yml b/parcels/roles/init/galaxy-hos-service/tasks/main.yml
index f32a622..b2f8d6f 100644
--- a/parcels/roles/init/galaxy-hos-service/tasks/main.yml
+++ b/parcels/roles/init/galaxy-hos-service/tasks/main.yml
@@ -7,59 +7,59 @@
port: 8186
timeout: 60
-- name: init hos service
- shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/admin/initialize' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+- name: Initialize Galaxy-hos-service
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/admin/initialize' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
register: initialize
run_once: true
delegate_facts: true
delegate_to: '{{ master_ip }}'
-- name: check hos init status
+- name: Checking Initialize status
fail:
msg: "HOS 内部初始化接口执行失败!请检查"
when: initialize.stdout != '200'
-- name: delete firewall_hos_bucket
- shell: "curl --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
- register: delete
- run_once: true
- delegate_facts: true
- delegate_to: '{{ master_ip }}'
-
-- name: check delete firewall_hos_bucket status
- fail:
- msg: "HOS 删除 firewall_hos_bucket 桶异常"
- when: delete.stdout != '204'
-
-
-- name: delete proxy_hos_bucket
- shell: "curl --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
- register: delete
- run_once: true
- delegate_facts: true
- delegate_to: '{{ master_ip }}'
-
-- name: check delete proxy_hos_bucket status
- fail:
- msg: "HOS 删除 proxy_hos_bucket 桶异常"
- when: delete.stdout != '204'
-
-
-- name: delete session_record_hos_bucket
- shell: "curl --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
- register: delete
- run_once: true
- delegate_facts: true
- delegate_to: '{{ master_ip }}'
-
-- name: check delete session_record_hos_bucket status
- fail:
- msg: "HOS 删除 session_record_hos_bucket 桶异常"
- when: delete.stdout != '204'
+#- name: delete firewall_hos_bucket
+# shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+# register: delete
+# run_once: true
+# delegate_facts: true
+# delegate_to: '{{ master_ip }}'
+#
+#- name: check delete firewall_hos_bucket status
+# fail:
+# msg: "HOS 删除 firewall_hos_bucket 桶异常"
+# when: delete.stdout != '204'
+#
+#
+#- name: delete proxy_hos_bucket
+# shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+# register: delete
+# run_once: true
+# delegate_facts: true
+# delegate_to: '{{ master_ip }}'
+#
+#- name: check delete proxy_hos_bucket status
+# fail:
+# msg: "HOS 删除 proxy_hos_bucket 桶异常"
+# when: delete.stdout != '204'
+#
+#
+#- name: delete session_record_hos_bucket
+# shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+# register: delete
+# run_once: true
+# delegate_facts: true
+# delegate_to: '{{ master_ip }}'
+#
+#- name: check delete session_record_hos_bucket status
+# fail:
+# msg: "HOS 删除 session_record_hos_bucket 桶异常"
+# when: delete.stdout != '204'
- name: create firewall_hos_bucket
- shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
register: table1
run_once: true
delegate_facts: true
@@ -71,7 +71,7 @@
    when: table1.stdout.find('200') == -1 and table1.stdout.find('409') == -1
- name: create proxy_hos_bucket
- shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
register: table2
run_once: true
delegate_facts: true
@@ -83,7 +83,7 @@
    when: table2.stdout.find('200') == -1 and table2.stdout.find('409') == -1
- name: create session_record_hos_bucket
- shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
register: table3
run_once: true
delegate_facts: true
@@ -95,7 +95,7 @@
    when: table3.stdout.find('200') == -1 and table3.stdout.find('409') == -1
- name: Does proxy_hos_bucket exist
- shell: "curl --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep proxy_hos_bucket | wc -l"
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep proxy_hos_bucket | wc -l"
register: bucketa_out
- fail:
@@ -103,7 +103,7 @@
when: bucketa_out.stdout != '1'
- name: Does session_record_hos_bucket exist
- shell: "curl --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep session_record_hos_bucket | wc -l"
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep session_record_hos_bucket | wc -l"
register: bucketb_out
- fail:
@@ -111,7 +111,7 @@
when: bucketb_out.stdout != '1'
- name: Does firewall_hos_bucket exist
- shell: "curl --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep firewall_hos_bucket | wc -l"
+ shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep firewall_hos_bucket | wc -l"
register: bucketc_out
- fail:
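
Note on the bucket checks above: the shell-plus-curl pattern can also be expressed with Ansible's built-in uri module, which handles retries and status checking without parsing %{http_code} by hand. A minimal sketch, reusing the port 8186 and admin token shown in these tasks (the task itself is illustrative and not part of the role):

- name: Check that firewall_hos_bucket exists (illustrative sketch)
  uri:
    url: "http://{{ master_ip }}:8186/hos/"
    method: GET
    headers:
      token: c21f969b5f03d33d43e04f8f136e7682
    return_content: yes
  register: bucket_list
  until: "'firewall_hos_bucket' in bucket_list.content"
  retries: 3
  delay: 3
  run_once: true
  delegate_to: "{{ master_ip }}"
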
diff --git a/parcels/roles/init/galaxy-job-service/files/mysql b/parcels/roles/init/galaxy-job-service/files/mysql
index 66af1a1..eb26146 100755
Binary files a/parcels/roles/init/galaxy-job-service/files/mysql and b/parcels/roles/init/galaxy-job-service/files/mysql differ
diff --git a/parcels/roles/init/hbase/tasks/main.yml b/parcels/roles/init/hbase/tasks/main.yml
index fa1022c..46c03c7 100644
--- a/parcels/roles/init/hbase/tasks/main.yml
+++ b/parcels/roles/init/hbase/tasks/main.yml
@@ -15,8 +15,10 @@
backup: yes
- name: create hbase table
- shell: cd {{ install_path }}/{{ hbase_version }}/bin/ && ./create-hbase-table.sh
+ shell: cd {{ install_path }}/{{ hbase_version }}/bin/ && ./create-hbase-table.sh | grep ERROR | grep -v "already exists"
+ register: result
run_once: true
delegate_facts: true
delegate_to: "{{ master_ip }}"
+ failed_when: "'ERROR' in result.stdout"
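
A note on the failed_when introduced above: once failed_when is defined, Ansible judges failure by that expression alone, so the grep pipeline exiting non-zero when no ERROR lines remain does not mark the task failed. A self-contained illustration of the same mechanism (the echo command is hypothetical and only stands in for the create script's output):

- name: Fail only when the output contains ERROR (illustrative sketch)
  shell: echo "TableExistsException - already exists"   # hypothetical output for the demo
  register: demo_out
  failed_when: "'ERROR' in demo_out.stdout"             # the exit code is no longer the failure criterion
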
diff --git a/parcels/roles/init/hbase/templates/create-hbase-table.sh.j2 b/parcels/roles/init/hbase/templates/create-hbase-table.sh.j2
index 383c8c2..97b9c4f 100644
--- a/parcels/roles/init/hbase/templates/create-hbase-table.sh.j2
+++ b/parcels/roles/init/hbase/templates/create-hbase-table.sh.j2
@@ -5,12 +5,12 @@ source /etc/profile
exec {{ install_path }}/{{ hbase_version }}/bin/hbase shell <<EOF
create '…', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}
create 'sub:subscriber_info', {NAME => 'subscriber_id', VERSIONS => 1}
-create 'ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood', 'DNS Amplification'
+create 'dos:ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood','DNS Amplification'
-list
EOF
diff --git a/parcels/roles/services/galaxy-chproxy/tasks/load_chproxy.yml b/parcels/roles/services/galaxy-chproxy/tasks/load_chproxy.yml
index 83b2501..dd18b3e 100644
--- a/parcels/roles/services/galaxy-chproxy/tasks/load_chproxy.yml
+++ b/parcels/roles/services/galaxy-chproxy/tasks/load_chproxy.yml
@@ -1,3 +1,6 @@
+- name: stop keepalived
+ shell: if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne '0' ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi && service keepalived stop
+
- name: stop and remove {{ chproxy_image_container_name }} container
docker_container:
name: '{{ chproxy_image_container_name }}'
@@ -129,3 +132,6 @@
with_items:
- { ip: '{{ groups.services[0] }}' }
- { ip: '{{ groups.services[1] }}' }
+
+- name: start keepalived
+ shell: 'nohup /bin/bash /etc/keepalived/kp_daemon.sh >/dev/null 2>&1 &'
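
The stop/start pair added here shells out on both ends. Where only keepalived itself (and not the kp_daemon watchdog) needs stopping, the same step could be expressed with the service module; a sketch under that assumption:

- name: Stop keepalived via the service module (illustrative sketch)
  service:
    name: keepalived
    state: stopped
  ignore_errors: true   # keepalived may not be installed or running yet on a fresh node
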
diff --git a/parcels/roles/services/galaxy-gateway-keepalive/tasks/main.yml b/parcels/roles/services/galaxy-gateway-keepalive/tasks/main.yml
index d76fcba..2961ea8 100644
--- a/parcels/roles/services/galaxy-gateway-keepalive/tasks/main.yml
+++ b/parcels/roles/services/galaxy-gateway-keepalive/tasks/main.yml
@@ -1,7 +1,32 @@
#Stop the old auto-start daemon process
- name: kill kp_daemon.sh
- shell: ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill 2>/dev/null
+ shell: "if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne 0 ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi"
+- block:
+ - name: Check if the Keepalived service already exists
+ shell: source /etc/profile && rpm -qa | grep keepalived | wc -l
+ register: check_out
+ - name: copy unload_keepalived.sh
+ template:
+ src: unload_keepalived.sh
+ dest: /root/
+ - name: unload keepalived
+ shell: sh /root/unload_keepalived.sh | grep -v "warning"
+ when: check_out.stdout >= '1'
+ - name: Check if the keepalived service already exists
+ shell: source /etc/profile && rpm -qa | grep keepalived | wc -l
+ register: check_out
+ - name: delete unload_keepalived.sh
+ file:
+ path: "/root/unload_keepalived.sh"
+ state: absent
+  - name: Abort the installation
+ fail:
+ msg: "卸载失败,组件可能非本安装部署,请联系开发确认或手动卸载后继续安装"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
+ when: (allowed_unload) == "yes"
#Create the directory
- name: create keepalived package path:{{ keepalived_package_path }}
diff --git a/parcels/roles/services/galaxy-gateway-keepalive/templates/installKeepAlived.sh.j2 b/parcels/roles/services/galaxy-gateway-keepalive/templates/installKeepAlived.sh.j2
index 0d98e47..4e456a8 100644
--- a/parcels/roles/services/galaxy-gateway-keepalive/templates/installKeepAlived.sh.j2
+++ b/parcels/roles/services/galaxy-gateway-keepalive/templates/installKeepAlived.sh.j2
@@ -67,13 +67,18 @@ installKP(){
chmod +x $keepalivedInstallPath/$keepalivedDaeonName
echo "将$keepalivedInstallPath/$keepalivedDaeonName守护进程添加到开机自启动中"
+
chmod +x /etc/rc.d/rc.local
+ upStartNum=`cat /etc/rc.d/rc.local | grep "$keepalivedInstallPath" | wc -l`
+ if [ $upStartNum -eq "0" ];then
echo -e "\n#设置$keepalivedDaeonName守护脚本开机自启动" >> /etc/rc.d/rc.local
echo "nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &" >> /etc/rc.d/rc.local
- echo "将$keepalivedInstallPath/$keepalivedDaeonName守护进程添加到开机自启动成功"
+ fi
echo "开始启动$keepalivedInstallPath/$keepalivedDaeonName守护进程"
# nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &
+ sed -i -e 's#. /etc/sysconfig/keepalived#. /usr/local/etc/sysconfig/keepalived#g' /etc/init.d/keepalived
+ systemctl daemon-reload
service keepalived start
rm -rf $keepalivedBagPath
}
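
The upStartNum guard added above makes the rc.local append idempotent across re-installs. The same effect can be obtained from the playbook side with lineinfile, which only appends a line that is not already present; a sketch, where keepalived_install_path is an assumed variable mirroring $keepalivedInstallPath and kp_daemon.sh is the watchdog script referenced elsewhere in these roles:

- name: Add the keepalived watchdog to rc.local exactly once (illustrative sketch)
  lineinfile:
    path: /etc/rc.d/rc.local
    line: "nohup {{ keepalived_install_path }}/kp_daemon.sh >/dev/null 2>&1 &"
    state: present
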
diff --git a/parcels/roles/services/galaxy-gateway-keepalive/templates/unload_keepalived.sh b/parcels/roles/services/galaxy-gateway-keepalive/templates/unload_keepalived.sh
new file mode 100644
index 0000000..fd8b99a
--- /dev/null
+++ b/parcels/roles/services/galaxy-gateway-keepalive/templates/unload_keepalived.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+#for i in `rpm -qa | grep keepalived` ; do rpm -e --nodeps $i ; done
+
+if [ `ps aux | grep -E "keepalived|kp_daemon" | grep -v grep | wc -l` -ne '0' ] ; then ps aux | grep -E "keepalived|kp_daemon" | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi
+
+rm -rf /etc/keepalived
+
+rm -rf /etc/init.d/keepalived
diff --git a/parcels/roles/services/galaxy-gateway-nginx/tasks/main.yml b/parcels/roles/services/galaxy-gateway-nginx/tasks/main.yml
index 5d321d7..946a3a2 100644
--- a/parcels/roles/services/galaxy-gateway-nginx/tasks/main.yml
+++ b/parcels/roles/services/galaxy-gateway-nginx/tasks/main.yml
@@ -1,3 +1,6 @@
+- name: stop keepalived
+ shell: if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne '0' ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi && service keepalived stop
+
- name: stop and remove {{ gateway_image_container_name }} container
docker_container:
name: '{{ gateway_image_container_name }}'
@@ -130,3 +133,7 @@
- { ip: '{{ groups.services[0] }}' }
- { ip: '{{ groups.services[1] }}' }
+#- name: start keepalived
+# shell: nohup /bin/bash /etc/keepalived/kp_daemon.sh >/dev/null 2>&1 &
+ #shell: systemctl start keepalived
+# shell: service keepalived start
diff --git a/parcels/roles/services/galaxy-hos-keepalive/tasks/main.yml b/parcels/roles/services/galaxy-hos-keepalive/tasks/main.yml
index a7b8ddd..e95fadb 100644
--- a/parcels/roles/services/galaxy-hos-keepalive/tasks/main.yml
+++ b/parcels/roles/services/galaxy-hos-keepalive/tasks/main.yml
@@ -1,3 +1,34 @@
+#Stop the old auto-start daemon process
+- name: kill kp_daemon.sh
+ shell: "if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne 0 ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi"
+
+- block:
+ - name: Check if the Keepalived service already exists
+ shell: source /etc/profile && rpm -qa | grep keepalived | wc -l
+ register: check_out
+ - name: copy unload_keepalived.sh
+ template:
+ src: unload_keepalived.sh
+ dest: /root/
+ mode: 0755
+ - name: unload keepalived
+ shell: sh /root/unload_keepalived.sh
+ when: check_out.stdout >= '1'
+ - name: Check if the keepalived service already exists
+ shell: source /etc/profile && rpm -qa | grep keepalived | grep -v 'keepalived-1.3.5-1.el7.x86_64' | wc -l
+ register: check_out
+ - name: delete unload_keepalived.sh
+ file:
+ path: "/root/unload_keepalived.sh"
+ state: absent
+  - name: Abort the installation
+ fail:
+ msg: "卸载失败,组件可能非本安装部署,请联系开发确认或手动卸载后继续安装"
+ run_once: true
+ delegate_to: 127.0.0.1
+ when: check_out.stdout >= '1'
+ when: (allowed_unload) == "yes"
+
#Create the directory
- name: create keepalived package path:{{ keepalived_package_path }}
file:
diff --git a/parcels/roles/services/galaxy-hos-keepalive/templates/installKeepAlived.sh.j2 b/parcels/roles/services/galaxy-hos-keepalive/templates/installKeepAlived.sh.j2
index f6c1947..b27a021 100644
--- a/parcels/roles/services/galaxy-hos-keepalive/templates/installKeepAlived.sh.j2
+++ b/parcels/roles/services/galaxy-hos-keepalive/templates/installKeepAlived.sh.j2
@@ -67,12 +67,17 @@ installKP(){
chmod +x $keepalivedInstallPath/$keepalivedDaeonName
echo "将$keepalivedInstallPath/$keepalivedDaeonName守护进程添加到开机自启动中"
- chmod +x /etc/rc.d/rc.local
- echo -e "\n#设置$keepalivedDaeonName守护脚本开机自启动" >> /etc/rc.d/rc.local
- echo "nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &" >> /etc/rc.d/rc.local
- echo "将$keepalivedInstallPath/$keepalivedDaeonName守护进程添加到开机自启动成功"
+
+ chmod +x /etc/rc.d/rc.local
+ upStartNum=`cat /etc/rc.d/rc.local | grep "$keepalivedInstallPath" | wc -l`
+ if [ $upStartNum -eq "0" ];then
+ echo -e "\n#设置$keepalivedDaeonName守护脚本开机自启动" >> /etc/rc.d/rc.local
+ echo "nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &" >> /etc/rc.d/rc.local
+ fi
echo "开始启动$keepalivedInstallPath/$keepalivedDaeonName守护进程"
+ sed -i -e 's#. /etc/sysconfig/keepalived#. /usr/local/etc/sysconfig/keepalived#g' /etc/init.d/keepalived
+ systemctl daemon-reload
nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &
rm -rf $keepalivedBagPath
diff --git a/parcels/roles/services/galaxy-hos-keepalive/templates/unload_keepalived.sh b/parcels/roles/services/galaxy-hos-keepalive/templates/unload_keepalived.sh
new file mode 100644
index 0000000..0fcf6f8
--- /dev/null
+++ b/parcels/roles/services/galaxy-hos-keepalive/templates/unload_keepalived.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+#for i in `rpm -qa | grep keepalived` ; do rpm -e --nodeps $i ; done
+
+if [ `ps aux | grep -E "keepalived|kp_daemon" | grep -vE "grep|unload_keepalived" | wc -l` -ne '0' ] ; then ps aux | grep -E "keepalived|kp_daemon" | grep -vE "grep|unload_keepalived" | awk '{print $2}' | xargs kill -9 ; fi
+
+rm -rf /etc/keepalived
+
+rm -rf /etc/init.d/keepalived
diff --git a/parcels/roles/services/galaxy-hos-nginx/tasks/main.yml b/parcels/roles/services/galaxy-hos-nginx/tasks/main.yml
index 26fe1d8..7f8fa26 100644
--- a/parcels/roles/services/galaxy-hos-nginx/tasks/main.yml
+++ b/parcels/roles/services/galaxy-hos-nginx/tasks/main.yml
@@ -1,3 +1,6 @@
+- name: stop keepalived
+ shell: if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne '0' ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi && service keepalived stop
+
- name: stop and remove {{ hos_nginx_image_container_name }} container
docker_container:
name: '{{ hos_nginx_image_container_name }}'
@@ -6,20 +9,20 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
-- name: remove old {{ hos_nginx_hos_service_image_name }} image
+- name: remove old {{ hos_nginx_image_name }} image
docker_image:
- name: '{{ hos_nginx_hos_service_image_name }}'
- tag: '{{ hos_nginx_hos_service_image_tag_name }}'
+ name: '{{ hos_nginx_image_name }}'
+ tag: '{{ hos_nginx_image_tag_name }}'
state: absent
run_once: true
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
- name: create {{ hos_nginx_volume_path }}
file:
@@ -29,8 +32,8 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
- name: create {{ install_path }}/{{ hos_nginx_soft_home_path }}
file:
@@ -40,12 +43,12 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
-- name: copy {{ hos_nginx_hos_service_image_tar_name }}
+- name: copy {{ hos_nginx_image_tar_name }}
copy:
- src: '{{ package_path }}/{{ hos_nginx_hos_service_image_tar_name }}'
+ src: '{{ package_path }}/{{ hos_nginx_image_tar_name }}'
dest: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/'
force: true
backup: yes
@@ -53,14 +56,14 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
-- name: load new {{ hos_nginx_hos_service_image_name }} image from {{ hos_nginx_hos_service_image_tar_name }}
+- name: load new {{ hos_nginx_image_name }} image from {{ hos_nginx_image_tar_name }}
docker_image:
- name: '{{ hos_nginx_hos_service_image_name }}'
- tag: '{{ hos_nginx_hos_service_image_tag_name }}'
- load_path: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/{{ hos_nginx_hos_service_image_tar_name }}'
+ name: '{{ hos_nginx_image_name }}'
+ tag: '{{ hos_nginx_image_tag_name }}'
+ load_path: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/{{ hos_nginx_image_tar_name }}'
source: load
force_tag: yes
force_source: yes
@@ -68,19 +71,19 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
- name: change the image tag
- shell: docker tag nginx-metrics:{{ hos_nginx_hos_service_image_tag_name }} {{ hos_nginx_hos_service_image_name }}:{{ hos_nginx_hos_service_image_tag_name }}
+ shell: docker tag nginx-metrics:{{ hos_nginx_image_tag_name }} {{ hos_nginx_image_name }}:{{ hos_nginx_image_tag_name }}
run_once: true
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
-- name: copy {{ hos_nginx_hos_service_image_name }} docker-compose.yml
+- name: copy {{ hos_nginx_image_name }} docker-compose.yml
template:
src: docker-compose.yml.j2
dest: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/docker-compose.yml'
@@ -89,10 +92,10 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
-- name: copy {{ hos_nginx_hos_service_image_name }} nginx.conf
+- name: copy {{ hos_nginx_image_name }} nginx.conf
template:
src: nginx.conf.j2
dest: '{{ install_path }}/{{ hos_nginx_volume_path }}/nginx.conf'
@@ -101,8 +104,8 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
- name: start {{ hos_nginx_image_container_name }} container
docker_compose:
@@ -111,5 +114,6 @@
delegate_facts: true
delegate_to: "{{ item.ip }}"
with_items:
- - { ip: '{{ groups.hosnginx[0] }}' }
- - { ip: '{{ groups.hosnginx[1] }}' }
+ - { ip: '{{ groups.hoskeepalive[0] }}' }
+ - { ip: '{{ groups.hoskeepalive[1] }}' }
+
diff --git a/parcels/roles/services/galaxy-hos-nginx/templates/docker-compose.yml.j2 b/parcels/roles/services/galaxy-hos-nginx/templates/docker-compose.yml.j2
index f994151..058390d 100644
--- a/parcels/roles/services/galaxy-hos-nginx/templates/docker-compose.yml.j2
+++ b/parcels/roles/services/galaxy-hos-nginx/templates/docker-compose.yml.j2
@@ -1,7 +1,7 @@
version: '3'
services:
nginx:
- image: {{ hos_nginx_hos_service_image_name }}:{{ hos_nginx_hos_service_image_tag_name }}
+ image: {{ hos_nginx_image_name }}:{{ hos_nginx_image_tag_name }}
container_name: {{ hos_nginx_image_container_name }}
restart: always
ports:
diff --git a/parcels/roles/services/galaxy-hos-service/tasks/load_hos.yml b/parcels/roles/services/galaxy-hos-service/tasks/load_hos.yml
index cb5aba6..0c9b86f 100644
--- a/parcels/roles/services/galaxy-hos-service/tasks/load_hos.yml
+++ b/parcels/roles/services/galaxy-hos-service/tasks/load_hos.yml
@@ -67,7 +67,7 @@
backup: yes
- name: push config
- shell: 'curl --data-urlencode content="`cat {{ data_path }}/{{ hos_service_soft_home_path }}/galaxy-hos-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-hos-service.yml&appName=galaxy-hos-service&type=yaml"'
+ shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ data_path }}/{{ hos_service_soft_home_path }}/galaxy-hos-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-hos-service.yml&appName=galaxy-hos-service&type=yaml"'
register: change_out
- fail:
@@ -77,3 +77,20 @@
- name: start {{ hos_service_image_container_name }} container
docker_compose:
project_src: '{{ data_path }}/{{ hos_service_soft_home_path }}'
+
+
+##Get the IP list
+#- name: get storm numbus iplist
+# shell: echo "{{ ansible_play_hosts }}" | grep -E -o "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" > {{ data_path }}/{{ hos_service_soft_home_path }}/ip_list
+#
+#- name: copy {{ hos_service_image_name }} registerinstance.sh.j2
+# template:
+# src: registerinstance.sh.j2
+# dest: '{{ data_path }}/{{ hos_service_soft_home_path }}/registerinstance.sh'
+# backup: yes
+#
+#- name: change the image tag
+# shell: cd {{ data_path }}/{{ hos_service_soft_home_path }} && chmod +x registerinstance.sh && ./registerinstance.sh
+
+
+
diff --git a/parcels/roles/services/galaxy-hos-service/templates/application.yml.j2 b/parcels/roles/services/galaxy-hos-service/templates/application.yml.j2
index 713afc9..4141478 100644
--- a/parcels/roles/services/galaxy-hos-service/templates/application.yml.j2
+++ b/parcels/roles/services/galaxy-hos-service/templates/application.yml.j2
@@ -3,7 +3,7 @@ nacos:
type: yaml
server-addr: {{ gateway_keepalive_host }}:8848
namespace: {{ services_config_namespace }}
- data-id: galaxy-hos-service
+ data-id: galaxy-hos-service.yml
auto-refresh: true
group: Galaxy
username: nacos
diff --git a/parcels/roles/services/galaxy-hos-service/templates/registerinstance.sh.j2 b/parcels/roles/services/galaxy-hos-service/templates/registerinstance.sh.j2
new file mode 100644
index 0000000..2b40369
--- /dev/null
+++ b/parcels/roles/services/galaxy-hos-service/templates/registerinstance.sh.j2
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+basedir=`pwd`
+serverAddr="http://{{ gateway_keepalive_host }}:8848"
+namespace="{{ services_config_namespace }}"
+group="Galaxy"
+username="nacos"
+password="{{ nacos_pin }}"
+serviceName="hos"
+hosPort="8186"
+
+for ip in `cat $basedir/hosiplist`
+do
+issuccess=`curl --retry-delay 3 --retry 3 -X POST ''$serverAddr'/nacos/v1/ns/instance?serviceName='$serviceName'&ip='$ip'&port='$hosPort'&namespaceId='$namespace'&groupName='$group'&ephemeral=false&username='$username'&password='$password''`
+if [ `echo $issuccess | grep ok | wc -l` -eq 0 ];then
+ echo "register $ip error"
+fi
+done
+
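
For reference, the per-IP registration this script performs against the Nacos naming API can also be written as a uri task per host; a sketch that reuses the serviceName hos, port 8186 and the credentials templated above (the ansible_default_ipv4 fact is only an illustrative way to pick the instance IP):

- name: Register one hos instance with Nacos (illustrative sketch)
  uri:
    url: "http://{{ gateway_keepalive_host }}:8848/nacos/v1/ns/instance?serviceName=hos&ip={{ ansible_default_ipv4.address }}&port=8186&namespaceId={{ services_config_namespace }}&groupName=Galaxy&ephemeral=false&username=nacos&password={{ nacos_pin }}"
    method: POST
    return_content: yes
  register: reg_out
  failed_when: "'ok' not in reg_out.content"
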
diff --git a/parcels/roles/services/galaxy-job-service/files/mysql b/parcels/roles/services/galaxy-job-service/files/mysql
index 66af1a1..eb26146 100755
Binary files a/parcels/roles/services/galaxy-job-service/files/mysql and b/parcels/roles/services/galaxy-job-service/files/mysql differ
diff --git a/parcels/roles/services/galaxy-job-service/tasks/load_admin.yml b/parcels/roles/services/galaxy-job-service/tasks/load_admin.yml
index 69c748f..b0ced82 100644
--- a/parcels/roles/services/galaxy-job-service/tasks/load_admin.yml
+++ b/parcels/roles/services/galaxy-job-service/tasks/load_admin.yml
@@ -34,6 +34,19 @@
tag: '{{ admin_image_tag_name }}'
state: absent
+
+- name: stop and remove {{ job_image_container_name }} container
+ docker_container:
+ name: '{{ job_image_container_name }}'
+ state: absent
+
+- name: remove old {{ job_image_name }} image
+ docker_image:
+ name: '{{ job_image_name }}'
+ tag: '{{ job_image_tag_name }}'
+ state: absent
+
+
- name: create {{ admin_volume_path }}
file:
state: directory
diff --git a/parcels/roles/services/galaxy-job-service/tasks/load_job.yml b/parcels/roles/services/galaxy-job-service/tasks/load_job.yml
index aa96ca9..0930268 100644
--- a/parcels/roles/services/galaxy-job-service/tasks/load_job.yml
+++ b/parcels/roles/services/galaxy-job-service/tasks/load_job.yml
@@ -91,7 +91,7 @@
backup: yes
- name: push config
- shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-executor`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-executor.properties&appName=galaxy-job-executor&type=properties"'
+ shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-executor`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-executor.properties&appName=galaxy-job-executor&type=properties"'
register: change_out
- fail:
@@ -99,7 +99,7 @@
when: change_out.stdout != 'true'
- name: push config
- shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-admin`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-admin.properties&appName=galaxy-job-admin&type=properties"'
+ shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-admin`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-admin.properties&appName=galaxy-job-admin&type=properties"'
register: change_out
- fail:
diff --git a/parcels/roles/services/galaxy-job-service/templates/docker-compose.yml.j2 b/parcels/roles/services/galaxy-job-service/templates/docker-compose.yml.j2
index 3a13740..9a3e6b0 100644
--- a/parcels/roles/services/galaxy-job-service/templates/docker-compose.yml.j2
+++ b/parcels/roles/services/galaxy-job-service/templates/docker-compose.yml.j2
@@ -3,7 +3,7 @@ version: '2'
services:
galaxy-job-executor:
image: {{ job_image_name }}:{{ job_image_tag_name }}
- container_name: galaxy-job
+ container_name: {{ job_image_container_name }}
environment:
JAVA_OPTS: "{{ job_java_opts }}"
ports:
@@ -18,7 +18,7 @@ services:
galaxy-job-admin:
image: {{ admin_image_name }}:{{ admin_image_tag_name }}
- container_name: xxl-job-admin
+ container_name: {{ admin_image_container_name }}
environment:
JAVA_OPTS: "{{ job_java_opts }}"
ports:
diff --git a/parcels/roles/services/galaxy-qgw-service/files/dat/ip_v4.mmdb b/parcels/roles/services/galaxy-qgw-service/files/dat/ip_v4.mmdb
index 842cce9..519e3dc 100644
Binary files a/parcels/roles/services/galaxy-qgw-service/files/dat/ip_v4.mmdb and b/parcels/roles/services/galaxy-qgw-service/files/dat/ip_v4.mmdb differ
diff --git a/parcels/roles/services/galaxy-qgw-service/tasks/load_qgw.yml b/parcels/roles/services/galaxy-qgw-service/tasks/load_qgw.yml
index 3a1c44d..4b58870 100644
--- a/parcels/roles/services/galaxy-qgw-service/tasks/load_qgw.yml
+++ b/parcels/roles/services/galaxy-qgw-service/tasks/load_qgw.yml
@@ -1,3 +1,14 @@
+- name: start keepalived service
+ shell: service keepalived start
+
+- name: check keepalived
+ shell: ps aux | grep kp_daemon | grep -v grep | wc -l
+ register: daemon_stat
+
+- name: start keepalived
+ shell: nohup /bin/bash /etc/keepalived/kp_daemon.sh >/dev/null 2>&1 &
+ when: daemon_stat.stdout == '0'
+
- name: get zookeeper_servers to ansible variable
set_fact: zookeeper_servers="{{groups.zookeeper[0]}}:2181,{{groups.zookeeper[1]}}:2181,{{groups.zookeeper[2]}}:2181"
when: '(groups.zookeeper|length) == 3'
@@ -88,7 +99,7 @@
backup: yes
#- name: push config
-# shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ qgw_soft_home_path }}/galaxy-qgw-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-qgw-service.yml&appName=galaxy-qgw-service&type=yaml"'
+# shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ qgw_soft_home_path }}/galaxy-qgw-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-qgw-service.yml&appName=galaxy-qgw-service&type=yaml"'
# register: change_out
#- debug:
diff --git a/parcels/roles/services/galaxy-qgw-service/templates/push_config.sh.j2 b/parcels/roles/services/galaxy-qgw-service/templates/push_config.sh.j2
index c7c3643..a11c7e7 100755
--- a/parcels/roles/services/galaxy-qgw-service/templates/push_config.sh.j2
+++ b/parcels/roles/services/galaxy-qgw-service/templates/push_config.sh.j2
@@ -31,7 +31,7 @@ do
#3. Push the config
for (( i = 0; i < 10; i++ )); do
if [ "$suffix" = "yaml" ]||[ "$suffix" = "json" ]||[ "$suffix" = "text" ]||[ "$suffix" = "properties" ];then
- result=$(curl -sw '%{http_code}' -o /dev/null --request POST "$nacos_push_url&dataId=$data_id&appName=$appName&type=$suffix" --data-urlencode content="`cat $config_path_file`")
+ result=$(curl --retry-delay 3 --retry 3 -sw '%{http_code}' -o /dev/null --request POST "$nacos_push_url&dataId=$data_id&appName=$appName&type=$suffix" --data-urlencode content="`cat $config_path_file`")
echo "push config response code "$result
if [[ $result -eq '200' ]];then
i=10
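
Worth noting for the retry flags introduced here: curl --retry only re-issues a request on transient failures (timeouts and HTTP 408, 429 and most 5xx responses), so the surrounding for-loop still covers other non-200 replies. A playbook-level equivalent of a single config push, expressed with the uri module, might look like this (the dataId and the config_file variable are illustrative only):

- name: Push one config document to Nacos (illustrative sketch)
  uri:
    url: "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs"
    method: POST
    body_format: form-urlencoded
    body:
      username: nacos
      password: "{{ nacos_pin }}"
      tenant: "{{ services_config_namespace }}"
      group: Galaxy
      dataId: galaxy-qgw-service.yml                  # illustrative dataId
      type: yaml
      content: "{{ lookup('file', config_file) }}"    # config_file is an assumed variable
  register: push_out
  retries: 3
  delay: 3
  until: push_out.status == 200
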
diff --git a/parcels/roles/services/galaxy-report-service/tasks/load_report.yml b/parcels/roles/services/galaxy-report-service/tasks/load_report.yml
index b0a4e44..49f950e 100644
--- a/parcels/roles/services/galaxy-report-service/tasks/load_report.yml
+++ b/parcels/roles/services/galaxy-report-service/tasks/load_report.yml
@@ -63,7 +63,7 @@
backup: yes
- name: push config
- shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ report_soft_home_path }}/galaxy-report-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-report-service.yml&appName=galaxy-report-service&type=yaml"'
+ shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ report_soft_home_path }}/galaxy-report-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-report-service.yml&appName=galaxy-report-service&type=yaml"'
register: change_out
- fail:
diff --git a/parcels/test1.yml b/parcels/test1.yml
index 06b9de5..94d0535 100644
--- a/parcels/test1.yml
+++ b/parcels/test1.yml
@@ -27,6 +27,18 @@
# msg: "{{groups['flink'].index}}"
when: index_no >= 0
+ - name: test mysql
+ shell: which mysqlk
+ register: result
+ failed_when: "'FAILED' in result.stderr or result.stdout != '20'"
+ ignore_errors: True
+
+ - name: output result
+ debug:
+      msg: "output: {{ result }}"
+    #when: result.stderr is defined and result.stderr != '' # error output here would mean the mariadb service is absent
+ when: result is undefined or result.stdout != '20'
+
vars_files:
- ../configurations/config.yml
- ../configurations/components.yml