21.08 final version
.idea/workspace.xml (generated, 346 lines changed)
[Generated IDE workspace-state diff omitted: editor tabs, caret positions, search strings, tool-window layout, time-tracking counters, and local task records. The recorded last commit message is "clickhouse config file change: 1. removed the cold/hot data configuration".]
configurations/components.yml
@@ -122,6 +122,12 @@ topic_file_bytes: 1073741824
 # Buffer pool size, in MB
 mariadb_innodb_buffer_pool_size: 1024
+
+# MariaDB port
+galaxy_mariadb_port: 3306
+
+# MariaDB user
+galaxy_mariadb_username: root
 
 # galaxy mariadb key
 galaxy_mariadb_pin: galaxy2019
 
@@ -134,7 +140,10 @@ spark_worker_cores: 30
 
 #===========================Nacos===============================#
 # Nacos memory settings
-nacos_java_opt: '-Xms1024m -Xmx1024m -Xmn1024m'
+nacos_java_opt: '-Xms256m -Xmx256m -Xmn256m'
+
+# Nacos pin for Galaxy (default account: nacos); used by galaxy-hos-service, galaxy-job-service, galaxy-qgw-service, and galaxy-report-service to fetch configuration from Nacos.
+nacos_pin: nacos
 
 #===========================Storm===============================#
 # Number of workers available per supervisor, usually half the CPU count.
@@ -149,25 +158,18 @@ storm_worker_min_mem: 1024
 #===========================Flink================================#
 # Network buffer size
 taskmanager_memory_network_min: 512m
-taskmanager_memory_network_max: 1280m
+taskmanager_memory_network_max: 512m
 
 # TaskManager off-heap memory
 taskmanager_memory_managed_size: 256m
 
 # Total Flink-related memory of the TaskManager process
-taskmanager_memory_flink_size: 5120m
+taskmanager_memory_flink_size: 3072m
 
 # Total Flink-related memory of the JobManager process
-jobmanager_memory_flink_size: 1024m
+jobmanager_memory_flink_size: 512m
 
 #===========================Zookeeper===============================#
 # ZooKeeper process startup memory, in MB
 zookeeper_max_mem: 1024
 
-#===========================Mariadb===============================#
-# MariaDB port
-galaxy_mariadb_port: 3306
-
-# MariaDB user
-galaxy_mariadb_username: root
-
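The Flink variables above line up with Flink's standard memory options. As a sketch only (the set_flink_env.sh.j2 template that consumes them is not part of this diff), the new defaults would render into flink-conf.yaml roughly as:

    # flink-conf.yaml, illustrative rendering of the values above
    taskmanager.memory.network.min: 512m
    taskmanager.memory.network.max: 512m
    taskmanager.memory.managed.size: 256m
    taskmanager.memory.flink.size: 3072m
    jobmanager.memory.flink.size: 512m

Setting the network min and max to the same value pins the network buffer pool size instead of letting Flink pick a fraction-based value between the two bounds.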
configurations/config.yml
@@ -22,7 +22,7 @@ package_path: "{{ playbook_dir | dirname }}/software_packages"
 #============================bifang-Mariadb===============================#
 
 # Bifang MariaDB address, used by the galaxy-qgw-service, galaxy-report-service, and flink-dos workloads.
-bifang_mariadb_host: 192.168.40.153
+bifang_mariadb_host: 127.0.0.1
 
 # Bifang MariaDB port
 bifang_mariadb_port: 3306
@@ -31,14 +31,14 @@ bifang_mariadb_port: 3306
 bifang_mariadb_database: tsg-bifang
 
 # Bifang MariaDB root's pin
-bifang_mariadb_pin: 111111
+bifang_mariadb_pin: ******
 
 #============================Virtual IPs===============================#
 
 #-------------apps------------------#
 
 # Keepalive IP for the Galaxy business components. It must be in the same subnet as the IPs configured under hosts [services], and must not collide with an existing IP in that subnet.
-gateway_keepalive_host: 192.168.45.252
+gateway_keepalive_host: 127.0.0.*
 
 # NIC name the Galaxy business-component keepalive binds to. Two servers with different NIC names are not supported; in that case, edit the config file manually after installation.
 gateway_keepalive_interface: eth0
@@ -53,7 +53,7 @@ hos_keepalive_need: "yes"
 
 # Keepalive IP for galaxy-hos-service. If hos_keepalive_need is "no", set this to the same IP as gateway_keepalive_host.
 # The IP must be in the same subnet as the IPs configured under hosts [hoskeepalive], and must not collide with an existing IP in that subnet.
-hos_keepalive_host: 192.168.45.253
+hos_keepalive_host: 127.0.0.*
 
 # NIC name the galaxy-hos-service keepalive binds to. Two servers with different NIC names are not supported; in that case, edit the config file manually after installation.
 # If hos_keepalive_need is set to "no", this can be left unchanged.
@@ -65,18 +65,15 @@ hos_keepalive_router_id: 62
 
 #============================Kafka===============================#
 # Kafka address that tasks consume from; unless there is a special case, this is the Kafka of the installed cluster. Format: kafkaip1:9092,kafkaip2:9092,kafkaip3:9092...
-kafka_source_servers: 192.168.45.37:9092,192.168.45.38:9092,192.168.45.39:9092
+kafka_source_servers: 127.0.0.1:9092
 
 # Kafka address that task results are written to; unless there is a special case, this is the Kafka of the installed cluster. Format: kafkaip1:9092,kafkaip2:9092,kafkaip3:9092...
-kafka_sink_servers: 192.168.45.37:9092,192.168.45.38:9092,192.168.45.39:9092
+kafka_sink_servers: 127.0.0.1:9092
 
 #============================Defaults===============================#
 # Nacos namespace used by the in-house service components
 services_config_namespace: prod
 
-# Nacos pin for Galaxy
-nacos_pin: nacos
-
 # MariaDB database name used by Nacos
 mariadb_nacos_database: nacos
 
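For orientation, the *_keepalive_host values are virtual IPs managed by keepalived. A minimal keepalived.conf sketch, assuming installKeepAlived.sh.j2 renders something along these lines (the actual template is not part of this diff, and the instance name and priority here are illustrative):

    vrrp_instance gateway {
        state MASTER
        interface eth0                # gateway_keepalive_interface
        virtual_router_id 61          # illustrative; hos uses hos_keepalive_router_id 62
        priority 100
        virtual_ipaddress {
            192.168.45.252            # gateway_keepalive_host (the old example value)
        }
    }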
configurations/hosts
@@ -1,97 +1,54 @@
 # The IPs under this label are used to install: chproxy, galaxy-app-nginx, galaxy-job-service, galaxy-qgw-service, galaxy-report-service, galaxy-app-keepalive.
 # At least two hosts are required.
 [services]
-192.168.45.42
-192.168.45.43
 
 # The IPs under this label are used to install: galaxy-hos-service.
 [hos]
-192.168.45.40
-192.168.45.41
 
 # The IPs under this label are used to install: galaxy-hos-keepalive and galaxy-hos-nginx.
 # If hos_keepalive_need in inventories/pro/group_vars/all is "no", list no IPs here; if "yes", list the servers that run keepalive for galaxy-hos-service.
 # Must not overlap with the [services] IPs, and at most two hosts.
 [hoskeepalive]
-192.168.45.40
-192.168.45.41
 
 # The IPs under this label are used to install: ArangoDB.
 [arangodb]
-192.168.45.42
 
 # The IPs under this label are used to install: MariaDB. The current version allows exactly two IPs, and they must match the first two [services] IPs.
 # No load-balancing requirement yet; kept for future load-balancing support.
 [mariadb]
-192.168.45.42
-192.168.45.43
 
 # The IPs under this label are used to install: Nacos.
 [nacos]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Apache Zookeeper.
 [zookeeper]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Apache Kafka; the first host in the cluster also runs the Kafka-Manager service.
 [kafka]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Apache Storm. By internal convention, if the cluster has fewer than 3 hosts, the first host is the nimbus node; with more than three, the first three are nimbus nodes.
 #[storm]
-#192.168.45.37
-#192.168.45.38
-#192.168.45.39
 
 # The IPs under this label are used to install: Apache Flink. By internal convention, the first two hosts are master nodes; all hosts are worker nodes.
 [flink]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Apache Hadoop.
 [hadoop]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Apache HBase.
 [hbase]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Apache Spark. By internal convention, with fewer than 3 hosts, workers run on all hosts; with more than 3, the first host is the master node.
 [spark]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Galaxy-Gohangout.
 [gohangout]
-192.168.45.40
 
 # The IPs under this label are used to install: Apache Druid. By internal convention, with fewer than 4 hosts, every host runs all node types; with more than 4, the first two default to query nodes.
 [druid]
-192.168.45.37
-192.168.45.38
-192.168.45.39
 
 # The IPs under this label are used to install: Clickhouse. By internal convention, the first two hosts default to query nodes.
 [clickhouse]
-192.168.45.40
-192.168.45.41
-192.168.45.42
-192.168.45.43
 
 # Offline generate-baseline program for DoS detection.
 [dos_baseline]
-192.168.45.43
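The inventory now ships with every group emptied; at deployment time each label is filled in with the target hosts, for example (reusing the removed example addresses):

    [services]
    192.168.45.42
    192.168.45.43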
configurations/services.yml
@@ -11,8 +11,8 @@ qgw_java_opts: "-Xmx512m -Xms512m"
 
 #=======================galaxy-job-service==========================#
 # galaxy-job-service memory settings
-job_java_opts: "-Xmx512m -Xms512m"
+job_java_opts: "-Xmx128m -Xms128m"
 
 #======================galaxy-report-service=========================#
 # galaxy-report-service memory settings
-report_java_opts: "-Xmx512m -Xms512m"
+report_java_opts: "-Xmx128m -Xms128m"
install.sh (48 lines changed)
@@ -1,6 +1,5 @@
 #!/bin/bash
-
 
 ####################### Base variables ######################
 # Script directory
 bin_path=$(cd `dirname $0`; pwd)
@@ -195,7 +194,7 @@ clear
 i=0
 #ins_names=(Zookeeper Mariadb galaxy-gateway-keepalive Nacos Kafka Storm Hadoop HBase Clickhouse Druid Spark Arangodb)
 # Replace Storm with Flink
-ins_names=(Zookeeper Mariadb galaxy-gateway-keepalive Nacos Kafka Flink Hadoop HBase Clickhouse Druid Spark Arangodb)
+ins_names=(Zookeeper Mariadb galaxy-gateway-keepalive Nacos Kafka Hadoop HBase Flink Clickhouse Druid Spark Arangodb)
 echo ${ins_names[@]} > $NUM_DIR/common_list
 
 # Install sequentially according to the combination
@@ -338,9 +337,9 @@ clear
 i=0
 hoskeep=`cat ../configurations/config.yml | grep -vE "^#|^$" | grep "hos_keepalive_need" | grep yes | wc -l`
 if [[ $hoskeep -eq "1" ]]; then
-    ins_names=(galaxy-gateway-nginx galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-service galaxy-chproxy galaxy-hos-nginx galaxy-hos-keepalive galaxy-gohangout)
+    ins_names=(galaxy-gateway-nginx galaxy-chproxy galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-nginx galaxy-hos-keepalive galaxy-hos-service galaxy-gohangout)
 else
-    ins_names=(galaxy-gateway-nginx galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-service galaxy-chproxy galaxy-gohangout)
+    ins_names=(galaxy-gateway-nginx galaxy-chproxy galaxy-qgw-service galaxy-job-service galaxy-report-service galaxy-hos-service galaxy-gohangout)
 fi
 echo ${ins_names[@]} > $NUM_DIR/app_list
 
@@ -648,9 +647,31 @@ fi
 clear
 cat ../parcels/menu/homePage
 
+if [ -f $NUM_DIR/stepNum ]; then
+    step_num=`cat $NUM_DIR/stepNum`
+    next_step_num=`expr ${step_num} + 1`
+    #if [ ${next_step_num} -eq '5' ]; then
+    #    echo -e "\033[33m You have successfully installed, exiting ! ! \033[0m"
+    #    break;
+    #fi
+    #if [ ${next_step_num} -ne ${yn_main} ]; then
+    #    #echo "The next step should be ${next_step_num}"
+    #    echo -e "\033[33m The next step should be ${next_step_num} \033[0m"
+    #    sleep 3s
+    #    #continue
+    #fi
+    echo -e "\033[33m The next step should be ${next_step_num}, but you can choose another step number if you want ! ! \033[0m"
+else
+    echo -e "\033[33m The next step should be 1, but you can choose another step number if you want ! ! \033[0m"
+    #sleep 3s
+    #continue
+fi
+
 read -p "
-Selection(1-4)? " yn
-case $yn in
+Selection(1-4)? " yn_main
+
+case $yn_main in
 [1])
 while true; do
 clear
@@ -664,39 +685,42 @@ echo " *
 *********************************************************************************************************
 "
-read -p "Enter [Y] or [N]: " yn
+read -p "Enter [yY] or [nN]: " yn
 case $yn in
 [Yy]* )
     installcombination
+    echo ${yn_main} > $NUM_DIR/stepNum
     break;;
 [Nn]*)
     break;;
 * )
-    echo "Please Enter [Y] or [N].";;
+    echo "Please Enter [yY] or [nN].";;
 esac
 done;;
 
 [2] )
 while true; do
 clear
-#installApps
 
 cat ../parcels/menu/appMenu
-read -p "Enter [Y] or [N]: " yn
+read -p "Enter [yY] or [nN]: " yn
 case $yn in
 [Yy]* )
     installApps
+    echo ${yn_main} > $NUM_DIR/stepNum
     break;;
 [Nn]* )
     break;;
 * )
-    echo "Please Enter [Y] or [N].";;
+    echo "Please Enter [yY] or [nN].";;
 esac
 done;;
 [3] )
     installInit
+    echo ${yn_main} > $NUM_DIR/stepNum
     ;;
 [4] )
     installCheck
+    echo ${yn_main} > $NUM_DIR/stepNum
     ;;
 * )
     echo "Please Enter (1-4)."
@@ -1,4 +1,5 @@
-- hosts: services
+- hosts:
+    - hos
   remote_user: root
   roles:
     - init/galaxy-hos-service
@@ -1,17 +1,16 @@
 Service Components:

-┌──────────────────────────────────────┐
-├ galaxy-qgw-service                   ┤
-├ galaxy-job-service                   ┤
-├ galaxy-report-service                ┤
-├ galaxy-hos-service                   ┤
-├ galaxy-chproxy                       ┤
-├ galaxy-gohangout                     ┤
-├ galaxy-gateway-nginx                 ┤
-├ galaxy-nginx-hos                     ┤
-├ galaxy-gateway-keepalive             ┤
-├ galaxy-hos-keepalive                 ┤
-└──────────────────────────────────────┘
+┌───────────────────────────────────┐
+├ [*] galaxy-gateway-nginx          ┤
+├ [*] galaxy-chproxy                ┤
+├ [*] galaxy-qgw-service            ┤
+├ [*] galaxy-job-service            ┤
+├ [*] galaxy-report-service         ┤
+├ [*] galaxy-hos-nginx              ┤
+├ [*] galaxy-hos-keepalive          ┤
+├ [*] galaxy-hos-service            ┤
+├ [*] galaxy-gohangout              ┤
+└───────────────────────────────────┘

 *********************************************************
 * Press Ctrl+C or N to exit, Enter or Y to continue.   *
@@ -5,9 +5,9 @@ Open Source Software:
 ├ [*] Mariadb                       ┤
 ├ [*] Nacos                         ┤
 ├ [*] Apache Kafka                  ┤
-├ [*] Apache Flink                  ┤
 ├ [*] Apache Hadoop                 ┤
 ├ [*] Apache HBase                  ┤
+├ [*] Apache Flink                  ┤
 ├ [*] Clickhouse                    ┤
 ├ [*] Apache Druid                  ┤
 ├ [*] Apache Spark                  ┤
@@ -1,51 +1,51 @@
 - name: Checking Components Heartbeats
-  shell: "curl -s http://{{ inventory_hostname }}:8183/monitor/health | grep DOWN | grep -v grep | wc -l"
+  shell: "curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/monitor/health | grep DOWN | grep -v grep | wc -l"
   register: health

 - name: Checking Components Heartbeats
   fail:
-    msg: "Component heartbeat check failed. Query curl -s http://{{ inventory_hostname }}:8183/monitor/health to see which components are DOWN."
+    msg: "Component heartbeat check failed. Query curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/monitor/health to see which components are DOWN."
   when: health.stdout != '0'

 - name: Checking Metadata
-  shell: "curl -s http://{{ inventory_hostname }}:8183/diagnosis/metadata | grep '\"status\":200' | grep -v grep | wc -l"
+  shell: "curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/metadata | grep '\"status\":200' | grep -v grep | wc -l"
   register: metadata

 - name: Checking Metadata
   fail:
-    msg: "Metadata validation failed. Query curl -s http://{{ inventory_hostname }}:8183/diagnosis/metadata for details."
+    msg: "Metadata validation failed. Query curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/metadata for details."
   when: metadata.stdout != '1'

 - name: Checking SQL
-  shell: "curl -s http://{{ inventory_hostname }}:8183/diagnosis/runSql | grep '\"status\":200' | grep -v grep | wc -l"
+  shell: "curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/runSql | grep '\"status\":200' | grep -v grep | wc -l"
   register: sql

 - name: Checking SQL
   fail:
-    msg: "The built-in SQL dataset check failed. Query curl -s http://{{ inventory_hostname }}:8183/diagnosis/runSql for details."
+    msg: "The built-in SQL dataset check failed. Query curl --retry-delay 3 --retry 3 -s http://{{ inventory_hostname }}:8183/diagnosis/runSql for details."
   when: sql.stdout != '1'

 #TODO: enable these checks in release 21.07 once the service version supports them
 #- name: Checking {{ groups.hos[0] }} galaxy-hos-service
-#  shell: "curl --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor | grep 200 | wc -l"
+#  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor | grep 200 | wc -l"
 #  register: monitor_out
 #  run_once: true
 #  delegate_facts: true
 #  delegate_to: '{{ groups.hos[0] }}'
 #
 #- fail:
-#    msg: "HOS monitor check failed. Query curl --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor for details"
+#    msg: "HOS monitor check failed. Query curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[0] }}:8186/monitor for details"
 #  when: monitor_out.stdout != '1'
 #
 #- name: Checking {{ groups.hos[1] }} galaxy-hos-service
-#  shell: "curl --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor | grep 200 | wc -l"
+#  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor | grep 200 | wc -l"
 #  register: monitor_out
 #  run_once: true
 #  delegate_facts: true
 #  delegate_to: '{{ groups.hos[1] }}'
 #
 #- fail:
-#    msg: "HOS monitor check failed. Query curl --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor for details"
+#    msg: "HOS monitor check failed. Query curl --retry-delay 3 --retry 3 --location -s -w %{http_code} http://{{ groups.hos[1] }}:8186/monitor for details"
 #  when: monitor_out.stdout != '1'

 - name: wait gohangout start, sleep 30s
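Note: curl's --retry only re-attempts transient failures (timeouts, 5xx and similar); a refused connection is not retried unless --retry-connrefused is also given (available in curl >= 7.52.0). A hedged illustration of the flag behavior:

# Transient failures are retried 3 times, 3 seconds apart:
curl --retry 3 --retry-delay 3 -s http://host:8183/monitor/health
# If the service port may not be listening yet, add (curl >= 7.52.0):
curl --retry 3 --retry-delay 3 --retry-connrefused -s http://host:8183/monitor/health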
Binary file not shown.
@@ -37,9 +37,11 @@
     dest: '{{ install_path }}/unload_druid.sh'
     force: true
     mode: 0755
-  when: check_out.stdout >= '1'
+#  when: check_out.stdout >= '1'
 - name: unload Druid
   shell: cd {{ install_path }} && sh unload_druid.sh
+- name: unload Druid
+  shell: "if [ `ps aux | grep druid | grep -vE 'grep|ansible|install' | awk '{print $2}' | wc -l` -ne 0 ] ; then ps aux | grep druid | grep -vE 'grep|ansible|install' | awk '{print $2}' | xargs kill -9 ; fi"
   when: check_out.stdout >= '1'
 - name: check if {{ mariadb_druid_database }} exist
   shell: mysql -s -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} -e "select count(1) from information_schema.schemata where schema_name='{{ mariadb_druid_database }}'" |grep 1|wc -l
@@ -228,7 +230,8 @@
     dest: '{{ install_path }}/{{ druid_version }}/bin/dae-druid-data.sh'
     mode: 0755
     backup: yes
-  when: node_nums > cluster_limit and inventory_hostname != '{{master_ip}}' and inventory_hostname != '{{slave1_ip}}'
+#  when: node_nums > cluster_limit and inventory_hostname != '{{master_ip}}' and inventory_hostname != '{{slave1_ip}}'
+  when: node_nums > cluster_limit and inventory_hostname != master_ip and inventory_hostname != slave1_ip

 #copy the druid config files
 - name: copy Druid data node keep shell
@@ -237,7 +240,8 @@
     dest: '/etc/init.d/keepdruiddata'
     mode: 0755
     backup: yes
-  when: node_nums > cluster_limit and inventory_hostname != '{{master_ip}}' and inventory_hostname != '{{slave1_ip}}'
+#  when: node_nums > cluster_limit and inventory_hostname != '{{master_ip}}' and inventory_hostname != '{{slave1_ip}}'
+  when: node_nums > cluster_limit and inventory_hostname != master_ip and inventory_hostname != slave1_ip

 #copy set_druid_env.sh
 - name: copy set_druid_env.sh
@@ -253,7 +257,7 @@

 #start the druid query node
 - name: start druid query node
-  shell: source /etc/profile && sh /etc/init.d/keepdruidquery 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepdruidquery 'start'
   run_once: true
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
@@ -264,12 +268,13 @@

 #start the druid data node
 - name: start druid data node
-  shell: source /etc/profile && sh /etc/init.d/keepdruiddata 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepdruiddata 'start'
-  when: node_nums > cluster_limit and inventory_hostname != '{{master_ip}}' and inventory_hostname != '{{slave1_ip}}'
+#  when: node_nums > cluster_limit and inventory_hostname != '{{master_ip}}' and inventory_hostname != '{{slave1_ip}}'
+  when: node_nums > cluster_limit and inventory_hostname != master_ip and inventory_hostname != slave1_ip

 #start druid on all nodes
 - name: start druid all node
-  shell: source /etc/profile && sh /etc/init.d/keepdruidall 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepdruidall 'start'
   when: node_nums <= cluster_limit

 - name: Ansible delete {{ druid_version }}.tar.gz
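Note: running an init script as `sh script` executes it with whatever /bin/sh points to (often dash, or bash in POSIX mode) and ignores the script's shebang, so bash-only constructs can break; invoking /bin/bash explicitly pins the interpreter. A small demonstration under that assumption:

cat > /tmp/demo.sh <<'EOF'
if [[ "$1" == start ]]; then echo "started"; fi   # [[ ]] is a bashism
EOF
sh /tmp/demo.sh start         # may fail under dash: "[[: not found"
/bin/bash /tmp/demo.sh start  # always runs under bash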
parcels/roles/components/druid/templates/bak/broker_jvm.j2 (new file, 10 lines)
@@ -0,0 +1,10 @@
+-server
+-Xms{{ broker_mem }}m
+-Xmx{{ broker_mem }}m
+-XX:MaxDirectMemorySize={{ broker_MaxDirectMemorySize }}m
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=broker
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+
@@ -0,0 +1,42 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/broker
+druid.plaintextPort=8082
+druid.sql.planner.sqlTimeZone={{ local_time }}
+
+# HTTP server settings
+druid.server.http.numThreads=60
+
+# HTTP client settings
+druid.broker.http.numConnections=50
+druid.broker.http.maxQueuedBytes=10000000
+
+# Processing threads and buffers
+druid.processing.buffer.sizeBytes={{ broker_sizeBytes }}
+druid.processing.numMergeBuffers={{ broker_numMergeBuffers }}
+druid.processing.numThreads={{ broker_numThreads }}
+druid.processing.tmpDir=var/druid/processing
+
+# Query cache disabled -- push down caching and merging instead
+druid.broker.cache.useCache=false
+druid.broker.cache.populateCache=false
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
@@ -0,0 +1,151 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Extensions specified in the load list will be loaded by Druid
+# We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
+# We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead
+
+# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
+# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
+# More info: https://druid.apache.org/docs/latest/operations/including-extensions.html
+druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches", "mysql-metadata-storage", "druid-group-uniq-extension"]
+
+# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
+# and uncomment the line below to point to your directory.
+#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
+
+
+#
+# Hostname
+#
+druid.host={{ inventory_hostname }}
+
+#
+# Logging
+#
+
+# Log all runtime properties on startup. Disable to avoid logging properties on startup:
+druid.startup.logging.logProperties=true
+
+#
+# Zookeeper
+#
+
+druid.zk.service.host={{ zookeeper_servers }}
+druid.zk.paths.base=/druid
+
+#
+# Metadata storage
+#
+
+# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
+#druid.metadata.storage.type=derby
+#druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
+#druid.metadata.storage.connector.host=localhost
+#druid.metadata.storage.connector.port=1527
+
+# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
+druid.metadata.storage.type=mysql
+druid.metadata.storage.connector.connectURI=jdbc:mysql://{{ gateway_keepalive_host }}:3306/{{ mariadb_druid_database }}
+druid.metadata.storage.connector.user=root
+druid.metadata.storage.connector.password={{ galaxy_mariadb_pin }}
+
+# For PostgreSQL:
+#druid.metadata.storage.type=postgresql
+#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
+#druid.metadata.storage.connector.user=...
+#druid.metadata.storage.connector.password=...
+
+#
+# Deep storage
+#
+
+# For local disk (only viable in a cluster if this is a network mount):
+#druid.storage.type=local
+#druid.storage.storageDirectory=var/druid/segments
+
+# For HDFS:
+druid.storage.type=hdfs
+druid.storage.storageDirectory=/druid/segments
+
+# For S3:
+#druid.storage.type=s3
+#druid.storage.bucket=your-bucket
+#druid.storage.baseKey=druid/segments
+#druid.s3.accessKey=...
+#druid.s3.secretKey=...
+
+#
+# Indexing service logs
+#
+
+# For local disk (only viable in a cluster if this is a network mount):
+#druid.indexer.logs.type=file
+#druid.indexer.logs.directory=var/druid/indexing-logs
+
+# For HDFS:
+druid.indexer.logs.type=hdfs
+druid.indexer.logs.directory=/druid/indexing-logs
+
+druid.indexer.logs.kill.enabled=true
+druid.indexer.logs.kill.durationToRetain=604800000
+druid.indexer.logs.kill.delay=21600000
+
+# For S3:
+#druid.indexer.logs.type=s3
+#druid.indexer.logs.s3Bucket=your-bucket
+#druid.indexer.logs.s3Prefix=druid/indexing-logs
+
+
+#
+# Service discovery
+#
+
+druid.selectors.indexing.serviceName=druid/overlord
+druid.selectors.coordinator.serviceName=druid/coordinator
+
+#
+# Monitoring
+#
+
+druid.monitoring.monitors=["org.apache.druid.java.util.metrics.SysMonitor","org.apache.druid.java.util.metrics.JvmMonitor"]
+druid.emitter=http
+druid.emitter.logging.logLevel=info
+druid.emitter.http.recipientBaseUrl=http://{{ inventory_hostname }}:9903
+
+# Storage type of double columns
+# omitting this will lead to index double as float at the storage layer
+
+druid.indexing.doubleStorage=double
+
+#
+# Security
+#
+druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password"]
+
+
+#
+# SQL
+#
+druid.sql.enable=true
+
+#
+# Lookups
+#
+druid.lookup.enableLookupSyncOnStartup=false
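Note: the rendered template points Druid's metadata store at the MariaDB instance behind the gateway VIP. A quick hedged pre-flight check that the metadata database exists and accepts the configured login; the shell variables stand in for the template variables above and are assumptions:

# Sketch: verify the Druid metadata DB before the Druid nodes start.
mysql -s -uroot -p"$MARIADB_PIN" -h "$GATEWAY_KEEPALIVE_HOST" -P3306 \
  -e "select count(1) from information_schema.schemata where schema_name='$DRUID_DB'" \
  | grep -qx 1 || echo "druid metadata database is missing"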
@@ -0,0 +1,11 @@
+-server
+-Xms{{ coordinator_mem }}m
+-Xmx{{ coordinator_mem }}m
+-XX:+UseG1GC
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=coordinator
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+-Dderby.stream.error.file=var/druid/derby.log
+
@@ -0,0 +1,31 @@
+#!/bin/bash
+source /etc/profile
+
+BASH_DIR={{ install_path }}
+VERSION={{ druid_version }}
+
+while true ; do
+
+metrics=`ps -ef | grep druid-metrics | grep -v grep | wc -l`
+druid=`ps -ef | grep druid | grep -v grep | grep -v json | grep cluster.conf| wc -l`
+
+if [ $druid -eq "0" ];then
+setsid nohup $BASH_DIR/$VERSION/bin/start-cluster-all-server > /dev/null 2>&1 &
+OLD_NUM=`cat $BASH_DIR/$VERSION/protect/restartsum/all-server`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASH_DIR/$VERSION/protect/restartsum/all-server
+echo "`date +%Y-%m-%d` `date +%H:%M:%S` - restart start-cluster-all-server - restart count -> $RESTART_NUM." >> $BASH_DIR/$VERSION/protect/protecthdfs.log
+fi
+
+if [ $metrics -eq "0" ];then
+nohup java -jar -Xmx1024m -Xms1024m $BASH_DIR/$VERSION/monitor/druid-metrics.jar 9903 > $BASH_DIR/$VERSION/monitor/metrics.log 2>&1 &
+OLD_NUM=`cat $BASH_DIR/$VERSION/protect/restartsum/metrics`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASH_DIR/$VERSION/protect/restartsum/metrics
+echo "`date +%Y-%m-%d` `date +%H:%M:%S` - restart druid-metrics.jar - restart count -> $RESTART_NUM." >> $BASH_DIR/$VERSION/protect/protecthdfs.log
+fi
+
+
+sleep 60
+done
+
@@ -0,0 +1,31 @@
+#!/bin/bash
+source /etc/profile
+
+BASH_DIR={{ install_path }}
+VERSION={{ druid_version }}
+
+while true ; do
+
+metrics=`ps -ef | grep druid-metrics | grep -v grep | wc -l`
+druid=`ps -ef | grep druid | grep -v grep | grep -v json | grep data.conf | wc -l`
+
+if [ $druid -eq "0" ];then
+setsid nohup $BASH_DIR/$VERSION/bin/start-cluster-data-server > /dev/null 2>&1 &
+OLD_NUM=`cat $BASH_DIR/$VERSION/protect/restartsum/data-server`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASH_DIR/$VERSION/protect/restartsum/data-server
+echo "`date +%Y-%m-%d` `date +%H:%M:%S` - restart start-cluster-data-server - restart count -> $RESTART_NUM." >> $BASH_DIR/$VERSION/protect/protecthdfs.log
+fi
+
+if [ $metrics -eq "0" ];then
+nohup java -jar -Xmx1024m -Xms1024m $BASH_DIR/$VERSION/monitor/druid-metrics.jar 9903 > $BASH_DIR/$VERSION/monitor/metrics.log 2>&1 &
+OLD_NUM=`cat $BASH_DIR/$VERSION/protect/restartsum/metrics`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASH_DIR/$VERSION/protect/restartsum/metrics
+echo "`date +%Y-%m-%d` `date +%H:%M:%S` - restart druid-metrics.jar - restart count -> $RESTART_NUM." >> $BASH_DIR/$VERSION/protect/protecthdfs.log
+fi
+
+
+sleep 60
+done
+
@@ -0,0 +1,31 @@
+#!/bin/bash
+source /etc/profile
+
+BASH_DIR={{ install_path }}
+VERSION={{ druid_version }}
+
+while true ; do
+
+metrics=`ps -ef | grep druid-metrics | grep -v grep | wc -l`
+druid=`ps -ef | grep druid | grep -v grep | grep -v json | grep master-with-query.conf | wc -l`
+
+if [ $druid -eq "0" ];then
+setsid nohup $BASH_DIR/$VERSION/bin/start-cluster-query-server > /dev/null 2>&1 &
+OLD_NUM=`cat $BASH_DIR/$VERSION/protect/restartsum/query-server`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASH_DIR/$VERSION/protect/restartsum/query-server
+echo "`date +%Y-%m-%d` `date +%H:%M:%S` - restart start-cluster-query-server - restart count -> $RESTART_NUM." >> $BASH_DIR/$VERSION/protect/protecthdfs.log
+fi
+
+if [ $metrics -eq "0" ];then
+nohup java -jar -Xmx1024m -Xms1024m $BASH_DIR/$VERSION/monitor/druid-metrics.jar 9903 > $BASH_DIR/$VERSION/monitor/metrics.log 2>&1 &
+OLD_NUM=`cat $BASH_DIR/$VERSION/protect/restartsum/metrics`
+RESTART_NUM=`expr $OLD_NUM + 1`
+echo $RESTART_NUM > $BASH_DIR/$VERSION/protect/restartsum/metrics
+echo "`date +%Y-%m-%d` `date +%H:%M:%S` - restart druid-metrics.jar - restart count -> $RESTART_NUM." >> $BASH_DIR/$VERSION/protect/protecthdfs.log
+fi
+
+
+sleep 60
+done
+
@@ -0,0 +1,10 @@
+-server
+-Xms{{ historical_mem }}m
+-Xmx{{ historical_mem }}m
+-XX:MaxDirectMemorySize={{ historical_MaxDirectMemorySize }}m
+-Duser.timezone=UTC
+-Dfile.encoding=UTF-8
+-Djava.io.tmpdir=var/tmp
+-Dlogfile.name=historical
+-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+
@@ -0,0 +1,45 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/historical
+druid.plaintextPort=8083
+
+# HTTP server threads
+druid.server.http.numThreads=60
+
+# Processing threads and buffers
+druid.processing.buffer.sizeBytes={{ historical_buffer_sizeBytes }}
+druid.processing.numMergeBuffers={{ historical_numMergeBuffers }}
+druid.processing.numThreads={{ historical_numThreads }}
+druid.processing.tmpDir=var/druid/processing
+
+# Segment storage
+#druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":3000000000000}]
+#druid.server.maxSize=35000000000000
+druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":{{ segmentCache_max_size }}}]
+druid.server.maxSize={{ server_disk_maxsize }}
+
+# Query cache
+druid.historical.cache.useCache=true
+druid.historical.cache.populateCache=true
+druid.cache.type=caffeine
+druid.cache.sizeInBytes=256000000
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
parcels/roles/components/druid/templates/bak/keepdruidall.j2 (new file, 43 lines)
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# netconsole This loads the netconsole module with the configured parameters.
+#
+# chkconfig:123456 40 60
+# description: keepdruidall
+source /etc/profile
+PRO_NAME=keepdruidall
+
+INS_DIR={{ install_path }}
+#version
+VERSION={{ druid_version }}
+
+case $1 in
+start)
+master=`ps -ef | grep dae-druid-all.sh | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/bin/dae-druid-all.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}' | xargs kill -9 2>/dev/null
+#keeppid=`ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}' | xargs kill -9 2>/dev/null`
+#echo "watchdog PID: $keeppid"
+#kill -9 $keeppid
+echo "stopping all druid processes"
+#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
+livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+if [ $livenum -ne 0 ];then
+ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9 2>/dev/null
+fi
+;;
+
+status)
+ps -ef | grep druid | grep -v grep | grep -v json
+;;
+
+* )
+echo "use keepdruidall [start|stop|status]"
+;;
+esac
+
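Note: because the template carries a chkconfig header, the rendered file behaves like a SysV service once registered. Typical usage on a node, assuming the file landed as /etc/init.d/keepdruidall as the tasks above arrange:

chkconfig --add keepdruidall    # register with SysV runlevels
service keepdruidall start      # launch the dae-druid-all.sh watchdog
service keepdruidall status     # list surviving druid processes
service keepdruidall stop       # kill the watchdog, then the druid JVMs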
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# netconsole This loads the netconsole module with the configured parameters.
+#
+# chkconfig:123456 40 60
+# description: keepdruiddata
+source /etc/profile
+PRO_NAME=keepdruiddata
+
+INS_DIR={{ install_path }}
+#version
+VERSION={{ druid_version }}
+
+case $1 in
+start)
+master=`ps -ef | grep dae-druid-data.sh | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/bin/dae-druid-data.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+keeppid=`ps -ef | grep dae-druid-data.sh | grep -v grep | awk '{print $2}'`
+echo "watchdog PID: $keeppid"
+kill -9 $keeppid
+echo "stopping all druid processes"
+#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
+livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+if [ $livenum -ne 0 ];then
+ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+fi
+;;
+
+status)
+ps -ef | grep druid | grep -v grep | grep -v json
+;;
+
+* )
+echo "use keepdruiddata [start|stop|status]"
+;;
+esac
+
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# netconsole This loads the netconsole module with the configured parameters.
+#
+# chkconfig:123456 40 60
+# description: keepdruidquery
+source /etc/profile
+PRO_NAME=keepdruidquery
+
+INS_DIR={{ install_path }}
+#version
+VERSION={{ druid_version }}
+
+case $1 in
+start)
+master=`ps -ef | grep dae-druid-query.sh | grep -v grep | wc -l`
+if [ $master -lt 1 ];then
+nohup $INS_DIR/$VERSION/bin/dae-druid-query.sh > /dev/null 2>&1 &
+fi
+;;
+
+stop)
+keeppid=`ps -ef | grep dae-druid-query.sh | grep -v grep | awk '{print $2}'`
+echo "watchdog PID: $keeppid"
+kill -9 $keeppid
+echo "stopping all druid processes"
+#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
+livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+if [ $livenum -ne 0 ];then
+ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+fi
+;;
+
+status)
+ps -ef | grep druid | grep -v grep | grep -v json
+;;
+
+* )
+echo "use keepdruidquery [start|stop|status]"
+;;
+esac
+
parcels/roles/components/druid/templates/bak/log4j2.xml.j2 (new file, 76 lines)
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~   http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing,
+  ~ software distributed under the License is distributed on an
+  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  ~ KIND, either express or implied.  See the License for the
+  ~ specific language governing permissions and limitations
+  ~ under the License.
+-->
+<Configuration status="WARN">
+  <properties>
+    <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n</property>
+    <property name="patternNoTime">%msg%n</property>
+    <property name="patternMetrics">%d %-8r %m%n</property>
+    <property name="logPath">{{ install_path }}/{{ druid_version }}/var/sv</property>
+  </properties>
+  <appenders>
+    <RollingFile name="STDOUT"
+                 fileName="${logPath}/${sys:logfile.name}.log"
+                 filePattern="${logPath}/history/${sys:logfile.name}-%d{yyyy-MM-dd}.log.%i.gz">
+
+      <PatternLayout>
+        <pattern>${pattern}</pattern>
+      </PatternLayout>
+
+      <Policies>
+        <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
+      </Policies>
+
+      <DefaultRolloverStrategy max="10">
+        <Delete basePath="${logPath}/history" maxDepth="1">
+          <IfFileName glob="*.gz">
+            <IfLastModified age="7d">
+              <!--<IfAny>
+                <IfAccumulatedFileSize exceeds="200 GB" />
+              </IfAny>-->
+            </IfLastModified>
+          </IfFileName>
+        </Delete>
+      </DefaultRolloverStrategy>
+
+    </RollingFile>
+
+    <!--
+    <RollingFile name="METRICS"
+                 fileName="${logPath}/${sys:logfile.name}.metrics"
+                 filePattern="${logPath}/old/${sys:logfile.name}-%d{yyyy-MM-dd}.metrics.%i.gz">
+      <PatternLayout>
+        <pattern>${patternMetrics}</pattern>
+      </PatternLayout>
+      <Policies>
+        <SizeBasedTriggeringPolicy size="100 MB"/>
+      </Policies>
+      <DefaultRolloverStrategy max="6"/>
+    </RollingFile>
+    -->
+
+  </appenders>
+
+  <loggers>
+    <Root level="error">
+      <appender-ref ref="STDOUT"/>
+    </Root>
+
+  </loggers>
+</Configuration>
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+druid.service=druid/middleManager
+druid.plaintextPort=8091
+
+# Number of tasks per middleManager
+druid.worker.capacity=200
+
+# Task launch parameters
+druid.indexer.runner.javaOpts=-server {{ middlemanager_runner_javaOpts }} -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dlog4j.configurationFile=conf/druid/cluster/_common/log4j2-task.xml
+druid.indexer.task.baseTaskDir=var/druid/task
+
+# HTTP server threads
+druid.server.http.numThreads=60
+
+# Processing threads and buffers on Peons
+druid.indexer.fork.property.druid.processing.numMergeBuffers={{ middlemanager_numMergeBuffers }}
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes={{ middlemanager_buffer_sizeBytes }}
+druid.indexer.fork.property.druid.processing.numThreads={{ middlemanager_numThreads }}
+
+# Hadoop indexing
+druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp
+
+druid.query.groupBy.maxMergingDictionarySize=10000000000
+druid.query.groupBy.maxOnDiskStorage=10000000000
+
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#num=`cat /etc/profile | grep druid | wc -l`
+
+#if [ $num -eq "0" ];then
+# echo -e "\n#druid" >> /etc/profile
+# echo -e "export DRUID_HOME={{ install_path }}/{{ druid_version }}" >> /etc/profile
+# echo -e "export PATH=\$DRUID_HOME/bin:\$PATH" >> /etc/profile
+# source /etc/profile
+#fi
+
+keeppath='/etc/init.d/keepdruidall'
+if [ -x $keeppath ];then
+chkconfig --add keepdruidall
+chkconfig keepdruidall on
+fi
+
+keeppath='/etc/init.d/keepdruiddata'
+if [ -x $keeppath ];then
+chkconfig --add keepdruiddata
+chkconfig keepdruiddata on
+fi
+
+keeppath='/etc/init.d/keepdruidquery'
+if [ -x $keeppath ];then
+chkconfig --add keepdruidquery
+chkconfig keepdruidquery on
+fi
+
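Note: chkconfig only accepts scripts whose header declares runlevels and priorities, which is why the keep* templates carry the `# chkconfig:` and `# description:` comment lines. A minimal hedged skeleton of such a registrable script, illustrative only:

#!/bin/bash
# chkconfig: 123456 40 60
# description: example keepalive wrapper (illustrative only)
case $1 in
start) echo "start the watchdog here" ;;
stop)  echo "stop the watchdog here" ;;
*)     echo "use $0 [start|stop]" ;;
esac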
@@ -0,0 +1,43 @@
+#!/bin/bash
+source /etc/profile
+
+function killService(){
+keeppath='/etc/init.d/keepdruidall'
+if [ -x $keeppath ];then
+service keepdruidall stop
+chkconfig keepdruidall off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepdruidall
+fi
+
+keeppath='/etc/init.d/keepdruiddata'
+if [ -x $keeppath ];then
+service keepdruiddata stop
+chkconfig keepdruiddata off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepdruiddata
+fi
+
+keeppath='/etc/init.d/keepdruidquery'
+if [ -x $keeppath ];then
+service keepdruidquery stop
+chkconfig keepdruidquery off
+systemctl daemon-reload
+rm -rf /etc/init.d/keepdruidquery
+fi
+
+}
+
+function killPid(){
+
+livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+if [ $livenum -ne 0 ];then
+ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+fi
+
+}
+
+killService
+sleep 15
+killPid
+rm -rf {{ install_path }}/{{ druid_version }}
@@ -20,16 +20,18 @@ fi
 ;;

 stop)
-ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}' | xargs kill -9 2>/dev/null
-#keeppid=`ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}' | xargs kill -9 2>/dev/null`
-#echo "watchdog PID: $keeppid"
-#kill -9 $keeppid
+keeppid=`ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$keeppid" ]
+then
+echo "watchdog PID: $keeppid"
+kill -9 $keeppid
 echo "stopping all druid processes"
 #ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
-livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
-if [ $livenum -ne 0 ];then
-ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9 2>/dev/null
 fi
+#livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+#if [ $livenum -ne 0 ];then
+#ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+#fi
 ;;

 status)
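Note: testing a captured PID list numerically breaks when the list is empty or holds several PIDs, so the stop branch guards with a string test instead. The same guard in isolation, with pgrep as an alternative:

keeppid=$(ps -ef | grep dae-druid-all.sh | grep -v grep | awk '{print $2}')
if [ -n "$keeppid" ]; then       # non-empty string: at least one PID found
  kill -9 $keeppid
fi
# pgrep -f does the match-and-list in one step; -r makes xargs skip empty input:
pgrep -f dae-druid-all.sh | xargs -r kill -9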
@@ -21,14 +21,17 @@ fi

 stop)
 keeppid=`ps -ef | grep dae-druid-data.sh | grep -v grep | awk '{print $2}'`
+if [ -n "$keeppid" ]
+then
 echo "watchdog PID: $keeppid"
 kill -9 $keeppid
 echo "stopping all druid processes"
-#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
-livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
-if [ $livenum -ne 0 ];then
-ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
 fi
+
+#livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+#if [ $livenum -ne 0 ];then
+#ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+#fi
 ;;

 status)
@@ -21,14 +21,18 @@ fi

 stop)
 keeppid=`ps -ef | grep dae-druid-query.sh | grep -v grep | awk '{print $2}'`

+if [ -n "$keeppid" ]
+then
 echo "watchdog PID: $keeppid"
 kill -9 $keeppid
 echo "stopping all druid processes"
-#ps aux | grep druid | grep -v grep | awk '{print $2}' | xargs kill -9
-livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
-if [ $livenum -ne 0 ];then
-ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
 fi
+
+#livenum=`ps -ef |grep "druid" | grep -v grep | grep -v json | wc -l`
+#if [ $livenum -ne 0 ];then
+#ps aux|grep "druid" |grep -v grep | awk '{print $2}'| xargs kill -9
+#fi
 ;;

 status)
@@ -38,6 +38,6 @@ fi
 }

 killService
-sleep 15
-killPid
+#sleep 15
+#killPid
 rm -rf {{ install_path }}/{{ druid_version }}
@@ -120,7 +120,7 @@

 #start the flink watchdog
 - name: start keepflinkalive service
-  shell: source /etc/profile && sh /etc/init.d/keepflinkalive start
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepflinkalive start
   when: hostvars[inventory_hostname]['ansible_env'].SSH_CONNECTION.split(' ')[2] == groups.flink[0]

@@ -1,13 +1,9 @@
 #!/bin/bash

-num=`cat /etc/profile | grep flink | wc -l`
-
-if [ $num -eq "0" ];then
-echo -e "\n#flink" >> /etc/profile
-echo -e "export FLINK_HOME={{ install_path }}/{{ flink_version }}" >> /etc/profile
-echo -e "export PATH=\$FLINK_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#flink\nexport FLINK_HOME={{ install_path }}/{{ flink_version }}\nexport PATH=\$FLINK_HOME/bin:\$PATH" >> /etc/profile.d/flink.sh
+chmod +x /etc/profile.d/flink.sh
+source /etc/profile

 keeppath='/etc/init.d/keepflinkalive'
 if [ -x $keeppath ];then
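Note: writing a drop-in under /etc/profile.d keeps /etc/profile untouched and makes uninstall a single rm, at the cost of idempotence: re-running the installer appends duplicate export lines. A hedged variant that stays idempotent by overwriting the drop-in instead of appending; the install path is an assumed example:

# Sketch: overwrite (not append) so repeated installs stay clean.
cat > /etc/profile.d/flink.sh <<'EOF'
#flink
export FLINK_HOME=/data/galaxy/flink   # assumed install path
export PATH=$FLINK_HOME/bin:$PATH
EOF
chmod +x /etc/profile.d/flink.sh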
@@ -22,4 +22,5 @@ killService
 sleep 5
 killPid
 rm -rf {{ install_path }}/{{ flink_version }}
+rm -rf /etc/profile.d/flink.sh
 sleep 5
@@ -202,7 +202,7 @@
   shell: cd {{ install_path }}/{{ hadoop_version }}/bin/ && ./set_hadoop_env.sh

 - name: start hadoop journal
-  shell: source /etc/profile && sh /etc/init.d/keephdfsjournal start
+  shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsjournal start
   run_once: true
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
@@ -283,7 +283,7 @@
   delegate_to: "{{ master_ip }}"

 - name: start hadoop-master
-  shell: service keephdfsmaster start
+  shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsmaster start
   run_once: true
   delegate_facts: true
   delegate_to: "{{ master_ip }}"
@@ -298,13 +298,13 @@
   delegate_to: "{{ slave1_ip }}"

 - name: start hadoop-slave
-  shell: service keephdfsslave start
+  shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsslave start
   run_once: true
   delegate_facts: true
   delegate_to: "{{ slave1_ip }}"

 - name: start hadoop-worker
-  shell: service keephdfsworker start
+  shell: source /etc/profile && /bin/bash /etc/init.d/keephdfsworker start

 - name: deleted hadoop.tar.gz
   shell: rm -rf {{ install_path }}/hadoop-2.7.1.tar.gz
@@ -1,15 +1,12 @@
 #!/bin/bash

-num=`cat /etc/profile | grep hadoop | wc -l`
-
-if [ $num -eq "0" ];then
-echo -e "\n#hadoop" >> /etc/profile
-echo -e "export HADOOP_HOME={{ install_path }}/{{ hadoop_version }}" >> /etc/profile
-echo -e "export PATH=\$HADOOP_HOME/sbin:\$PATH" >> /etc/profile
-echo -e "export PATH=\$HADOOP_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#hadoop\nexport HADOOP_HOME={{ install_path }}/{{ hadoop_version }}\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH" >> /etc/profile.d/hadoop.sh
+chmod +x /etc/profile.d/hadoop.sh
+source /etc/profile

 keeppath='/etc/init.d/keephdfsmaster'
 if [ -x $keeppath ];then
@@ -67,4 +67,4 @@ sleep 15
 killPid
 rm -rf {{ install_path }}/{{ hadoop_version }}
 rm -rf {{ install_path }}/hadoop
+rm -rf /etc/profile.d/hadoop.sh
@@ -210,7 +210,7 @@
   shell: cd {{ install_path }}/{{ hbase_version }}/bin/ && sh set_hbase_env.sh

 - name: start hbase master
-  shell: source /etc/profile && sh /etc/init.d/keephbasemaster 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keephbasemaster 'start'
   run_once: true
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
@@ -220,7 +220,7 @@
     - { ip: '{{ slave2_ip }}' }

 - name: start hbase region
-  shell: source /etc/profile && sh /etc/init.d/keephbaseregion 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keephbaseregion 'start'

 - name: Ansible delete {{ install_path }}/{{ hbase_version }}.tar.gz
   file:
@@ -1,14 +1,8 @@
 #!/bin/bash

-num=`cat /etc/profile | grep hbase | wc -l`
-
-if [ $num -eq "0" ];then
-echo -e "\n#hbase" >> /etc/profile
-echo -e "export HBASE_HOME={{ data_path }}/{{ hbase_version }}" >> /etc/profile
-echo -e "export PATH=\$HBASE_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#hbase\nexport HBASE_HOME={{ data_path }}/{{ hbase_version }}\nexport PATH=\$HBASE_HOME/bin:\$PATH" >> /etc/profile.d/hbase.sh
+chmod +x /etc/profile.d/hbase.sh
+source /etc/profile

 keeppath='/etc/init.d/keephbasemaster'
 if [ -x $keeppath ];then
@@ -37,4 +37,4 @@ killService
 sleep 15
 killPid
 rm -rf {{ install_path }}/{{ hbase_version }}
+rm -rf /etc/profile.d/hbase.sh
@@ -209,7 +209,7 @@

 #start the kafka watchdog
 - name: start keepkafalive service
-  shell: source /etc/profile && sh /etc/init.d/keepkafalive 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepkafalive 'start'

 - name: Ansible delete {{ kafka_version }}.tar.gz
   file:
@@ -1,13 +1,8 @@
 #!/bin/bash

-num=`cat /etc/profile | grep kafka | wc -l`
-
-if [ $num -eq "0" ];then
-echo -e "\n#kafka" >> /etc/profile
-echo -e "export KAFKA_HOME={{ install_path }}/{{ kafka_version }}" >> /etc/profile
-echo -e "export PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#kafka\nexport KAFKA_HOME={{ install_path }}/{{ kafka_version }}\nexport PATH=\$KAFKA_HOME/bin:\$PATH" >> /etc/profile.d/kafka.sh
+chmod +x /etc/profile.d/kafka.sh
+source /etc/profile

 keeppath='/etc/init.d/keepkafalive'
 if [ -x $keeppath ];then
@@ -24,4 +24,5 @@ sleep 5
 killPid
 rm -rf {{ install_path }}/{{ kafka_version }}
 rm -rf {{ data_path }}/{{ kafka_version }}
+rm -rf /etc/profile.d/kafka.sh
 sleep 5
@@ -45,39 +45,39 @@
   when: (allowed_unload) == "no"

 - name: check mariadb version; if mariadb does not exist, an error will be printed; don't worry!!!
-  shell: mysql -uroot
-  ignore_errors: True
+#  shell: mysql -uroot
+  shell: ps aux | grep mysqld | grep -v grep | wc -l
   register: result

 - name: create directory path:/data/mariadb/logs
   file:
     state: directory
     path: '/data/mariadb/logs'
-  when: result.stderr is defined and result.stderr != '' #an error message means the mariadb service does not exist
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist

 - name: create directory path:/data/mariadb/sql
   file:
     state: directory
     path: '/data/mariadb/sql'
-  when: result.stderr is defined and result.stderr != '' #an error message means the mariadb service does not exist
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist

 - name: create directory path:{{ install_path }}/mariadb/sql
   file:
     state: directory
     path: '{{ install_path }}/mariadb'
-  when: result.stderr is defined and result.stderr != ''
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist

 - name: copy
   copy: src=files/slow_query.log dest=/data/mariadb/logs force=true backup=yes
-  when: result.stderr is defined and result.stderr != '' #an error message means the mariadb service does not exist
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist

 - name: mariadb service not exist; copy -rpm.zip and unzip mariadb.zip to {{ install_path }}/mariadb
   unarchive: src={{ package_path }}/mariadb.zip dest={{ install_path }}/mariadb copy=yes
-  when: result.stderr is defined and result.stderr != '' #an error message means the mariadb service does not exist
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist

 - name: mariadb service not exist; install mariadb
   shell: rpm -ivh {{ install_path }}/mariadb/*.rpm --force --nodeps && systemctl start mysql && systemctl enable mysql
-  when: result.stderr is defined and result.stderr != '' #an error message means the mariadb service does not exist
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist
   register: install_mariadb_result

 - name: copy my.cnf
@@ -87,7 +87,7 @@
     force: yes
   run_once: true
   delegate_to: '{{ groups.mariadb[0] }}'
-  when: result.stderr is defined and result.stderr != '' #an error message means the mariadb service does not exist
+  when: result.stdout == '0' #no mysqld process means the mariadb service does not exist

 - name: copy my.cnf
   template:
@@ -96,15 +96,15 @@
|
|||||||
force: yes
|
force: yes
|
||||||
run_once: true
|
run_once: true
|
||||||
delegate_to: '{{ groups.mariadb[1] }}'
|
delegate_to: '{{ groups.mariadb[1] }}'
|
||||||
when: result.stderr is defined and result.stderr != '' #有错误信息,说明mariadb服务不存在
|
when: result.stdout == '0' #有错误信息,说明mariadb服务不存在
|
||||||
|
|
||||||
- name: change mariadb root password
|
- name: change mariadb root password
|
||||||
shell: mysql -uroot -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '{{ galaxy_mariadb_pin }}';"
|
shell: mysql -uroot -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '{{ galaxy_mariadb_pin }}';"
|
||||||
when: result.stderr is defined and result.stderr != '' #有错误信息,说明mariadb服务不存在
|
when: result.stdout == '0' #有错误信息,说明mariadb服务不存在
|
||||||
|
|
||||||
- name: change mariadb remote authority
|
- name: change mariadb remote authority
|
||||||
shell: mysql -uroot -p{{ galaxy_mariadb_pin }} -e"use mysql;grant all privileges on *.* to 'root'@'%' identified by '{{ galaxy_mariadb_pin }}' with grant option;FLUSH PRIVILEGES;"
|
shell: mysql -uroot -p{{ galaxy_mariadb_pin }} -e"use mysql;grant all privileges on *.* to 'root'@'%' identified by '{{ galaxy_mariadb_pin }}' with grant option;FLUSH PRIVILEGES;"
|
||||||
when: result.stderr is defined and result.stderr != '' #有错误信息,说明mariadb服务不存在
|
when: result.stdout == '0' #有错误信息,说明mariadb服务不存在
|
||||||
|
|
||||||
- name: restart mariadb
|
- name: restart mariadb
|
||||||
shell: systemctl restart mysql
|
shell: systemctl restart mysql
|
||||||
|
|||||||
Binary file not shown.
@@ -156,7 +156,7 @@

 # start the daemon
 - name: start keepnacosalive service
-  shell: source /etc/profile && sh /etc/init.d/keepnacosalive 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepnacosalive 'start'

 - name: Ansible delete {{ nacos_version }}.tar.gz
   file:
@@ -1,13 +1,9 @@
 #!/bin/bash

-num=`cat /etc/profile | grep nacos | wc -l`
-if [ $num -eq "0" ];then
-echo -e "\n#nacos" >> /etc/profile
-echo -e "export NACOS_HOME={{ install_path }}/{{ nacos_version }}" >> /etc/profile
-echo -e "export PATH=\$NACOS_HOME/bin:\$PATH" >> /etc/profile
-fi
+echo -e "\n#nacos\nexport NACOS_HOME={{ install_path }}/{{ nacos_version }}\nexport PATH=\$NACOS_HOME/bin:\$PATH" >> /etc/profile.d/nacos.sh
+chmod +x /etc/profile.d/nacos.sh

 source /etc/profile

 keeppath='/etc/init.d/keepnacosalive'
 if [ -x $keeppath ];then
 chkconfig --add keepnacosalive
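The rewrite above (repeated below for spark, storm, and zookeeper) drops the grep-guarded appends to /etc/profile in favor of a drop-in file under /etc/profile.d, which can be removed with a single rm at uninstall time. A sketch of the pattern with a hypothetical component; note the installer scripts append with >>, while writing with > (as here) is the fully idempotent variant that never duplicates entries on a rerun:

    #!/bin/bash
    # write the environment drop-in; login shells source /etc/profile.d/*.sh
    cat > /etc/profile.d/example.sh <<'EOF'
    export EXAMPLE_HOME=/opt/example
    export PATH=$EXAMPLE_HOME/bin:$PATH
    EOF
    chmod +x /etc/profile.d/example.sh
    # uninstall is symmetric:
    # rm -f /etc/profile.d/example.sh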
@@ -23,4 +23,6 @@ killService
 sleep 10
 killPid
 rm -rf {{ install_path }}/{{ nacos_version }}
+rm -rf /etc/profile.d/nacos.sh
 sleep 5
@@ -1,14 +1,8 @@
 #!/bin/bash

-num=`cat /etc/profile | grep spark | wc -l`
-if [ $num -eq "0" ];then
-echo -e "\n#spark" >> /etc/profile
-echo -e "export SPARK_HOME={{ install_path }}/{{ spark_version }}" >> /etc/profile
-echo -e "export PATH=\$SPARK_HOME/sbin:\$PATH" >> /etc/profile
-echo -e "export PATH=\$SPARK_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#spark\nexport SPARK_HOME={{ install_path }}/{{ spark_version }}\nexport PATH=\$SPARK_HOME/sbin:\$PATH\nexport PATH=\$SPARK_HOME/bin:\$PATH" >> /etc/profile.d/spark.sh
+chmod +x /etc/profile.d/spark.sh
+source /etc/profile

 keeppath='/etc/init.d/keepsparkall'
 if [ -x $keeppath ];then
@@ -1,12 +1,8 @@
 #!/bin/bash

-num=`cat /etc/profile | grep storm | wc -l`
-if [ $num -eq "0" ];then
-echo -e "\n#storm" >> /etc/profile
-echo -e "export STORM_HOME={{ install_path }}/{{ storm_version }}" >> /etc/profile
-echo -e "export PATH=\$STORM_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#storm\nexport STORM_HOME={{ install_path }}/{{ storm_version }}\nexport PATH=\$STORM_HOME/bin:\$PATH" >> /etc/profile.d/storm.sh
+chmod +x /etc/profile.d/storm.sh
+source /etc/profile

 keeppath='/etc/init.d/keepnimalive'
 if [ -x $keeppath ];then
@@ -37,3 +37,4 @@ killService
 sleep 5
 killPid
 rm -rf {{ install_path }}/{{ storm_version }}
+rm -rf /etc/profile.d/storm.sh
@@ -157,7 +157,7 @@
     force_source: yes

 - name: start keepzkalive service
-  shell: source /etc/profile && sh /etc/init.d/keepzkalive 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepzkalive 'start'

 - name: waiting zookeeper start sleep 10 s
   shell: sleep 10
@@ -167,7 +167,7 @@
   register: zkstatus_out

 - name: start keepzkalive service again
-  shell: source /etc/profile && sh /etc/init.d/keepzkalive 'start'
+  shell: source /etc/profile && /bin/bash /etc/init.d/keepzkalive 'start'
   when: zkstatus_out.stdout != '1'

 - name: start zookeeper_exporter
@@ -12,7 +12,7 @@ start)
 keepnum=`ps -ef | grep dae-zookeeper.sh | grep -v grep | wc -l`

 if [ $keepnum -eq "0" ];then
-nohup {{ install_path }}/{{ zookeeper_version }}/bin/dae-zookeeper.sh /home/bigdata > /dev/null 2>&1 &
+nohup {{ install_path }}/{{ zookeeper_version }}/bin/dae-zookeeper.sh > /dev/null 2>&1 &
 fi
 ;;
@@ -1,13 +1,7 @@
 #!/bin/bash

-num=`cat /etc/profile | grep zookeeper | wc -l`
-if [ $num -eq "0" ];then
-echo -e "\n#zookeeper" >> /etc/profile
-echo -e "export ZOOKEEPER_HOME={{ install_path }}/{{ zookeeper_version }}" >> /etc/profile
-echo -e "export PATH=\$ZOOKEEPER_HOME/bin:\$PATH" >> /etc/profile
-source /etc/profile
-fi
+echo -e "\n#zookeeper\nexport ZOOKEEPER_HOME={{ install_path }}/{{ zookeeper_version }}\nexport PATH=\$ZOOKEEPER_HOME/bin:\$PATH" >> /etc/profile.d/zookeeper.sh
+chmod +x /etc/profile.d/zookeeper.sh

 keeppath='/etc/init.d/keepzkalive'
 if [ -x $keeppath ];then
@@ -8,7 +8,7 @@ livenum=`jps -l | grep "org.apache.zookeeper.server.quorum.QuorumPeerMain" | wc
 if [ $livenum -ne 0 ];then
 keeppid=`jps -l |grep zookeeper | awk '{print $1}'`
 kill -9 $keeppid
+rm -rf /etc/profile.d/zookeeper.sh
 rm -rf {{ install_path }}/{{ zookeeper_version }}
 rm -rf {{ data_path }}/{{ zookeeper_version }}
 chkconfig keepzkalive off

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -12,6 +12,12 @@
   delegate_to: "{{ query_ip }}"

+- name: create clickhouse tables
+  shell: clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u default --password {{ root_key }} --multiquery < {{ install_path }}/create_ck_table.sql 2>/dev/null
+  run_once: true
+  ignore_errors: True
+  delegate_to: "{{ query_ip }}"
+
+- name: check clickhouse tables
   shell: clickhouse-client -h {{ inventory_hostname }} --port 9001 -m -u default --password {{ root_key }} --multiquery < {{ install_path }}/create_ck_table.sql
   run_once: true
   delegate_to: "{{ query_ip }}"
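The two tasks above form a create-then-verify pair: the first pass tolerates failures (stderr suppressed, ignore_errors), the second must complete cleanly. A rough shell equivalent, with an illustrative host and password, and assuming the DDL uses CREATE TABLE IF NOT EXISTS so the second pass is a cheap verification:

    #!/bin/bash
    # first pass: create tables, tolerating objects that already exist
    clickhouse-client -h 127.0.0.1 --port 9001 -u default --password secret \
      --multiquery < create_ck_table.sql 2>/dev/null || true
    # second pass: must succeed, otherwise the deployment is broken
    clickhouse-client -h 127.0.0.1 --port 9001 -u default --password secret \
      --multiquery < create_ck_table.sql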
@@ -51,7 +51,8 @@
   cron:
     name: 'dos-baseline'
     hour: "3"
-    weekday: "1"
     job: '/bin/sh {{ install_path }}/dos-baseline/start.sh'
     user: root
   delegate_to: "{{ master_ip }}"
@@ -18,31 +18,31 @@ case $1 in
 resetAll)
 for var in ${common_task_name[@]};
 do
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
 sleep 2
 done

 for var in ${hot_task_name[@]};
 do
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/${var}/reset
 sleep 2
 done
 ;;

 terminateAll)
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/terminateAll
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/terminateAll
 ;;

 shutdownAllTasks)
 for var in ${common_task_name[@]};
 do
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
 sleep 2
 done

 for var in ${hot_task_name[@]};
 do
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/${var}/shutdownAllTasks
 sleep 2
 done
 ;;
@@ -52,7 +52,7 @@ case $1 in
 echo "Usage: supervisor-manager reset <dataSource>"
 exit 1
 fi
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/reset
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/reset
 ;;

 terminate)
@@ -60,7 +60,7 @@ case $1 in
 echo "Usage: supervisor-manager terminate <dataSource>"
 exit 1
 fi
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/terminate
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/supervisor/$2/terminate
 ;;

 shutdownTasks)
@@ -68,7 +68,7 @@ case $1 in
 echo "Usage: supervisor-manager shutdownTasks <dataSource>"
 exit 1
 fi
-curl -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/$2/shutdownAllTasks
+curl --retry-delay 3 --retry 3 -X 'POST' http://${router_ip}:8089/druid/indexer/v1/datasources/$2/shutdownAllTasks
 ;;
 *)
 echo "Usage: supervisor-manager {resetAll|terminateAll|shutdownAllTasks}"
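Every curl call in this script (and in most of the hunks below) gains `--retry-delay 3 --retry 3`. One note on semantics: curl only retries errors it considers transient, that is timeouts and HTTP 408/429/5xx responses; a refused connection is not retried unless `--retry-connrefused` is also given. A minimal sketch against an illustrative endpoint:

    #!/bin/bash
    # retry up to 3 times, waiting 3 seconds between attempts
    curl --retry 3 --retry-delay 3 -X POST \
        "http://127.0.0.1:8089/druid/indexer/v1/supervisor/demo/reset"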
@@ -38,11 +38,13 @@
   run_once: true
   delegate_to: '{{ hadoop_ip }}'

-- name: check if {{ mariadb_druid_database }} exist
-  shell: mysql -s -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} -e "SELECT COUNT(1) FROM druid.druid_segments WHERE start = '3000-01-02T00:00:00.000Z'"
-  register: segments_nums
-  run_once: true
-  delegate_to: "127.0.0.1"
+#- name: check if {{ mariadb_druid_database }} exist; if you see a 'FAILED' message, please ignore it ! ! !
+#  shell: mysql -s -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} -e "SELECT COUNT(1) FROM druid.druid_segments WHERE start = '3000-01-02T00:00:00.000Z'"
+#  register: segments_nums
+#  run_once: true
+#  delegate_to: "127.0.0.1"
+#  ignore_errors: True
+#  failed_when: "'FAILED' in segments_nums.stderr or segments_nums.stdout != '20'"

 - name: copy segments.sql to ~
   copy:
@@ -51,13 +53,13 @@
     force: true
   run_once: true
   delegate_to: '127.0.0.1'
-  when: segments_nums.stdout != '20'
+  #when: segments_nums is defined

 - name: insert segments to mariadb
   shell: 'mysql -uroot -p{{ galaxy_mariadb_pin }} -P3306 -h{{ gateway_keepalive_host }} {{ mariadb_druid_database }} < ~/druid_segments-tsg3.0.sql'
   run_once: true
   delegate_to: '127.0.0.1'
-  when: segments_nums.stdout != '20'
+  #when: segments_nums is defined

 - name: copy druid_topology to {{ install_path }}
   copy:
@@ -6,11 +6,11 @@ task_name=`cat topology_list.txt`
 kafka_host={{ kafka_source_servers }}
 druid_host={{ groups.druid[0] }}

-curl -i -XGET 'http://'$druid_host':8081/druid/indexer/v1/leader'
+curl --retry-delay 3 --retry 3 -i -XGET 'http://'$druid_host':8088/druid/indexer/v1/leader'
 num=$?
 while [ $num -gt "0" ];do
 sleep 5
-curl -s -XGET 'http://'$druid_host':8081/druid/indexer/v1/leader'
+curl --retry-delay 3 --retry 3 -s -XGET 'http://'$druid_host':8088/druid/indexer/v1/leader'
 num=$?
 done
 cp ./tasks/* ./
@@ -18,7 +18,7 @@ cp ./tasks/* ./
 for var in ${task_name[@]};
 do
 sed -i 's/kafkabootstrap/'$kafka_host'/' ${var}
-curl -X 'POST' -H 'Content-Type:application/json' -d @${var} http://$druid_host:8081/druid/indexer/v1/supervisor
+curl --retry-delay 3 --retry 3 -X 'POST' -H 'Content-Type:application/json' -d @${var} http://$druid_host:8088/druid/indexer/v1/supervisor
 echo "task '${var}' started successfully"
 sleep 2
 done
@@ -14,14 +14,14 @@ hot_rule_file="$base_dir/hot_data_rule.json"

 for var in ${common_task_name[@]};
 do
-curl -X 'POST' -H 'Content-Type:application/json' -d @$common_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
+curl --retry-delay 3 --retry 3 -X 'POST' -H 'Content-Type:application/json' -d @$common_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
 echo "task '${var}' started successfully"
 sleep 2
 done

 for var in ${hot_task_name[@]};
 do
-curl -X 'POST' -H 'Content-Type:application/json' -d @$hot_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
+curl --retry-delay 3 --retry 3 -X 'POST' -H 'Content-Type:application/json' -d @$hot_rule_file http://${router_ip}:8089/druid/coordinator/v1/rules/${var}
 echo "task '${var}' started successfully"
 sleep 2
 done
@@ -15,7 +15,7 @@
     src: '{{ package_path }}/topology'
     dest: '{{ install_path }}/'
     force: true
-    backup: yes
+    backup: false
   # the packages on the other servers serve as backups
   #delegate_to: "{{ master_ip }}"
@@ -7,59 +7,59 @@
     port: 8186
   timeout: 60

-- name: init hos service
-  shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/admin/initialize' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+- name: Initialize Galaxy-hos-service
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/admin/initialize' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
   register: initialize
   run_once: true
   delegate_facts: true
   delegate_to: '{{ master_ip }}'

-- name: check hos init status
+- name: Checking Initialize status
   fail:
     msg: "The HOS internal initialize API call failed! Please check."
   when: initialize.stdout != '200'

-- name: delete firewall_hos_bucket
-  shell: "curl --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
-  register: delete
-  run_once: true
-  delegate_facts: true
-  delegate_to: '{{ master_ip }}'
-
-- name: check delete firewall_hos_bucket status
-  fail:
-    msg: "HOS failed to delete bucket firewall_hos_bucket"
-  when: delete.stdout != '204'
+#- name: delete firewall_hos_bucket
+#  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+#  register: delete
+#  run_once: true
+#  delegate_facts: true
+#  delegate_to: '{{ master_ip }}'
+#
+#- name: check delete firewall_hos_bucket status
+#  fail:
+#    msg: "HOS failed to delete bucket firewall_hos_bucket"
+#  when: delete.stdout != '204'
+#

-- name: delete proxy_hos_bucket
-  shell: "curl --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
-  register: delete
-  run_once: true
-  delegate_facts: true
-  delegate_to: '{{ master_ip }}'
-
-- name: check delete proxy_hos_bucket status
-  fail:
-    msg: "HOS failed to delete bucket proxy_hos_bucket"
-  when: delete.stdout != '204'
+#- name: delete proxy_hos_bucket
+#  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+#  register: delete
+#  run_once: true
+#  delegate_facts: true
+#  delegate_to: '{{ master_ip }}'
+#
+#- name: check delete proxy_hos_bucket status
+#  fail:
+#    msg: "HOS failed to delete bucket proxy_hos_bucket"
+#  when: delete.stdout != '204'
+#

-- name: delete session_record_hos_bucket
-  shell: "curl --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
-  register: delete
-  run_once: true
-  delegate_facts: true
-  delegate_to: '{{ master_ip }}'
-
-- name: check delete session_record_hos_bucket status
-  fail:
-    msg: "HOS failed to delete bucket session_record_hos_bucket"
-  when: delete.stdout != '204'
+#- name: delete session_record_hos_bucket
+#  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request DELETE 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+#  register: delete
+#  run_once: true
+#  delegate_facts: true
+#  delegate_to: '{{ master_ip }}'
+#
+#- name: check delete session_record_hos_bucket status
+#  fail:
+#    msg: "HOS failed to delete bucket session_record_hos_bucket"
+#  when: delete.stdout != '204'

 - name: create firewall_hos_bucket
-  shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/firewall_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
   register: table1
   run_once: true
   delegate_facts: true
@@ -71,7 +71,7 @@
   when: table1.stdout.find('200') == '-1' and table1.stdout.find('409') == '-1'

 - name: create proxy_hos_bucket
-  shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/proxy_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
   register: table2
   run_once: true
   delegate_facts: true
@@ -83,7 +83,7 @@
   when: table2.stdout.find('200') == '-1' and table2.stdout.find('409') == '-1'

 - name: create session_record_hos_bucket
-  shell: "curl --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request PUT 'http://{{ master_ip }}:8186/hos/session_record_hos_bucket' --header 'token: c21f969b5f03d33d43e04f8f136e7682'"
   register: table3
   run_once: true
   delegate_facts: true
@@ -95,7 +95,7 @@
   when: table3.stdout.find('200') == '-1' and table3.stdout.find('409') == '-1'

 - name: Does proxy_hos_bucket exist
-  shell: "curl --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep proxy_hos_bucket | wc -l"
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep proxy_hos_bucket | wc -l"
   register: bucketa_out

 - fail:
@@ -103,7 +103,7 @@
   when: bucketa_out.stdout != '1'

 - name: Does session_record_hos_bucket exist
-  shell: "curl --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep session_record_hos_bucket | wc -l"
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep session_record_hos_bucket | wc -l"
   register: bucketb_out

 - fail:
@@ -111,7 +111,7 @@
   when: bucketb_out.stdout != '1'

 - name: Does firewall_hos_bucket exist
-  shell: "curl --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep firewall_hos_bucket | wc -l"
+  shell: "curl --retry-delay 3 --retry 3 --location -s -w %{http_code} --request GET 'http://{{ master_ip }}:8186/hos/' --header 'token: c21f969b5f03d33d43e04f8f136e7682' |grep firewall_hos_bucket | wc -l"
   register: bucketc_out

 - fail:

Binary file not shown.
@@ -15,8 +15,10 @@
     backup: yes

 - name: create hbase table
-  shell: cd {{ install_path }}/{{ hbase_version }}/bin/ && ./create-hbase-table.sh
+  shell: cd {{ install_path }}/{{ hbase_version }}/bin/ && ./create-hbase-table.sh | grep ERROR | grep -v "already exists"
+  register: result
   run_once: true
   delegate_facts: true
   delegate_to: "{{ master_ip }}"
+  failed_when: "'ERROR' in result.stdout"
@@ -5,12 +5,12 @@ source /etc/profile
 exec {{ install_path }}/{{ hbase_version }}/bin/hbase shell <<EOF
 create_namespace 'tsg'
 create_namespace 'sub'
+create_namespace 'dos'

 create 'tsg:report_result', {NAME => 'response', VERSIONS => 1,COMPRESSION => 'GZ',IS_MOB => true, MOB_THRESHOLD => 0}, {NAME => 'detail',COMPRESSION => 'GZ',VERSIONS => 1}

 create 'sub:subscriber_info', {NAME => 'subscriber_id', VERSIONS => 1}

-create 'ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood', 'DNS Amplification'
+create 'dos:ddos_traffic_baselines', 'TCP SYN Flood','UDP Flood','ICMP Flood','DNS Amplification'

-list
 EOF
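The create-table task above pipes the hbase shell output through `grep ERROR | grep -v "already exists"` so that reruns over existing tables do not trip `failed_when`. The same filter as a standalone sketch; the script name is the one the playbook ships:

    #!/bin/bash
    # keep only real errors; 'already exists' is expected on re-runs
    errors=$(./create-hbase-table.sh | grep ERROR | grep -v "already exists")
    if [ -n "$errors" ]; then
        echo "hbase table creation failed:" >&2
        echo "$errors" >&2
        exit 1
    fi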
@@ -1,3 +1,6 @@
+- name: stop keepalived
+  shell: if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne '0' ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi && service keepalived stop
+
 - name: stop and remove {{ chproxy_image_container_name }} container
   docker_container:
     name: '{{ chproxy_image_container_name }}'
@@ -129,3 +132,6 @@
   with_items:
     - { ip: '{{ groups.services[0] }}' }
     - { ip: '{{ groups.services[1] }}' }
+
+- name: start keepalived
+  shell: 'nohup /bin/bash /etc/keepalived/kp_daemon.sh >/dev/null 2>&1 &'
@@ -1,7 +1,32 @@
 # kill the old auto-start process
 - name: kill kp_daemon.sh
-  shell: ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill 2>/dev/null
+  shell: "if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne 0 ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi"
+
+- block:
+    - name: Check if the keepalived service already exists
+      shell: source /etc/profile && rpm -qa | grep keepalived | wc -l
+      register: check_out
+    - name: copy unload_keepalived.sh
+      template:
+        src: unload_keepalived.sh
+        dest: /root/
+    - name: unload keepalived
+      shell: sh /root/unload_keepalived.sh | grep -v "warning"
+      when: check_out.stdout >= '1'
+    - name: Check if the keepalived service already exists
+      shell: source /etc/profile && rpm -qa | grep keepalived | wc -l
+      register: check_out
+    - name: delete unload_keepalived.sh
+      file:
+        path: "/root/unload_keepalived.sh"
+        state: absent
+    - name: To terminate execution
+      fail:
+        msg: "Uninstall failed; the component may not have been deployed by this installer. Contact the developers to confirm, or uninstall it manually and then continue the installation."
+      run_once: true
+      delegate_to: 127.0.0.1
+      when: check_out.stdout >= '1'
+  when: (allowed_unload) == "yes"

 # create directories
 - name: create keepalived package path:{{ keepalived_package_path }}
@@ -67,13 +67,18 @@ installKP(){

 chmod +x $keepalivedInstallPath/$keepalivedDaeonName
 echo "adding the $keepalivedInstallPath/$keepalivedDaeonName daemon to boot auto-start"

 chmod +x /etc/rc.d/rc.local
+upStartNum=`cat /etc/rc.d/rc.local | grep "$keepalivedInstallPath" | wc -l`
+if [ $upStartNum -eq "0" ];then
 echo -e "\n# enable the $keepalivedDaeonName daemon at boot" >> /etc/rc.d/rc.local
 echo "nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &" >> /etc/rc.d/rc.local
-echo "the $keepalivedInstallPath/$keepalivedDaeonName daemon was added to boot auto-start successfully"
+fi

 echo "starting the $keepalivedInstallPath/$keepalivedDaeonName daemon"
 # nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &
+sed -i -e 's#. /etc/sysconfig/keepalived#. /usr/local/etc/sysconfig/keepalived#g' /etc/init.d/keepalived
+systemctl daemon-reload
 service keepalived start
 rm -rf $keepalivedBagPath
 }
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+#for i in `rpm -qa | grep keepalived` ; do rpm -e --nodeps $i ; done
+
+if [ `ps aux | grep -E "keepalived|kp_daemon" | grep -v grep | wc -l` -ne '0' ] ; then ps aux | grep -E "keepalived|kp_daemon" | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi
+
+rm -rf /etc/keepalived
+
+rm -rf /etc/init.d/keepalived
@@ -1,3 +1,6 @@
+- name: stop keepalived
+  shell: if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne '0' ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi && service keepalived stop
+
 - name: stop and remove {{ gateway_image_container_name }} container
   docker_container:
     name: '{{ gateway_image_container_name }}'
@@ -130,3 +133,7 @@
     - { ip: '{{ groups.services[0] }}' }
     - { ip: '{{ groups.services[1] }}' }
+
+#- name: start keepalived
+#  shell: nohup /bin/bash /etc/keepalived/kp_daemon.sh >/dev/null 2>&1 &
+#  shell: systemctl start keepalived
+#  shell: service keepalived start
@@ -1,3 +1,34 @@
+# kill the old auto-start process
+- name: kill kp_daemon.sh
+  shell: "if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne 0 ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi"
+
+- block:
+    - name: Check if the keepalived service already exists
+      shell: source /etc/profile && rpm -qa | grep keepalived | wc -l
+      register: check_out
+    - name: copy unload_keepalived.sh
+      template:
+        src: unload_keepalived.sh
+        dest: /root/
+        mode: 0755
+    - name: unload keepalived
+      shell: sh /root/unload_keepalived.sh
+      when: check_out.stdout >= '1'
+    - name: Check if the keepalived service already exists
+      shell: source /etc/profile && rpm -qa | grep keepalived | grep -v 'keepalived-1.3.5-1.el7.x86_64' | wc -l
+      register: check_out
+    - name: delete unload_keepalived.sh
+      file:
+        path: "/root/unload_keepalived.sh"
+        state: absent
+    - name: To terminate execution
+      fail:
+        msg: "Uninstall failed; the component may not have been deployed by this installer. Contact the developers to confirm, or uninstall it manually and then continue the installation."
+      run_once: true
+      delegate_to: 127.0.0.1
+      when: check_out.stdout >= '1'
+  when: (allowed_unload) == "yes"

 # create directories
 - name: create keepalived package path:{{ keepalived_package_path }}
   file:
@@ -67,12 +67,17 @@ installKP(){

 chmod +x $keepalivedInstallPath/$keepalivedDaeonName
 echo "adding the $keepalivedInstallPath/$keepalivedDaeonName daemon to boot auto-start"

 chmod +x /etc/rc.d/rc.local
+upStartNum=`cat /etc/rc.d/rc.local | grep "$keepalivedInstallPath" | wc -l`
+if [ $upStartNum -eq "0" ];then
 echo -e "\n# enable the $keepalivedDaeonName daemon at boot" >> /etc/rc.d/rc.local
 echo "nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &" >> /etc/rc.d/rc.local
-echo "the $keepalivedInstallPath/$keepalivedDaeonName daemon was added to boot auto-start successfully"
+fi

 echo "starting the $keepalivedInstallPath/$keepalivedDaeonName daemon"
+sed -i -e 's#. /etc/sysconfig/keepalived#. /usr/local/etc/sysconfig/keepalived#g' /etc/init.d/keepalived
+systemctl daemon-reload
 nohup $keepalivedInstallPath/$keepalivedDaeonName >/dev/null 2>&1 &

 rm -rf $keepalivedBagPath
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+#for i in `rpm -qa | grep keepalived` ; do rpm -e --nodeps $i ; done
+
+if [ `ps aux | grep -E "keepalived|kp_daemon" | grep -vE "grep|unload_keepalived" | wc -l` -ne '0' ] ; then ps aux | grep -E "keepalived|kp_daemon" | grep -vE "grep|unload_keepalived" | awk '{print $2}' | xargs kill -9 ; fi
+
+rm -rf /etc/keepalived
+
+rm -rf /etc/init.d/keepalived
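Both unload scripts kill keepalived with a ps | grep | awk | xargs pipeline; this second variant also excludes "unload_keepalived" from the match so the script does not kill its own shell. Where procps is available, pkill can express the same thing more compactly; a hedged alternative sketch, not what the installer ships:

    #!/bin/bash
    # -f matches the full command line; pkill never matches its own process,
    # but a wrapper script whose name matches the pattern would still be hit
    pkill -9 -f 'keepalived|kp_daemon' 2>/dev/null || true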
@@ -1,3 +1,6 @@
+- name: stop keepalived
+  shell: if [ `ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | wc -l` -ne '0' ] ; then ps aux | grep kp_daemon | grep -v grep | awk '{print $2}' | xargs kill -9 ; fi && service keepalived stop
+
 - name: stop and remove {{ hos_nginx_image_container_name }} container
   docker_container:
     name: '{{ hos_nginx_image_container_name }}'
@@ -6,20 +9,20 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

-- name: remove old {{ hos_nginx_hos_service_image_name }} image
+- name: remove old {{ hos_nginx_image_name }} image
   docker_image:
-    name: '{{ hos_nginx_hos_service_image_name }}'
-    tag: '{{ hos_nginx_hos_service_image_tag_name }}'
+    name: '{{ hos_nginx_image_name }}'
+    tag: '{{ hos_nginx_image_tag_name }}'
     state: absent
   run_once: true
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

 - name: create {{ hos_nginx_volume_path }}
   file:
@@ -29,8 +32,8 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

 - name: create {{ install_path }}/{{ hos_nginx_soft_home_path }}
   file:
@@ -40,12 +43,12 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

-- name: copy {{ hos_nginx_hos_service_image_tar_name }}
+- name: copy {{ hos_nginx_image_tar_name }}
   copy:
-    src: '{{ package_path }}/{{ hos_nginx_hos_service_image_tar_name }}'
+    src: '{{ package_path }}/{{ hos_nginx_image_tar_name }}'
     dest: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/'
     force: true
     backup: yes
@@ -53,14 +56,14 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

-- name: load new {{ hos_nginx_hos_service_image_name }} image from {{ hos_nginx_hos_service_image_tar_name }}
+- name: load new {{ hos_nginx_image_name }} image from {{ hos_nginx_image_tar_name }}
   docker_image:
-    name: '{{ hos_nginx_hos_service_image_name }}'
-    tag: '{{ hos_nginx_hos_service_image_tag_name }}'
-    load_path: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/{{ hos_nginx_hos_service_image_tar_name }}'
+    name: '{{ hos_nginx_image_name }}'
+    tag: '{{ hos_nginx_image_tag_name }}'
+    load_path: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/{{ hos_nginx_image_tar_name }}'
     source: load
     force_tag: yes
     force_source: yes
@@ -68,19 +71,19 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

 - name: change the image tag
-  shell: docker tag nginx-metrics:{{ hos_nginx_hos_service_image_tag_name }} {{ hos_nginx_hos_service_image_name }}:{{ hos_nginx_hos_service_image_tag_name }}
+  shell: docker tag nginx-metrics:{{ hos_nginx_image_tag_name }} {{ hos_nginx_image_name }}:{{ hos_nginx_image_tag_name }}
   run_once: true
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

-- name: copy {{ hos_nginx_hos_service_image_name }} docker-compose.yml
+- name: copy {{ hos_nginx_image_name }} docker-compose.yml
   template:
     src: docker-compose.yml.j2
     dest: '{{ install_path }}/{{ hos_nginx_soft_home_path }}/docker-compose.yml'
@@ -89,10 +92,10 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

-- name: copy {{ hos_nginx_hos_service_image_name }} nginx.conf
+- name: copy {{ hos_nginx_image_name }} nginx.conf
   template:
     src: nginx.conf.j2
     dest: '{{ install_path }}/{{ hos_nginx_volume_path }}/nginx.conf'
@@ -101,8 +104,8 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }

 - name: start {{ hos_nginx_image_container_name }} container
   docker_compose:
@@ -111,5 +114,6 @@
   delegate_facts: true
   delegate_to: "{{ item.ip }}"
   with_items:
-    - { ip: '{{ groups.hosnginx[0] }}' }
-    - { ip: '{{ groups.hosnginx[1] }}' }
+    - { ip: '{{ groups.hoskeepalive[0] }}' }
+    - { ip: '{{ groups.hoskeepalive[1] }}' }
@@ -1,7 +1,7 @@
 version: '3'
 services:
   nginx:
-    image: {{ hos_nginx_hos_service_image_name }}:{{ hos_nginx_hos_service_image_tag_name }}
+    image: {{ hos_nginx_image_name }}:{{ hos_nginx_image_tag_name }}
     container_name: {{ hos_nginx_image_container_name }}
     restart: always
     ports:
@@ -67,7 +67,7 @@
     backup: yes

 - name: push config
-  shell: 'curl --data-urlencode content="`cat {{ data_path }}/{{ hos_service_soft_home_path }}/galaxy-hos-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-hos-service.yml&appName=galaxy-hos-service&type=yaml"'
+  shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ data_path }}/{{ hos_service_soft_home_path }}/galaxy-hos-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-hos-service.yml&appName=galaxy-hos-service&type=yaml"'
   register: change_out

 - fail:
@@ -77,3 +77,20 @@
 - name: start {{ hos_service_image_container_name }} container
   docker_compose:
     project_src: '{{ data_path }}/{{ hos_service_soft_home_path }}'
+
+
+## get the ip list
+#- name: get storm nimbus iplist
+#  shell: echo "{{ ansible_play_hosts }}" | grep -E -o "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" > {{ data_path }}/{{ hos_service_soft_home_path }}/ip_list
+#
+#- name: copy {{ hos_service_image_name }} registerinstance.sh.j2
+#  template:
+#    src: registerinstance.sh.j2
+#    dest: '{{ data_path }}/{{ hos_service_soft_home_path }}/registerinstance.sh'
+#    backup: yes
+#
+#- name: change the image tag
+#  shell: cd {{ data_path }}/{{ hos_service_soft_home_path }} && chmod +x registerinstance.sh && ./registerinstance.sh
@@ -3,7 +3,7 @@ nacos:
   type: yaml
   server-addr: {{ gateway_keepalive_host }}:8848
   namespace: {{ services_config_namespace }}
-  data-id: galaxy-hos-service
+  data-id: galaxy-hos-service.yml
   auto-refresh: true
   group: Galaxy
   username: nacos
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+basedir=`pwd`
+serverAddr="http://{{ gateway_keepalive_host }}:8848"
+namespace="{{ services_config_namespace }}"
+group="Galaxy"
+username="nacos"
+password="{{ nacos_pin }}"
+serviceName="hos"
+hosPort="8186"
+
+for ip in `cat $basedir/hosiplist`
+do
+issuccess=`curl --retry-delay 3 --retry 3 -X POST ''$serverAddr'/nacos/v1/ns/instance?serviceName='$serviceName'&ip='$ip'&port='$hosPort'&namespaceId='$namespace'&groupName='$group'&ephemeral=false&username='$username'&password='$password''`
+if [ `echo $issuccess | grep ok | wc -l` -eq 0 ];then
+echo "register $ip error"
+fi
+done
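A usage sketch for the registration script above: it expects a file named hosiplist next to it, one IP per line, and registers each one as a non-ephemeral 'hos' instance in nacos, printing an error for any registration whose response does not contain "ok". The IPs here are purely illustrative:

    #!/bin/bash
    # hypothetical host ips, one per line, as the script's for-loop expects
    printf '10.0.0.11\n10.0.0.12\n' > hosiplist
    bash registerinstance.sh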
Binary file not shown.
@@ -34,6 +34,19 @@
     tag: '{{ admin_image_tag_name }}'
     state: absent

+- name: stop and remove {{ job_image_container_name }} container
+  docker_container:
+    name: '{{ job_image_container_name }}'
+    state: absent
+
+- name: remove old {{ job_image_name }} image
+  docker_image:
+    name: '{{ job_image_name }}'
+    tag: '{{ job_image_tag_name }}'
+    state: absent
+
 - name: create {{ admin_volume_path }}
   file:
     state: directory
@@ -91,7 +91,7 @@
     backup: yes

 - name: push config
-  shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-executor`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-executor.properties&appName=galaxy-job-executor&type=properties"'
+  shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-executor`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-executor.properties&appName=galaxy-job-executor&type=properties"'
   register: change_out

 - fail:
@@ -99,7 +99,7 @@
   when: change_out.stdout != 'true'

 - name: push config
-  shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-admin`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-admin.properties&appName=galaxy-job-admin&type=properties"'
+  shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ job_soft_home_path }}/galaxy-job-admin`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-job-admin.properties&appName=galaxy-job-admin&type=properties"'
   register: change_out

 - fail:
@@ -3,7 +3,7 @@ version: '2'
 services:
   galaxy-job-executor:
     image: {{ job_image_name }}:{{ job_image_tag_name }}
-    container_name: galaxy-job
+    container_name: {{ job_image_container_name }}
     environment:
       JAVA_OPTS: "{{ job_java_opts }}"
     ports:
@@ -18,7 +18,7 @@ services:

   galaxy-job-admin:
     image: {{ admin_image_name }}:{{ admin_image_tag_name }}
-    container_name: xxl-job-admin
+    container_name: {{ admin_image_container_name }}
     environment:
       JAVA_OPTS: "{{ job_java_opts }}"
     ports:
Binary file not shown.
(image file changed: 80 MiB before, 80 MiB after)
@@ -1,3 +1,14 @@
+- name: start keepalived service
+  shell: service keepalived start
+
+- name: check keepalived
+  shell: ps aux | grep kp_daemon | grep -v grep | wc -l
+  register: daemon_stat
+
+- name: start keepalived
+  shell: nohup /bin/bash /etc/keepalived/kp_daemon.sh >/dev/null 2>&1 &
+  when: daemon_stat.stdout == '0'
+
 - name: get zookeeper_servers to ansible variable
   set_fact: zookeeper_servers="{{groups.zookeeper[0]}}:2181,{{groups.zookeeper[1]}}:2181,{{groups.zookeeper[2]}}:2181"
   when: '(groups.zookeeper|length) == 3'
@@ -88,7 +99,7 @@
     backup: yes

 #- name: push config
-#  shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ qgw_soft_home_path }}/galaxy-qgw-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-qgw-service.yml&appName=galaxy-qgw-service&type=yaml"'
+#  shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ qgw_soft_home_path }}/galaxy-qgw-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-qgw-service.yml&appName=galaxy-qgw-service&type=yaml"'
 #  register: change_out

 #- debug:
@@ -31,7 +31,7 @@ do
 # 3. push the config
 for (( i = 0; i < 10; i++ )); do
 if [ "$suffix" = "yaml" ]||[ "$suffix" = "json" ]||[ "$suffix" = "text" ]||[ "$suffix" = "properties" ];then
-result=$(curl -sw '%{http_code}' -o /dev/null --request POST "$nacos_push_url&dataId=$data_id&appName=$appName&type=$suffix" --data-urlencode content="`cat $config_path_file`")
+result=$(curl --retry-delay 3 --retry 3 -sw '%{http_code}' -o /dev/null --request POST "$nacos_push_url&dataId=$data_id&appName=$appName&type=$suffix" --data-urlencode content="`cat $config_path_file`")
 echo "push config response code "$result
 if [[ $result -eq '200' ]];then
 i=10
@@ -63,7 +63,7 @@
     backup: yes

 - name: push config
-  shell: 'curl --data-urlencode content="`cat {{ install_path }}/{{ report_soft_home_path }}/galaxy-report-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-report-service.yml&appName=galaxy-report-service&type=yaml"'
+  shell: 'curl --retry-delay 3 --retry 3 --data-urlencode content="`cat {{ install_path }}/{{ report_soft_home_path }}/galaxy-report-service`" --request POST "http://{{ gateway_keepalive_host }}:8848/nacos/v1/cs/configs?username=nacos&password={{ nacos_pin }}&tenant={{ services_config_namespace }}&group=Galaxy&dataId=galaxy-report-service.yml&appName=galaxy-report-service&type=yaml"'
   register: change_out

 - fail:
@@ -27,6 +27,18 @@
 #  msg: "{{groups['flink'].index}}"
   when: index_no >= 0

+- name: test mysql
+  shell: which mysql
+  register: result
+  failed_when: "'FAILED' in result.stderr or result.stdout != '20'"
+  ignore_errors: True
+
+- name: output result
+  debug:
+    msg: "output: {{ result }}"
+  #when: result.stderr is defined and result.stderr != ''  # error output means the mariadb service does not exist
+  when: result is undefined or result.stdout != '20'

 vars_files:
   - ../configurations/config.yml
   - ../configurations/components.yml