Initial version of the Ansible deployment playbooks for each component

qidaijie
2024-01-18 15:35:33 +08:00
parent f0bd05d565
commit 0cc392df5c
262 changed files with 15927 additions and 0 deletions

@@ -0,0 +1,194 @@
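# Cluster variant (main.yml below selects it when the 'yarn' group has more
# than one host): the first two hosts in 'yarn' become the master pair, the
# rest become workers.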
- name: Setting node_nums variable
set_fact: node_nums="{{ groups.hdfs | length }}"
- name: Abort if the cluster is too small
fail:
msg: "Fully distributed mode requires at least 3 nodes; please check configurations/hosts -> hdfs"
when: node_nums | int < 3
- name: Check JDK version
shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
ignore_errors: false
register: jdk_out
- name: Abort when the expected JDK is missing
fail:
msg: "JDK is not installed in the target cluster, please check!"
when: jdk_out.stdout != '2'
run_once: true
delegate_to: 127.0.0.1
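# Note on jdk_out.stdout != '2': `java -version` normally prints the version
# string on two of its banner lines, so a grep count of exactly 2 is taken to
# mean "the expected JDK is installed" (an assumption about the JDK banner
# format, not something this commit states).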
- name: 'Create hadoop deploy directory: {{ deploy_dir }}'
file:
state: directory
path: '{{ deploy_dir }}'
- block:
- name: Unpack {{ hadoop_version }}.tar.gz to {{ deploy_dir }}/
unarchive:
src: 'files/{{ hadoop_version }}.tar.gz'
dest: '{{ deploy_dir }}/'
- name: Copying the full hadoop/yarn config set (nodes outside the hdfs group)
template:
src: '{{ item.src }}'
dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
mode: '{{ item.mode }}'
backup: false
with_items:
- { src: 'yarn-site.xml.j2', dest: 'etc/hadoop/yarn-site.xml', mode: '0644' }
- { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
- { src: 'set_yarn_env.sh.j2', dest: 'bin/set_yarn_env.sh', mode: '0755' }
- { src: 'core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
- { src: 'hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
- { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
- { src: 'capacity-scheduler.xml.j2', dest: 'etc/hadoop/capacity-scheduler.xml', mode: '0644' }
- { src: 'yarn-env.sh.j2', dest: 'etc/hadoop/yarn-env.sh', mode: '0755' }
- { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
when: inventory_hostname not in groups['hdfs']
- name: Copying yarn config files (nodes already in the hdfs group)
template:
src: '{{ item.src }}'
dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
mode: '{{ item.mode }}'
backup: false
with_items:
- { src: 'yarn-site.xml.j2', dest: 'etc/hadoop/yarn-site.xml', mode: '0644' }
- { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
- { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
- { src: 'yarn-env.sh.j2', dest: 'etc/hadoop/yarn-env.sh', mode: '0755' }
- { src: 'set_yarn_env.sh.j2', dest: 'bin/set_yarn_env.sh', mode: '0755' }
- { src: 'capacity-scheduler.xml.j2', dest: 'etc/hadoop/capacity-scheduler.xml', mode: '0644' }
when: inventory_hostname in groups['hdfs']
- block:
- name: Copying yarn master daemon scripts
template:
src: 'daemonscript/{{ item.src }}'
dest: '{{ item.dest }}'
mode: '0755'
backup: true
with_items:
- { src: 'dae-yarnhistory.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnhistory.sh' }
- { src: 'dae-yarnmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnmaster.sh' }
- { src: 'keepyarnhistory.j2', dest: '/etc/init.d/keepyarnhistory' }
- { src: 'keepyarnmaster.j2', dest: '/etc/init.d/keepyarnmaster' }
when: inventory_hostname in groups['yarn'][0:2]
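# groups['yarn'][0:2] selects the first two hosts of the 'yarn' group as the
# master pair; the dae-*/keep* scripts installed above appear to be init.d
# watchdogs that keep ResourceManager and JobHistoryServer running.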
- block:
- name: Start ResourceManager and JobHistoryServer
shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
with_items:
- { operation: 'chkconfig' }
- { operation: 'master' }
- { operation: 'history' }
- name: Wait for ResourceManager to start (sleep 60s)
shell: sleep 60
- name: Checking ResourceManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l
register: resourcemanager_check
- name: Abort if ResourceManager failed to start
fail:
msg: "ResourceManager failed to start; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: resourcemanager_check.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
- name: Checking JobHistoryServer status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l
register: history_check
- name: Abort if JobHistoryServer failed to start
fail:
msg: "JobHistoryServer failed to start; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: history_check.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: inventory_hostname in groups['yarn'][0:2]
- block:
- name: Copying yarn worker daemon scripts
template:
src: 'daemonscript/{{ item.src }}'
dest: '{{ item.dest }}'
mode: '0755'
backup: true
with_items:
- { src: 'dae-yarnworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh' }
- { src: 'keepyarnworker.j2', dest: '/etc/init.d/keepyarnworker' }
- name: Start NodeManager
shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
with_items:
- { operation: 'chkconfig' }
- { operation: 'worker' }
- name: Wait for NodeManager to start (sleep 60s)
shell: sleep 60
- name: Checking NodeManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
register: nodemanager_status
- name: Abort if NodeManager is not running
fail:
msg: "NodeManager is not running; please log in to [{{ inventory_hostname }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: nodemanager_status.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: node_nums | int >= cluster_limit | int and inventory_hostname not in groups['yarn'][0:2]
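# Small-cluster fallback: below cluster_limit there are no dedicated masters,
# so every host (the master pair included) also runs a NodeManager.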
- block:
- name: Copying yarn worker daemon scripts
template:
src: 'daemonscript/{{ item.src }}'
dest: '{{ item.dest }}'
mode: '0755'
backup: true
with_items:
- { src: 'dae-yarnworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh' }
- { src: 'keepyarnworker.j2', dest: '/etc/init.d/keepyarnworker' }
- name: Start NodeManager
shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
with_items:
- { operation: 'chkconfig' }
- { operation: 'worker' }
- name: Wait for NodeManager to start (sleep 60s)
shell: sleep 60
- name: Checking NodeManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
register: nodemanager_status
- name: Abort if NodeManager is not running
fail:
msg: "NodeManager is not running; please log in to [{{ inventory_hostname }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: nodemanager_status.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: node_nums | int < cluster_limit | int
#--------------------------------------------Flink----------------------------------------------#
- name: Unpack the Flink installation package
unarchive:
src: 'files/{{ flink_version }}.tgz'
dest: '{{ deploy_dir }}/{{ hadoop_version }}/'
- name: Render Flink configuration files
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
mode: '{{ item.mode }}'
with_items:
- { src: 'flink/flink.sh.j2', dest: '/etc/profile.d/flink.sh', mode: '0755' }
- { src: 'flink/flink-conf.yaml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/flink-conf.yaml', mode: '0644' }
- { src: 'yarn-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/yarn-site.xml', mode: '0644' }
- { src: 'core-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/core-site.xml', mode: '0644' }
- { src: 'hdfs-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/hdfs-site.xml', mode: '0644' }
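# The Hadoop XML files are duplicated into Flink's conf/ directory, presumably
# so the Flink-on-YARN client can resolve the cluster even without a
# HADOOP_CONF_DIR in its environment (an inference, not stated in the commit).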

@@ -0,0 +1,136 @@
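# Standalone variant: same flow as the cluster playbook, but it renders the
# standalone/*.j2 templates, skips the node-count check, and starts the Flink
# YARN session itself at the end.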
- name: Check JDK version
shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
ignore_errors: false
register: jdk_out
- name: Abort when the expected JDK is missing
fail:
msg: "JDK is not installed in the target cluster, please check!"
when: jdk_out.stdout != '2'
run_once: true
delegate_to: 127.0.0.1
- name: 'Create hadoop deploy directory: {{ deploy_dir }}'
file:
state: directory
path: '{{ deploy_dir }}'
- name: Unpack {{ hadoop_version }}.tar.gz to {{ deploy_dir }}/
unarchive:
src: 'files/{{ hadoop_version }}.tar.gz'
dest: '{{ deploy_dir }}/'
- name: Copying hadoop/yarn config files (standalone)
template:
src: '{{ item.src }}'
dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
mode: '{{ item.mode }}'
backup: false
with_items:
- { src: 'standalone/yarn-site.xml.j2', dest: 'etc/hadoop/yarn-site.xml', mode: '0644' }
- { src: 'standalone/hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
- { src: 'standalone/core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
- { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
- { src: 'set_yarn_env.sh.j2', dest: 'bin/set_yarn_env.sh', mode: '0755' }
- { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
- { src: 'capacity-scheduler.xml.j2', dest: 'etc/hadoop/capacity-scheduler.xml', mode: '0644' }
- { src: 'yarn-env.sh.j2', dest: 'etc/hadoop/yarn-env.sh', mode: '0755' }
- { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
- name: Copying yarn master daemon scripts
template:
src: 'daemonscript/{{ item.src }}'
dest: '{{ item.dest }}'
mode: '0755'
backup: true
with_items:
- { src: 'dae-yarnhistory.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnhistory.sh' }
- { src: 'dae-yarnmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnmaster.sh' }
- { src: 'keepyarnhistory.j2', dest: '/etc/init.d/keepyarnhistory' }
- { src: 'keepyarnmaster.j2', dest: '/etc/init.d/keepyarnmaster' }
- block:
- name: Start ResourceManager and JobHistoryServer
shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
with_items:
- { operation: 'chkconfig' }
- { operation: 'master' }
- { operation: 'history' }
- name: Wait for ResourceManager to start (sleep 60s)
shell: sleep 60
- name: Checking ResourceManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l
register: resourcemanager_check
- name: Abort if ResourceManager failed to start
fail:
msg: "ResourceManager failed to start; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: resourcemanager_check.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
- name: Checking JobHistoryServer status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l
register: history_check
- name: Abort if JobHistoryServer failed to start
fail:
msg: "JobHistoryServer failed to start; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: history_check.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: inventory_hostname in groups['yarn'][0:2]
- block:
- name: Copying yarn worker daemon scripts
template:
src: 'daemonscript/{{ item.src }}'
dest: '{{ item.dest }}'
mode: '0755'
backup: true
with_items:
- { src: 'dae-yarnworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-yarnworker.sh' }
- { src: 'keepyarnworker.j2', dest: '/etc/init.d/keepyarnworker' }
- name: Start NodeManager
shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_yarn_env.sh {{ item.operation }}
with_items:
- { operation: 'chkconfig' }
- { operation: 'worker' }
- name: Wait for NodeManager to start (sleep 60s)
shell: sleep 60
- name: Checking NodeManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
register: nodemanager_status
- name: Abort if NodeManager is not running
fail:
msg: "NodeManager is not running; please log in to [{{ inventory_hostname }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: nodemanager_status.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
#--------------------------------------------Flink----------------------------------------------#
- name: Unpack the Flink installation package
unarchive:
src: 'files/{{ flink_version }}.tgz'
dest: '{{ deploy_dir }}/{{ hadoop_version }}/'
- name: Render Flink configuration files
template:
src: '{{ item.src }}'
dest: '{{ item.dest }}'
mode: '{{ item.mode }}'
with_items:
- { src: 'flink/flink.sh.j2', dest: '/etc/profile.d/flink.sh', mode: '0755' }
- { src: 'flink/flink-conf.yaml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/flink-conf.yaml', mode: '0644' }
- { src: 'standalone/yarn-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/yarn-site.xml', mode: '0644' }
- { src: 'standalone/core-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/core-site.xml', mode: '0644' }
- { src: 'standalone/hdfs-site.xml.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/conf/hdfs-site.xml', mode: '0644' }
- name: Start Flink session on YARN
shell: source /etc/profile && cd {{ deploy_dir }}/{{ hadoop_version }}/{{ flink_version }}/bin/ && ./yarn-session.sh -d
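# yarn-session.sh -d starts a detached, long-running Flink session on YARN;
# sourcing /etc/profile first picks up the environment set by flink.sh above.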

@@ -0,0 +1,12 @@
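# Entry point: dispatches on the externally supplied 'operation' variable and
# picks the cluster or standalone playbook from the size of the 'yarn' group;
# an install always runs the uninstall tasks first for a clean slate.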
- block:
- include: uninstall.yml
- include: "{{ playbook_name }}"
vars:
playbook_name: "{{ 'deploy-cluster.yml' if groups.yarn | length > 1 else 'deploy-standalone.yml' }}"
- include: status-check.yml
when: operation == "install"
- block:
- include: uninstall.yml
when: operation == "uninstall"
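# A usage sketch (the top-level playbook name below is an assumption for
# illustration; the inventory path is taken from the error message above):
#   ansible-playbook -i configurations/hosts yarn.yml -e "operation=install"
#   ansible-playbook -i configurations/hosts yarn.yml -e "operation=uninstall"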

@@ -0,0 +1,57 @@
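# Post-deployment health check: after a 30s grace period, re-runs the jps
# probes on the master pair and on the workers.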
- name: Setting node_nums variable
set_fact: node_nums="{{ groups.yarn | length }}"
- name: Wait for YARN to start (sleep 30s)
shell: sleep 30
- block:
- name: Checking ResourceManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager" | grep -v grep | wc -l
register: resourcemanager_check
- name: Abort if ResourceManager failed to start
fail:
msg: "ResourceManager failed to start; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: resourcemanager_check.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
- name: Checking JobHistoryServer status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | grep -v grep | wc -l
register: history_check
- name: Abort if JobHistoryServer failed to start
fail:
msg: "JobHistoryServer failed to start; please log in to {{ inventory_hostname }} and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: history_check.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: inventory_hostname in groups['yarn'][0:2]
- block:
- name: Checking NodeManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
register: nodemanager_status
- name: Abort if NodeManager is not running
fail:
msg: "NodeManager is not running; please log in to [{{ inventory_hostname }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: nodemanager_status.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: node_nums | int >= cluster_limit | int and inventory_hostname not in groups['yarn'][0:2]
- block:
- name: Checking NodeManager status
shell: source /etc/profile && jps -l | grep "org.apache.hadoop.yarn.server.nodemanager.NodeManager" | grep -v grep | wc -l
register: nodemanager_status
- name: Abort if NodeManager is not running
fail:
msg: "NodeManager is not running; please log in to [{{ inventory_hostname }}] and keep the logs for feedback, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
when: nodemanager_status.stdout != '1'
run_once: true
delegate_to: 127.0.0.1
when: node_nums | int < cluster_limit | int

@@ -0,0 +1,55 @@
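# Uninstall: removes a previous Hadoop/YARN install and the Flink profile
# script, clears leftover YARN state from ZooKeeper, then verifies that no
# YARN processes survived.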
- block:
- name: Copy unload_hadoop_yarn.sh to {{ deploy_dir }}/
template:
src: 'unload_hadoop_yarn.sh.j2'
dest: '{{ deploy_dir }}/unload_hadoop_yarn.sh'
force: true
mode: '0755'
- name: Run unload_hadoop_yarn.sh
shell: cd {{ deploy_dir }} && sh unload_hadoop_yarn.sh
- name: Delete {{ deploy_dir }}/unload_hadoop_yarn.sh
file:
path: "{{ deploy_dir }}/unload_hadoop_yarn.sh"
state: absent
- name: Delete the old /etc/profile.d/flink.sh
file:
path: '/etc/profile.d/flink.sh'
state: absent
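# YARN HA keeps its state in ZooKeeper: /rmstore is the ResourceManager state
# store and /yarn-leader-election the leader latch; both are removed so a
# fresh install starts from a clean slate.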
- name: Check whether the /rmstore znode exists in ZooKeeper
shell: "docker exec zookeeper zkCli.sh ls / | grep rmstore | wc -l"
run_once: true
delegate_to: "{{ groups.zookeeper[0] }}"
register: rmstore_zknode
- name: Delete the /rmstore znode from ZooKeeper
shell: "docker exec zookeeper zkCli.sh rmr /rmstore"
run_once: true
delegate_to: "{{ groups.zookeeper[0] }}"
when: rmstore_zknode.stdout | int >= 1
- name: Check whether the yarn-leader-election znode exists in ZooKeeper
shell: docker exec zookeeper zkCli.sh ls / | grep "yarn-leader-election" | wc -l
run_once: true
delegate_to: "{{ groups.zookeeper[0] }}"
register: leader_zknode
- name: Delete the yarn-leader-election znode from ZooKeeper
shell: "docker exec zookeeper zkCli.sh rmr /yarn-leader-election"
run_once: true
delegate_to: "{{ groups.zookeeper[0] }}"
when: leader_zknode.stdout | int >= 1
- name: Check whether any YARN processes are still running
shell: source /etc/profile && jps -l | egrep "org.apache.hadoop.yarn.server.resourcemanager.ResourceManager|org.apache.hadoop.yarn.server.nodemanager.NodeManager|org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer" | wc -l
register: check_out
- name: Abort if YARN processes survived the uninstall
fail:
msg: "Uninstall failed: the component may not have been deployed by this playbook; please uninstall it manually before continuing with the installation"
run_once: true
delegate_to: 127.0.0.1
when: check_out.stdout | int >= 1