Initial commit of the Ansible playbooks for deploying each component
223
Apache Hadoop/2.7.1/hdfs/role/tasks/deploy.yml
Normal file
@@ -0,0 +1,223 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{ groups.hdfs | length }}"

- name: To terminate execution
  fail:
    msg: "Fully distributed mode requires at least 3 nodes; please check configurations/hosts -> hdfs"
  when: node_nums | int < 3
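
# A minimal sketch of the inventory this expects (hostnames are placeholders,
# not part of this commit) -- the hdfs group must list at least three hosts:
#
#   # configurations/hosts
#   [hdfs]
#   node1.example.com
#   node2.example.com
#   node3.example.com
#   node4.example.com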

- name: check JDK version
  shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
  ignore_errors: false
  register: jdk_out
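
# Note (an assumption about the JDK banner, not verified by this playbook):
# `java -version` writes to stderr -- hence the 2>&1 -- and on a typical
# OpenJDK/Oracle build the version string appears on two of the three banner
# lines, which is why the task below expects a count of exactly '2'.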

- name: To terminate execution
  fail:
    msg: "JDK is not installed in the target cluster, please check!"
  when: jdk_out.stdout != '2'
  run_once: true
  delegate_to: 127.0.0.1

- name: create hadoop package path:{{ deploy_dir }}
  file:
    state: directory
    path: '{{ item.path }}'
  with_items:
    - { path: '{{ hdfs_data_dir }}' }
    - { path: '{{ deploy_dir }}' }

- name: master_ip to ansible variable
  set_fact: master_ip={{ groups.hdfs[0] }}

- name: slave1_ip to ansible variable
  set_fact: slave1_ip={{ groups.hdfs[1] }}

- name: slave2_ip to ansible variable
  set_fact: slave2_ip={{ groups.hdfs[2] }}
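
# Topology as read from the tasks below (inferred, not stated in the commit):
# the first three hosts of the hdfs group carry the HA control plane -- two
# NameNodes (master_ip, slave1_ip) plus three JournalNodes -- while every
# host in the group also runs a DataNode.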

# Unpack the tarball
- name: unpack hadoop-2.7.1.tar.gz to {{ deploy_dir }}/
  unarchive:
    src: 'files/{{ hadoop_version }}.tar.gz'
    dest: '{{ deploy_dir }}/'

- name: Copying hadoop config files
  template:
    src: '{{ item.src }}'
    dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
    mode: '{{ item.mode }}'
    backup: false
  with_items:
    - { src: 'core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
    - { src: 'hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
    - { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
    - { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
    - { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
    - { src: 'set_hdfs_env.sh.j2', dest: 'bin/set_hdfs_env.sh', mode: '0755' }

- name: Copying HDFS config to {{ master_ip }}
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: false
  with_items:
    - { src: 'daemonscript/dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
    - { src: 'daemonscript/dae-hdfsmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsmaster.sh' }
    - { src: 'daemonscript/keephdfsmaster.j2', dest: '/etc/init.d/keephdfsmaster' }
    - { src: 'daemonscript/keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
    - { src: 'ini_hdfs.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh' }
  run_once: true
  delegate_to: "{{ master_ip }}"

- name: Copying HDFS config to {{ slave1_ip }}
  template:
    src: 'daemonscript/{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: yes
  with_items:
    - { src: 'dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
    - { src: 'dae-hdfsslave.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsslave.sh' }
    - { src: 'keephdfsslave.j2', dest: '/etc/init.d/keephdfsslave' }
    - { src: 'keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
  run_once: true
  delegate_to: "{{ slave1_ip }}"

- name: Copying HDFS config to {{ slave2_ip }}
  template:
    src: 'daemonscript/{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: yes
  with_items:
    - { src: 'dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
    - { src: 'keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
  run_once: true
  delegate_facts: true
  delegate_to: "{{ slave2_ip }}"

- name: Copying HDFS config to worker nodes
  template:
    src: 'daemonscript/{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: yes
  with_items:
    - { src: 'dae-hdfsworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsworker.sh' }
    - { src: 'keephdfsworker.j2', dest: '/etc/init.d/keephdfsworker' }

- name: set hadoop env
  shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh {{ item.operation }}
  with_items:
    - { operation: 'chkconfig' }
    - { operation: 'journal' }
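
# Presumed behaviour of set_hdfs_env.sh (templated above; the script itself
# is not part of this diff): 'chkconfig' registers the /etc/init.d/keephdfs*
# services, and 'journal' starts the JournalNode daemons that the next task
# waits for.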

- name: Waiting for the JournalNode to start, sleep 10s
  shell: sleep 10

- block:
    - name: checking JournalNode status
      shell: source /etc/profile && jps | grep JournalNode | grep -v grep | wc -l
      register: status_out

    - name: checking JournalNode
      fail:
        msg: "JournalNode failed to start; please log in to {{ inventory_hostname }} and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*journalnode*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  when: inventory_hostname in [master_ip, slave1_ip, slave2_ip]

- name: Initialization NameNode/ZKFC, start master NameNode
  block:
    - name: initialization hadoop NameNode
      shell: sh {{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh namenode | grep "yes" | grep -v grep | wc -l
      register: ini_namenode_out
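
    # Assumption about ini_hdfs.sh (templated earlier from ini_hdfs.sh.j2,
    # not shown in this diff): it prints "yes" on success, so the grep/wc
    # pipeline above yields '1' on success and '0' on failure.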

    - name: checking namenode init status
      fail:
        msg: "NameNode initialization failed; please log in to [{{ master_ip }}] and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: ini_namenode_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

    - name: initialization hadoop ZKFC
      shell: sh {{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh zkfc | grep "yes" | grep -v grep | wc -l
      register: ini_zkfc_out

    - name: checking hadoop-zk init status
      fail:
        msg: "hadoop-zk initialization failed; please log in to [{{ master_ip }}] and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/"
      when: ini_zkfc_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

    - name: start hadoop Master node
      shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh master

    - name: Waiting for the master NameNode to start, sleep 20s
      shell: sleep 20

    - name: checking {{ master_ip }} NameNode status
      shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
      register: master_namenode_status

    - name: checking master NameNode
      fail:
        msg: "The master NameNode did not start; please log in to [{{ master_ip }}] and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: master_namenode_status.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  run_once: true
  delegate_facts: true
  delegate_to: "{{ master_ip }}"

- name: Start slave NameNode
  block:
    - name: copying {{ master_ip }} NameNode files to Slave
      shell: "yes | {{ deploy_dir }}/{{ hadoop_version }}/bin/hdfs namenode -bootstrapStandby"

    - name: start hadoop Slave node
      shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh slave

    - name: Waiting for the slave NameNode to start, sleep 60s
      shell: sleep 60

    - name: checking {{ slave1_ip }} NameNode status
      shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
      register: slave1_namenode_status

    - name: checking slave1 NameNode
      fail:
        msg: "The slave NameNode did not start; please log in to [{{ slave1_ip }}] and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: slave1_namenode_status.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  run_once: true
  delegate_facts: true
  delegate_to: "{{ slave1_ip }}"

- name: Start DataNode
  block:
    - name: start hadoop Worker nodes
      shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh worker

    - name: Waiting for the DataNode to start, sleep 60s
      shell: sleep 60

    - name: checking DataNode status
      shell: source /etc/profile && jps | grep DataNode | grep -v grep | wc -l
      register: datanode_status

    - name: checking DataNode
      fail:
        msg: "The DataNode did not start; please log in to [{{ inventory_hostname }}] and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*datanode*"
      when: datanode_status.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

- name: delete {{ deploy_dir }}/hadoop-2.7.1.tar.gz
  file:
    path: "{{ deploy_dir }}/{{ hadoop_version }}.tar.gz"
    state: absent

9
Apache Hadoop/2.7.1/hdfs/role/tasks/main.yml
Normal file
@@ -0,0 +1,9 @@
- block:
    - include: uninstall.yml
    - include: deploy.yml
    - include: status-check.yml
  when: operation == "install"

- block:
    - include: uninstall.yml
  when: operation == "uninstall"
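
# Sketch of a typical invocation (playbook and inventory file names are
# assumptions, not part of this commit):
#
#   ansible-playbook -i configurations/hosts site.yml -e "operation=install"
#   ansible-playbook -i configurations/hosts site.yml -e "operation=uninstall"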

53
Apache Hadoop/2.7.1/hdfs/role/tasks/status-check.yml
Normal file
@@ -0,0 +1,53 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{ groups.hdfs | length }}"

- name: Waiting for HDFS to start, sleep 30s
  shell: sleep 30

- block:
    - name: checking JournalNode status
      shell: source /etc/profile && jps | grep JournalNode | grep -v grep | wc -l
      register: status_out

    - name: checking JournalNode
      fail:
        msg: "JournalNode failed to start; please log in to {{ inventory_hostname }} and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*journalnode*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  when: inventory_hostname in groups['hdfs'][0:3]

- block:
    - name: checking DFSZKFailoverController status
      shell: source /etc/profile && jps | grep DFSZKFailoverController | grep -v grep | wc -l
      register: status_out

    - name: checking DFSZKFailoverController
      fail:
        msg: "DFSZKFailoverController failed to start; please log in to {{ inventory_hostname }} and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*zkfc*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

    - name: checking NameNode status
      shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
      register: status_out

    - name: checking NameNode
      fail:
        msg: "NameNode failed to start; please log in to {{ inventory_hostname }} and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  when: inventory_hostname in groups['hdfs'][0:2]

- name: checking DataNode status
  shell: source /etc/profile && jps | grep DataNode | grep -v grep | wc -l
  register: status_out

- name: checking DataNode
  fail:
    msg: "DataNode failed to start; please log in to {{ inventory_hostname }} and collect the logs at {{ deploy_dir }}/{{ hadoop_version }}/logs/*datanode*"
  when: status_out.stdout != '1'
  run_once: true
  delegate_to: 127.0.0.1

38
Apache Hadoop/2.7.1/hdfs/role/tasks/uninstall.yml
Normal file
@@ -0,0 +1,38 @@
- block:
    - name: copy unload_hdfs.sh to {{ deploy_dir }}/
      template:
        src: 'unload_hdfs.sh.j2'
        dest: '{{ deploy_dir }}/unload_hdfs.sh'
        force: true
        mode: 0755

    - name: unload hadoop
      shell: cd {{ deploy_dir }} && sh unload_hdfs.sh

    - name: Ansible delete {{ deploy_dir }}/unload_hdfs.sh
      file:
        path: "{{ deploy_dir }}/unload_hdfs.sh"
        state: absent

- name: Checking whether ZooKeeper has Hadoop nodes
  shell: docker exec zookeeper zkCli.sh ls / | grep -w "hadoop-ha" | wc -l
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  register: has_zknode

- name: Delete Hadoop nodes in ZooKeeper
  shell: "docker exec zookeeper zkCli.sh rmr /hadoop-ha"
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  when: has_zknode.stdout | int >= 1
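
# zkCli.sh's `rmr` deletes a znode recursively; note that newer ZooKeeper
# releases (3.5+) deprecate it in favour of `deleteall`, so this line may
# need adjusting depending on the image behind the `zookeeper` container.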

- name: Check if the Hadoop service still exists
  shell: source /etc/profile && jps -l | egrep "org.apache.hadoop.hdfs.qjournal.server.JournalNode|org.apache.hadoop.hdfs.tools.DFSZKFailoverController|org.apache.hadoop.hdfs.server.datanode.DataNode|org.apache.hadoop.hdfs.server.namenode.NameNode" | wc -l
  register: check_out
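
# `jps -l` lists the fully-qualified main class of each JVM, so the egrep
# above matches any HDFS daemon (JournalNode, ZKFC, DataNode, NameNode)
# that survived the unload script.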

- name: To terminate execution
  fail:
    msg: "Uninstall failed: the component may not have been deployed by this installer. Please uninstall it manually before continuing."
  run_once: true
  delegate_to: 127.0.0.1
  when: check_out.stdout | int >= 1