Initial version of the Ansible playbooks for deploying each component
5
Apache Hadoop/2.7.1/hdfs/hosts
Normal file
@@ -0,0 +1,5 @@
[zookeeper]
192.168.45.102

[hdfs]
192.168.45.102
7
Apache Hadoop/2.7.1/hdfs/install.yml
Normal file
@@ -0,0 +1,7 @@
- hosts: hdfs
  remote_user: root
  roles:
    - role
  vars_files:
    - role/vars/main.yml
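Usage note (not part of the committed files): with an inventory that lists at least three hosts under [hdfs] — deploy.yml aborts below that — the play above can be run roughly as follows; passing operation as an extra-var is an assumption, any standard way of setting the variable works.

    # Install HDFS (role/tasks/main.yml runs uninstall.yml, deploy.yml and status-check.yml in order)
    ansible-playbook -i hosts install.yml -e "operation=install"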
23
Apache Hadoop/2.7.1/hdfs/role/defaults/main.yml
Normal file
@@ -0,0 +1,23 @@
# The default installation location
deploy_dir: /data/olap

# The default data storage location, used to store application data, logs and configuration files
data_dir: /data/olap

hadoop:
  namenode:
    # JVM heap size of the Hadoop NameNode.
    java_opt: '-Xmx1024m -Xms1024m'
    # The number of NameNode RPC server threads that listen to requests from clients.
    dfs.namenode.handler.count: 30
  datanode:
    # JVM heap size of the Hadoop DataNode.
    java_opt: '-Xmx1024m -Xms1024m'
    # The number of server threads for the DataNode.
    dfs.datanode.handler.count: 40
  journalnode:
    # JVM heap size of the Hadoop JournalNode.
    java_opt: '-Xmx1024m -Xms1024m'
  zkfc:
    # JVM heap size of the Hadoop DFSZKFailoverController.
    java_opt: '-Xmx1024m -Xms1024m'
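These defaults can be overridden per environment without editing the role; a hedged sketch, the paths below are illustrative only:

    # Place the installation and data under /opt/olap instead of /data/olap (example values)
    ansible-playbook -i hosts install.yml -e "operation=install" -e "deploy_dir=/opt/olap data_dir=/opt/olap"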
223
Apache Hadoop/2.7.1/hdfs/role/tasks/deploy.yml
Normal file
@@ -0,0 +1,223 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{groups.hdfs|length}}"

- name: To terminate execution
  fail:
    msg: "Fully distributed mode needs at least 3 nodes, please check configurations/hosts -> hdfs"
  when: node_nums | int < 3

- name: check JDK version
  shell: source /etc/profile && java -version 2>&1 | grep {{ java_version }} | wc -l
  ignore_errors: false
  register: jdk_out

- name: To terminate execution
  fail:
    msg: "JDK is not installed in the target cluster, please check!"
  when: jdk_out.stdout != '2'
  run_once: true
  delegate_to: 127.0.0.1

- name: create hadoop package path:{{ deploy_dir }}
  file:
    state: directory
    path: '{{ item.path }}'
  with_items:
    - { path: '{{ hdfs_data_dir }}' }
    - { path: '{{ deploy_dir }}' }

- name: master_ip to ansible variable
  set_fact: master_ip={{groups.hdfs[0]}}

- name: slave1_ip to ansible variable
  set_fact: slave1_ip={{groups.hdfs[1]}}

- name: slave2_ip to ansible variable
  set_fact: slave2_ip={{groups.hdfs[2]}}

# Unpack the tarball
- name: unpack hadoop-2.7.1.tar.gz to {{ deploy_dir }}/
  unarchive:
    src: 'files/{{ hadoop_version }}.tar.gz'
    dest: '{{ deploy_dir }}/'

- name: Copying hadoop config files
  template:
    src: '{{ item.src }}'
    dest: '{{ deploy_dir }}/{{ hadoop_version }}/{{ item.dest }}'
    mode: '{{ item.mode }}'
    backup: false
  with_items:
    - { src: 'core-site.xml.j2', dest: 'etc/hadoop/core-site.xml', mode: '0644' }
    - { src: 'hdfs-site.xml.j2', dest: 'etc/hadoop/hdfs-site.xml', mode: '0644' }
    - { src: 'mapred-site.xml.j2', dest: 'etc/hadoop/mapred-site.xml', mode: '0644' }
    - { src: 'slaves.j2', dest: 'etc/hadoop/slaves', mode: '0644' }
    - { src: 'hadoop-env.sh.j2', dest: 'etc/hadoop/hadoop-env.sh', mode: '0755' }
    - { src: 'set_hdfs_env.sh.j2', dest: 'bin/set_hdfs_env.sh', mode: '0755' }

- name: Copying HDFS config to {{ master_ip }}
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: false
  with_items:
    - { src: 'daemonscript/dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
    - { src: 'daemonscript/dae-hdfsmaster.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsmaster.sh' }
    - { src: 'daemonscript/keephdfsmaster.j2', dest: '/etc/init.d/keephdfsmaster' }
    - { src: 'daemonscript/keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
    - { src: 'ini_hdfs.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh' }
  run_once: true
  delegate_to: "{{ master_ip }}"

- name: Copying HDFS config to {{ slave1_ip }}
  template:
    src: 'daemonscript/{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: yes
  with_items:
    - { src: 'dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
    - { src: 'dae-hdfsslave.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsslave.sh' }
    - { src: 'keephdfsslave.j2', dest: '/etc/init.d/keephdfsslave' }
    - { src: 'keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
  run_once: true
  delegate_to: "{{ slave1_ip }}"

- name: Copying HDFS config to {{ slave2_ip }}
  template:
    src: 'daemonscript/{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: yes
  with_items:
    - { src: 'dae-hdfsjournal.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh' }
    - { src: 'keephdfsjournal.j2', dest: '/etc/init.d/keephdfsjournal' }
  run_once: true
  delegate_facts: true
  delegate_to: "{{ slave2_ip }}"

- name: Copying HDFS config to worker nodes
  template:
    src: 'daemonscript/{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
    backup: yes
  with_items:
    - { src: 'dae-hdfsworker.sh.j2', dest: '{{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsworker.sh' }
    - { src: 'keephdfsworker.j2', dest: '/etc/init.d/keephdfsworker' }

- name: set hadoop env
  shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh {{ item.operation }}
  with_items:
    - { operation: 'chkconfig' }
    - { operation: 'journal' }

- name: Waiting for the JournalNode to start, sleep 10s
  shell: sleep 10

- block:
    - name: checking JournalNode status
      shell: source /etc/profile && jps | grep JournalNode | grep -v grep | wc -l
      register: status_out

    - name: checking JournalNode
      fail:
        msg: "JournalNode failed to start. Please log in to {{ inventory_hostname }} and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*journalnode*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  when: inventory_hostname in [master_ip,slave1_ip,slave2_ip]

- name: Initialize NameNode/ZKFC, start master NameNode
  block:
    - name: initialize hadoop NameNode
      shell: sh {{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh namenode | grep "yes" | grep -v grep | wc -l
      register: ini_namenode_out

    - name: checking namenode init status
      fail:
        msg: "NameNode initialization failed. Please log in to [{{ master_ip }}] and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: ini_namenode_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

    - name: initialize hadoop ZKFC
      shell: sh {{ deploy_dir }}/{{ hadoop_version }}/bin/ini_hdfs.sh zkfc | grep "yes" | grep -v grep | wc -l
      register: ini_zkfc_out

    - name: checking hadoop-zk init status
      fail:
        msg: "hadoop-zk initialization failed. Please log in to [{{ master_ip }}] and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/"
      when: ini_zkfc_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

    - name: start hadoop Master node
      shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh master

    - name: Waiting for the master NameNode to start, sleep 20s
      shell: sleep 20

    - name: checking {{ master_ip }} NameNode status
      shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
      register: master_namenode_status

    - name: checking master NameNode
      fail:
        msg: "The master NameNode is not running. Please log in to [{{ master_ip }}] and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: master_namenode_status.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  run_once: true
  delegate_facts: true
  delegate_to: "{{ master_ip }}"

- name: Start slave NameNode
  block:
    - name: copying {{ master_ip }} NameNode files to Slave
      shell: "yes | {{ deploy_dir }}/{{ hadoop_version }}/bin/hdfs namenode -bootstrapStandby"

    - name: start hadoop Slave node
      shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh slave

    - name: Waiting for the slave NameNode to start, sleep 60s
      shell: sleep 60

    - name: checking {{ slave1_ip }} NameNode status
      shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
      register: slave1_namenode_status

    - name: checking slave1 NameNode
      fail:
        msg: "The slave NameNode is not running. Please log in to [{{ slave1_ip }}] and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: slave1_namenode_status.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  run_once: true
  delegate_facts: true
  delegate_to: "{{ slave1_ip }}"

- name: Start DataNode
  block:
    - name: start hadoop Worker nodes
      shell: cd {{ deploy_dir }}/{{ hadoop_version }}/bin/ && ./set_hdfs_env.sh worker

    - name: Waiting for the DataNode to start, sleep 60s
      shell: sleep 60

    - name: checking DataNode status
      shell: source /etc/profile && jps | grep DataNode | grep -v grep | wc -l
      register: datanode_status

    - name: checking DataNode
      fail:
        msg: "DataNode is not running. Please log in to [{{ inventory_hostname }}] and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*datanode*"
      when: datanode_status.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

- name: delete {{ deploy_dir }}/hadoop-2.7.1.tar.gz
  file:
    path: "{{ deploy_dir }}/{{ hadoop_version }}.tar.gz"
    state: absent
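Once this task file has run, HA health can be spot-checked by hand on the first two hdfs hosts; a minimal sketch using the same commands the watchdog scripts below rely on:

    source /etc/profile
    jps                                  # expect NameNode, JournalNode and DFSZKFailoverController
    hdfs haadmin -getServiceState nn1    # prints active or standby
    hdfs dfsadmin -report                # DataNode capacity and liveness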
9
Apache Hadoop/2.7.1/hdfs/role/tasks/main.yml
Normal file
@@ -0,0 +1,9 @@
- block:
    - include: uninstall.yml
    - include: deploy.yml
    - include: status-check.yml
  when: (operation) == "install"

- block:
    - include: uninstall.yml
  when: (operation) == "uninstall"
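The uninstall path reuses the same entry playbook; a sketch, again assuming operation is passed as an extra-var:

    # Remove a previously deployed HDFS (runs only uninstall.yml)
    ansible-playbook -i hosts install.yml -e "operation=uninstall"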
53
Apache Hadoop/2.7.1/hdfs/role/tasks/status-check.yml
Normal file
@@ -0,0 +1,53 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{groups.hdfs|length}}"

- name: Waiting for HDFS to start, sleep 30s
  shell: sleep 30

- block:
    - name: checking JournalNode status
      shell: source /etc/profile && jps | grep JournalNode | grep -v grep | wc -l
      register: status_out

    - name: checking JournalNode
      fail:
        msg: "JournalNode failed to start. Please log in to {{ inventory_hostname }} and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*journalnode*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  when: inventory_hostname in groups['hdfs'][0:3]

- block:
    - name: checking DFSZKFailoverController status
      shell: source /etc/profile && jps | grep DFSZKFailoverController | grep -v grep | wc -l
      register: status_out

    - name: checking DFSZKFailoverController
      fail:
        msg: "DFSZKFailoverController failed to start. Please log in to {{ inventory_hostname }} and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*zkfc*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1

    - name: checking NameNode status
      shell: source /etc/profile && jps | grep NameNode | grep -v grep | wc -l
      register: status_out

    - name: checking NameNode
      fail:
        msg: "NameNode failed to start. Please log in to {{ inventory_hostname }} and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*namenode*"
      when: status_out.stdout != '1'
      run_once: true
      delegate_to: 127.0.0.1
  when: inventory_hostname in groups['hdfs'][0:2]

- name: checking DataNode status
  shell: source /etc/profile && jps | grep DataNode | grep -v grep | wc -l
  register: status_out

- name: checking DataNode
  fail:
    msg: "DataNode failed to start. Please log in to {{ inventory_hostname }} and keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ hadoop_version }}/logs/*datanode*"
  when: status_out.stdout != '1'
  run_once: true
  delegate_to: 127.0.0.1
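The same probes can be repeated manually on any node if a check fails; a minimal sketch:

    source /etc/profile
    jps | grep -c JournalNode               # expected 1 on the first three hdfs hosts
    jps | grep -c DFSZKFailoverController   # expected 1 on the two NameNode hosts
    jps | grep -c NameNode                  # expected 1 on the two NameNode hosts
    jps | grep -c DataNode                  # expected 1 on every hdfs host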
38
Apache Hadoop/2.7.1/hdfs/role/tasks/uninstall.yml
Normal file
@@ -0,0 +1,38 @@
- block:
    - name: copy unload_hdfs.sh to {{ deploy_dir }}/
      template:
        src: 'unload_hdfs.sh.j2'
        dest: '{{ deploy_dir }}/unload_hdfs.sh'
        force: true
        mode: 0755

    - name: unload hadoop
      shell: cd {{ deploy_dir }} && sh unload_hdfs.sh

    - name: Ansible delete {{ deploy_dir }}/unload_hdfs.sh
      file:
        path: "{{ deploy_dir }}/unload_hdfs.sh"
        state: absent

- name: Checking whether ZooKeeper still has Hadoop nodes
  shell: docker exec zookeeper zkCli.sh ls / | grep -w "hadoop-ha" | wc -l
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  register: has_zknode

- name: Delete Hadoop nodes in ZooKeeper
  shell: "docker exec zookeeper zkCli.sh rmr /hadoop-ha"
  run_once: true
  delegate_to: "{{ groups.zookeeper[0] }}"
  when: has_zknode.stdout >= '1'

- name: Check if the Hadoop service still exists
  shell: source /etc/profile && jps -l | egrep "org.apache.hadoop.hdfs.qjournal.server.JournalNode|org.apache.hadoop.hdfs.tools.DFSZKFailoverController|org.apache.hadoop.hdfs.server.datanode.DataNode|org.apache.hadoop.hdfs.server.namenode.NameNode" | wc -l
  register: check_out

- name: To terminate execution
  fail:
    msg: "Uninstall failed; the component was probably not deployed by this installer. Please remove it manually before continuing with the installation"
  run_once: true
  delegate_to: 127.0.0.1
  when: check_out.stdout >= '1'
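If the ZooKeeper side needs to be verified by hand, the same dockerized zkCli.sh calls can be issued against the first zookeeper host; a sketch:

    docker exec zookeeper zkCli.sh ls /             # /hadoop-ha should no longer be listed
    docker exec zookeeper zkCli.sh rmr /hadoop-ha   # manual cleanup if it is still present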
67
Apache Hadoop/2.7.1/hdfs/role/templates/core-site.xml.j2
Normal file
@@ -0,0 +1,67 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<!--
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
<configuration>
|
||||
<property>
|
||||
<name>fs.defaultFS</name>
|
||||
<value>hdfs://ns1</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.tmp.dir</name>
|
||||
<value>file:{{ hdfs_data_dir }}/tmp</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>io.file.buffer.size</name>
|
||||
<value>131702</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.root.hosts</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.proxyuser.root.groups</name>
|
||||
<value>*</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.logfile.size</name>
|
||||
<value>10000000</value>
|
||||
<description>The max size of each log file</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.logfile.count</name>
|
||||
<value>1</value>
|
||||
<description>The max number of log files</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>ha.zookeeper.quorum</name>
|
||||
<value>{% for dev_info in groups.zookeeper %}{{ dev_info }}:2181{% if not loop.last %},{% endif %}{% endfor %}</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>ipc.client.connect.timeout</name>
|
||||
<value>90000</value>
|
||||
</property>
|
||||
</configuration>
|
||||
|
||||
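A quick way to confirm the template rendered as intended is to inspect the file on a target host after deployment; the path below assumes the default deploy_dir and hadoop_version:

    # Should print a single comma-separated quorum, e.g. <value>zk1:2181,zk2:2181,zk3:2181</value> (hosts illustrative)
    grep -A 1 ha.zookeeper.quorum /data/olap/hadoop-2.7.1/etc/hadoop/core-site.xml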
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
source /etc/profile
|
||||
|
||||
BASE_DIR={{ deploy_dir }}
|
||||
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
function set_log(){
|
||||
RES_SUM_FILE=$BASE_DIR/$VERSION/logs
|
||||
|
||||
if [ ! -f "$RES_SUM_FILE/" ]
|
||||
then
|
||||
mkdir -p $RES_SUM_FILE
|
||||
fi
|
||||
|
||||
if [ ! -d "$RES_SUM_FILE/$1" ];then
|
||||
echo "0" > $RES_SUM_FILE/$1
|
||||
fi
|
||||
|
||||
OLD_NUM=`cat $RES_SUM_FILE/$1`
|
||||
RESTART_NUM=`expr $OLD_NUM + 1`
|
||||
echo $RESTART_NUM > $RES_SUM_FILE/$1
|
||||
if [ $OLD_NUM -eq "0" ];then
|
||||
echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
else
|
||||
echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
while true ; do
|
||||
|
||||
HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`
|
||||
|
||||
if [ $HAS_JN -eq "0" ];then
|
||||
yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
|
||||
set_log jnRes_sum JournalNode
|
||||
fi
|
||||
|
||||
sleep 60
|
||||
done
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
#!/bin/bash
|
||||
source /etc/profile
|
||||
|
||||
BASE_DIR={{ deploy_dir }}
|
||||
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
function set_log(){
|
||||
RES_SUM_FILE=$BASE_DIR/$VERSION/logs
|
||||
|
||||
if [ ! -f "$RES_SUM_FILE/" ]
|
||||
then
|
||||
mkdir -p $RES_SUM_FILE
|
||||
fi
|
||||
|
||||
if [ ! -d "$RES_SUM_FILE/$1" ];then
|
||||
echo "0" > $RES_SUM_FILE/$1
|
||||
fi
|
||||
|
||||
OLD_NUM=`cat $RES_SUM_FILE/$1`
|
||||
RESTART_NUM=`expr $OLD_NUM + 1`
|
||||
echo $RESTART_NUM > $RES_SUM_FILE/$1
|
||||
if [ $OLD_NUM -eq "0" ];then
|
||||
echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
else
|
||||
echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
while true ; do
|
||||
|
||||
HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
|
||||
HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
|
||||
#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
|
||||
|
||||
if [ $HAS_NN -eq "0" ];then
|
||||
yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
|
||||
set_log nnRes_sum NameNode
|
||||
fi
|
||||
|
||||
if [ $HAS_ZKFC -eq "0" ];then
|
||||
yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
|
||||
set_log zkfcRes_sum DFSZKFailoverController
|
||||
fi
|
||||
|
||||
#if [ $HAS_NM -eq "0" ];then
|
||||
# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
|
||||
# set_log nmRes_sum NodeManager
|
||||
#fi
|
||||
|
||||
sleep 60
|
||||
done
|
||||
@@ -0,0 +1,60 @@
|
||||
#!/bin/bash
|
||||
source /etc/profile
|
||||
|
||||
BASE_DIR={{ deploy_dir }}
|
||||
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
function set_log(){
|
||||
RES_SUM_FILE=$BASE_DIR/$VERSION/logs
|
||||
|
||||
if [ ! -f "$RES_SUM_FILE/" ]
|
||||
then
|
||||
mkdir -p $RES_SUM_FILE
|
||||
fi
|
||||
|
||||
if [ ! -d "$RES_SUM_FILE/$1" ];then
|
||||
echo "0" > $RES_SUM_FILE/$1
|
||||
fi
|
||||
|
||||
OLD_NUM=`cat $RES_SUM_FILE/$1`
|
||||
RESTART_NUM=`expr $OLD_NUM + 1`
|
||||
echo $RESTART_NUM > $RES_SUM_FILE/$1
|
||||
if [ $OLD_NUM -eq "0" ];then
|
||||
echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
else
|
||||
echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
while true ; do
|
||||
|
||||
HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
|
||||
HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
|
||||
#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
|
||||
#HAS_RM=`ps -ef | grep ResourceManager | grep -v grep | wc -l`
|
||||
|
||||
if [ $HAS_NN -eq "0" ];then
|
||||
yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
|
||||
set_log nnRes_sum NameNode
|
||||
fi
|
||||
|
||||
if [ $HAS_ZKFC -eq "0" ];then
|
||||
yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
|
||||
set_log zkfcRes_sum DFSZKFailoverController
|
||||
fi
|
||||
|
||||
#if [ $HAS_NM -eq "0" ];then
|
||||
# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
|
||||
# set_log nmRes_sum NodeManager
|
||||
#fi
|
||||
|
||||
#if [ $HAS_RM -eq "0" ];then
|
||||
# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
|
||||
# set_log RMRes_sum ResourceManager
|
||||
#fi
|
||||
|
||||
sleep 60
|
||||
done
|
||||
|
||||
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
source /etc/profile
|
||||
|
||||
BASE_DIR={{ deploy_dir }}
|
||||
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
function set_log(){
|
||||
RES_SUM_FILE=$BASE_DIR/$VERSION/logs
|
||||
|
||||
if [ ! -f "$RES_SUM_FILE/" ]
|
||||
then
|
||||
mkdir -p $RES_SUM_FILE
|
||||
fi
|
||||
|
||||
if [ ! -d "$RES_SUM_FILE/$1" ];then
|
||||
echo "0" > $RES_SUM_FILE/$1
|
||||
fi
|
||||
|
||||
OLD_NUM=`cat $RES_SUM_FILE/$1`
|
||||
RESTART_NUM=`expr $OLD_NUM + 1`
|
||||
echo $RESTART_NUM > $RES_SUM_FILE/$1
|
||||
if [ $OLD_NUM -eq "0" ];then
|
||||
echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2服务初次启动" >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
else
|
||||
echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2服务异常 - 重启次数 -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
while true ; do
|
||||
|
||||
HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
|
||||
#HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
|
||||
|
||||
if [ $HAS_DN -eq "0" ];then
|
||||
yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
|
||||
set_log dnRes_sum DataNode
|
||||
fi
|
||||
|
||||
#if [ $HAS_NM -eq "0" ];then
|
||||
# $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
|
||||
# set_log nmRes_sum NodeManager
|
||||
#fi
|
||||
|
||||
sleep 60
|
||||
done
|
||||
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# keephdfsjournal    Keeps the HDFS JournalNode daemon running via dae-hdfsjournal.sh.
|
||||
#
|
||||
# chkconfig:123456 40 60
|
||||
# description: keephdfsjournal
|
||||
source /etc/profile
|
||||
PRO_NAME=keephdfsjournal
|
||||
|
||||
INS_DIR={{ deploy_dir }}
|
||||
# Hadoop version
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
case $1 in
|
||||
start)
|
||||
journal=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
|
||||
if [ $journal -lt 1 ];then
|
||||
nohup $INS_DIR/$VERSION/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
|
||||
;;
|
||||
|
||||
stop)
|
||||
HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | awk '{print $2}'`
|
||||
if [ $HAS_KEEP_SHELL ];then
|
||||
echo "守护进程PID:$HAS_KEEP_SHELL"
|
||||
kill -9 $HAS_KEEP_SHELL
|
||||
fi
|
||||
|
||||
sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop journalnode > /dev/null
|
||||
;;
|
||||
|
||||
status)
|
||||
num=`ps -ef | grep JournalNode | grep -v grep | wc -l`
|
||||
if [ "$num" -eq "1" ];then
|
||||
echo "JournalNode进程已启动"
|
||||
else
|
||||
echo "JournalNode进程未启动"
|
||||
fi
|
||||
|
||||
;;
|
||||
|
||||
* )
|
||||
echo "use keephdfsjournal [start|stop|status]"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# keephdfsmaster     Keeps the master NameNode and ZKFC daemons running via dae-hdfsmaster.sh.
|
||||
#
|
||||
# chkconfig:123456 40 60
|
||||
# description: keephdfsmaster
|
||||
source /etc/profile
|
||||
PRO_NAME=keephdfsmaster
|
||||
|
||||
INS_DIR={{ deploy_dir }}
|
||||
# Hadoop version
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
case $1 in
|
||||
start)
|
||||
master=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
|
||||
if [ $master -lt 1 ];then
|
||||
nohup $INS_DIR/$VERSION/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
;;
|
||||
|
||||
stop)
|
||||
HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | awk '{print $2}'`
|
||||
if [ $HAS_KEEP_SHELL ];then
|
||||
echo "守护进程PID:$HAS_KEEP_SHELL"
|
||||
kill -9 $HAS_KEEP_SHELL
|
||||
fi
|
||||
|
||||
sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
|
||||
sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
|
||||
;;
|
||||
|
||||
status)
|
||||
hdfs haadmin -getServiceState nn1
|
||||
hdfs dfsadmin -report
|
||||
;;
|
||||
|
||||
* )
|
||||
echo "use keephdfsmaster [start|stop|status]"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# keephdfsslave      Keeps the standby NameNode and ZKFC daemons running via dae-hdfsslave.sh.
|
||||
#
|
||||
# chkconfig:123456 40 60
|
||||
# description: keephdfsslave
|
||||
source /etc/profile
|
||||
PRO_NAME=keephdfsslave
|
||||
|
||||
INS_DIR={{ deploy_dir }}
|
||||
# Hadoop version
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
case $1 in
|
||||
start)
|
||||
slave=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
|
||||
if [ $slave -lt 1 ];then
|
||||
nohup $INS_DIR/$VERSION/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
;;
|
||||
|
||||
stop)
|
||||
HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | awk '{print $2}'`
|
||||
if [ $HAS_KEEP_SHELL ];then
|
||||
echo "守护进程PID:$HAS_KEEP_SHELL"
|
||||
kill -9 $HAS_KEEP_SHELL
|
||||
fi
|
||||
|
||||
sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
|
||||
sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
|
||||
;;
|
||||
|
||||
status)
|
||||
hdfs haadmin -getServiceState nn2
|
||||
hdfs dfsadmin -report
|
||||
;;
|
||||
|
||||
* )
|
||||
echo "use keephdfsslave [start|stop|status]"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# keephdfsworker     Keeps the DataNode daemon running via dae-hdfsworker.sh.
|
||||
#
|
||||
# chkconfig:123456 40 60
|
||||
# description: keephdfsworker
|
||||
source /etc/profile
|
||||
PRO_NAME=keephdfsworker
|
||||
|
||||
INS_DIR={{ deploy_dir }}
|
||||
# Hadoop version
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
case $1 in
|
||||
start)
|
||||
worker=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
|
||||
if [ $worker -lt 1 ];then
|
||||
nohup $INS_DIR/$VERSION/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
|
||||
;;
|
||||
|
||||
stop)
|
||||
HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | awk '{print $2}'`
|
||||
if [ $HAS_KEEP_SHELL ];then
|
||||
echo "守护进程PID:$HAS_KEEP_SHELL"
|
||||
kill -9 $HAS_KEEP_SHELL
|
||||
fi
|
||||
|
||||
sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop datanode > /dev/null
|
||||
;;
|
||||
|
||||
status)
|
||||
num=`ps -ef | grep DataNode | grep -v grep | wc -l`
|
||||
if [ "$num" -eq "1" ];then
|
||||
echo "DataNode进程已启动"
|
||||
else
|
||||
echo "DataNode进程未启动"
|
||||
fi
|
||||
|
||||
;;
|
||||
|
||||
* )
|
||||
echo "use keephdfsworker [start|stop|status]"
|
||||
;;
|
||||
esac
|
||||
|
||||
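All four keephdfs* scripts are registered with chkconfig by set_hdfs_env.sh further down; each can also be driven manually as a normal SysV service, for example on a worker node:

    service keephdfsworker start    # launches the dae-hdfsworker.sh watchdog, which starts the DataNode
    service keephdfsworker status   # reports whether the DataNode process is running
    service keephdfsworker stop     # kills the watchdog and stops the DataNode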
105
Apache Hadoop/2.7.1/hdfs/role/templates/hadoop-env.sh.j2
Normal file
@@ -0,0 +1,105 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Set Hadoop-specific environment variables here.
|
||||
|
||||
# The only required environment variable is JAVA_HOME. All others are
|
||||
# optional. When running a distributed configuration it is best to
|
||||
# set JAVA_HOME in this file, so that it is correctly defined on
|
||||
# remote nodes.
|
||||
export HADOOP_NAMENODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9905:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
|
||||
export HADOOP_DATANODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9906:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
|
||||
|
||||
# The java implementation to use.
|
||||
#export HADOOP_HEAPSIZE=m
|
||||
#export JAVA_HOME=/usr/local/jdk/jdk1.8.0_73
|
||||
export JAVA_HOME=$JAVA_HOME
|
||||
# The jsvc implementation to use. Jsvc is required to run secure datanodes
|
||||
# that bind to privileged ports to provide authentication of data transfer
|
||||
# protocol. Jsvc is not required if SASL is configured for authentication of
|
||||
# data transfer protocol using non-privileged ports.
|
||||
#export JSVC_HOME=${JSVC_HOME}
|
||||
|
||||
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
|
||||
|
||||
# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
|
||||
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
|
||||
if [ "$HADOOP_CLASSPATH" ]; then
|
||||
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
|
||||
else
|
||||
export HADOOP_CLASSPATH=$f
|
||||
fi
|
||||
done
|
||||
|
||||
# The maximum amount of heap to use, in MB. Default is 1000.
|
||||
#export HADOOP_HEAPSIZE=
|
||||
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
|
||||
|
||||
# Extra Java runtime options. Empty by default.
|
||||
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
|
||||
|
||||
# Command specific options appended to HADOOP_OPTS when specified
|
||||
export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS {{ hadoop.namenode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-namenode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"
|
||||
|
||||
export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS {{ hadoop.datanode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-datanode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=ERROR,RFAS"
|
||||
|
||||
export HADOOP_JOURNALNODE_OPTS="$HADOOP_JOURNALNODE_OPTS {{ hadoop.journalnode.java_opt }}"
|
||||
|
||||
export HADOOP_ZKFC_OPTS="$HADOOP_ZKFC_OPTS {{ hadoop.zkfc.java_opt }}"
|
||||
|
||||
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
|
||||
|
||||
export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
|
||||
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
|
||||
|
||||
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
|
||||
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
|
||||
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
|
||||
|
||||
# On secure datanodes, user to run the datanode as after dropping privileges.
|
||||
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
|
||||
# to provide authentication of data transfer protocol. This **MUST NOT** be
|
||||
# defined if SASL is configured for authentication of data transfer protocol
|
||||
# using non-privileged ports.
|
||||
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
|
||||
|
||||
# Where log files are stored. $HADOOP_HOME/logs by default.
|
||||
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
|
||||
|
||||
# Where log files are stored in the secure data environment.
|
||||
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
|
||||
|
||||
###
|
||||
# HDFS Mover specific parameters
|
||||
###
|
||||
# Specify the JVM options to be used when starting the HDFS Mover.
|
||||
# These options will be appended to the options specified as HADOOP_OPTS
|
||||
# and therefore may override any similar flags set in HADOOP_OPTS
|
||||
#
|
||||
# export HADOOP_MOVER_OPTS=""
|
||||
|
||||
###
|
||||
# Advanced Users Only!
|
||||
###
|
||||
|
||||
# The directory where pid files are stored. /tmp by default.
|
||||
# NOTE: this should be set to a directory that can only be written to by
|
||||
# the user that will run the hadoop daemons. Otherwise there is the
|
||||
# potential for a symlink attack.
|
||||
export HADOOP_PID_DIR={{ deploy_dir }}/{{ hadoop_version }}/pids
|
||||
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
|
||||
# A string representing this instance of hadoop. $USER by default.
|
||||
export HADOOP_IDENT_STRING=$USER
|
||||
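The two *_JMX_OPTS exports assume a jmx_prometheus_javaagent jar and hdfs.yaml under monitor/ in the unpacked release; once the daemons are up, the exporters can be probed like this (host names are placeholders, not from the commit):

    curl -s http://<namenode-host>:9905/metrics | head   # NameNode JMX exporter
    curl -s http://<datanode-host>:9906/metrics | head   # DataNode JMX exporter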
142
Apache Hadoop/2.7.1/hdfs/role/templates/hdfs-site.xml.j2
Normal file
@@ -0,0 +1,142 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<!--
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
<configuration>
|
||||
<property>
|
||||
<name>dfs.namenode.name.dir</name>
|
||||
<value>file:{{ hdfs_data_dir }}/dfs/name</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.datanode.data.dir</name>
|
||||
<value>file:{{ hdfs_data_dir }}/dfs/data</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.replication</name>
|
||||
<value>2</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.webhdfs.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.permissions</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.permissions.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.nameservices</name>
|
||||
<value>ns1</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.blocksize</name>
|
||||
<value>134217728</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.ha.namenodes.ns1</name>
|
||||
<value>nn1,nn2</value>
|
||||
</property>
|
||||
<!-- RPC address of nn1 (the host where nn1 runs) -->
|
||||
<property>
|
||||
<name>dfs.namenode.rpc-address.ns1.nn1</name>
|
||||
<value>{{ groups.hdfs[0] }}:9000</value>
|
||||
</property>
|
||||
<!-- HTTP address of nn1, used for external access -->
|
||||
<property>
|
||||
<name>dfs.namenode.http-address.ns1.nn1</name>
|
||||
<value>{{ groups.hdfs[0] }}:50070</value>
|
||||
</property>
|
||||
<!-- RPC address of nn2 (the host where nn2 runs) -->
|
||||
<property>
|
||||
<name>dfs.namenode.rpc-address.ns1.nn2</name>
|
||||
<value>{{ groups.hdfs[1] }}:9000</value>
|
||||
</property>
|
||||
<!-- HTTP address of nn2, used for external access -->
|
||||
<property>
|
||||
<name>dfs.namenode.http-address.ns1.nn2</name>
|
||||
<value>{{ groups.hdfs[1] }}:50070</value>
|
||||
</property>
|
||||
<!-- Where the NameNode stores its shared edit log on the JournalNodes (usually co-located with ZooKeeper) -->
|
||||
<property>
|
||||
<name>dfs.namenode.shared.edits.dir</name>
|
||||
<value>qjournal://{{groups.hdfs[0]}}:8485;{{groups.hdfs[1]}}:8485;{{groups.hdfs[2]}}:8485/ns1</value>
|
||||
</property>
|
||||
<!-- Where the JournalNode stores its data on local disk -->
|
||||
<property>
|
||||
<name>dfs.journalnode.edits.dir</name>
|
||||
<value>{{ hdfs_data_dir }}/journal</value>
|
||||
</property>
|
||||
<!-- Java class used by HDFS clients to reach the NameNodes through a proxy and determine which one is active -->
|
||||
<property>
|
||||
<name>dfs.client.failover.proxy.provider.ns1</name>
|
||||
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
|
||||
</property>
|
||||
<!-- Fencing method used during automatic failover; several methods exist (see the official docs), here the remote-login-and-kill (sshfence) method is used -->
|
||||
<property>
|
||||
<name>dfs.ha.fencing.methods</name>
|
||||
<value>sshfence
shell(true)</value>
|
||||
</property>
|
||||
<!-- Passwordless SSH key, needed only when the sshfence fencing method is used -->
|
||||
<property>
|
||||
<name>dfs.ha.fencing.ssh.private-key-files</name>
|
||||
<value>/root/.ssh/id_rsa</value>
|
||||
</property>
|
||||
<!-- Timeout for the sshfence fencing method; like the property above, it can be omitted when a script-based fencing method is used -->
|
||||
<property>
|
||||
<name>dfs.ha.fencing.ssh.connect-timeout</name>
|
||||
<value>30000</value>
|
||||
</property>
|
||||
<!-- Enable automatic failover; can be left unset if automatic failover is not used -->
|
||||
<property>
|
||||
<name>dfs.ha.automatic-failover.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>dfs.datanode.max.transfer.threads</name>
|
||||
<value>8192</value>
|
||||
</property>
|
||||
<!-- Number of NameNode RPC handler threads; increasing it costs little extra resource -->
|
||||
<property>
|
||||
<name>dfs.namenode.handler.count</name>
|
||||
<value>{{ hadoop.namenode['dfs.namenode.handler.count'] }}</value>
|
||||
</property>
|
||||
<!-- Number of DataNode RPC handler threads; increasing it uses more memory -->
|
||||
<property>
|
||||
<name>dfs.datanode.handler.count</name>
|
||||
<value>{{ hadoop.datanode['dfs.datanode.handler.count'] }}</value>
|
||||
</property>
|
||||
<!-- Bandwidth the balancer is allowed to use -->
|
||||
<property>
|
||||
<name>dfs.balance.bandwidthPerSec</name>
|
||||
<value>104857600</value>
|
||||
</property>
|
||||
<!-- Reserved disk space that HDFS will not use, in bytes -->
|
||||
<property>
|
||||
<name>dfs.datanode.du.reserved</name>
|
||||
<value>53687091200</value>
|
||||
</property>
|
||||
<!-- DataNode-NameNode connection timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
|
||||
<property>
|
||||
<name>heartbeat.recheck.interval</name>
|
||||
<value>100000</value>
|
||||
</property>
|
||||
</configuration>
|
||||
|
||||
46
Apache Hadoop/2.7.1/hdfs/role/templates/ini_hdfs.sh.j2
Normal file
@@ -0,0 +1,46 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
MASTER_IP={{ groups.hdfs[0] }}
|
||||
SLAVE1_IP={{ groups.hdfs[1] }}
|
||||
|
||||
BASE_DIR={{ deploy_dir }}
|
||||
VERSION={{ hadoop_version }}
|
||||
|
||||
function ini_namenode() {
|
||||
|
||||
cd $BASE_DIR/$VERSION/bin
|
||||
yes | ./hadoop namenode -format
|
||||
|
||||
if [ $? -eq "0" ];then
|
||||
# scp -r $BASE_DIR/hadoop/ root@$SLAVE1_IP:$BASE_DIR/
|
||||
echo yes
|
||||
else
|
||||
echo no
|
||||
fi
|
||||
}
|
||||
|
||||
function ini_zk() {
|
||||
|
||||
cd $BASE_DIR/$VERSION/bin
|
||||
yes | ./hdfs zkfc -formatZK
|
||||
|
||||
if [ $? -eq "0" ];then
|
||||
echo yes
|
||||
else
|
||||
echo no
|
||||
fi
|
||||
}
|
||||
|
||||
case $1 in
|
||||
namenode)
|
||||
ini_namenode
|
||||
;;
|
||||
zkfc)
|
||||
ini_zk
|
||||
;;
|
||||
* )
|
||||
echo "请输入已有的指令."
|
||||
;;
|
||||
esac
|
||||
|
||||
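deploy.yml calls this script only on the master and greps its output for yes; run by hand the sequence looks like this, with deploy_dir and hadoop_version taken from the defaults above:

    cd /data/olap/hadoop-2.7.1/bin
    sh ini_hdfs.sh namenode   # formats the NameNode, prints yes on success
    sh ini_hdfs.sh zkfc       # formats the HA state in ZooKeeper, prints yes on success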
33
Apache Hadoop/2.7.1/hdfs/role/templates/mapred-site.xml.j2
Normal file
@@ -0,0 +1,33 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<!--
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
<configuration>
|
||||
<property>
|
||||
<name>mapreduce.framework.name</name>
|
||||
<value>yarn</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.address</name>
|
||||
<value>{{ groups.hdfs[0] }}:10020</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.webapp.address</name>
|
||||
<value>{{ groups.hdfs[0] }}:19888</value>
|
||||
</property>
|
||||
</configuration>
|
||||
|
||||
71
Apache Hadoop/2.7.1/hdfs/role/templates/set_hdfs_env.sh.j2
Normal file
@@ -0,0 +1,71 @@
|
||||
#!/bin/bash
|
||||
|
||||
source /etc/profile
|
||||
|
||||
function setChkconfig(){
|
||||
echo -e "\n#hadoop\nexport HADOOP_HOME={{ deploy_dir }}/{{ hadoop_version }}\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
|
||||
chmod +x /etc/profile.d/hadoop.sh
|
||||
|
||||
if [ -x '/etc/init.d/keephdfsmaster' ];then
|
||||
chkconfig --add keephdfsmaster
|
||||
chkconfig keephdfsmaster on
|
||||
fi
|
||||
|
||||
if [ -x '/etc/init.d/keephdfsslave' ];then
|
||||
chkconfig --add keephdfsslave
|
||||
chkconfig keephdfsslave on
|
||||
fi
|
||||
|
||||
if [ -x '/etc/init.d/keephdfsworker' ];then
|
||||
chkconfig --add keephdfsworker
|
||||
chkconfig keephdfsworker on
|
||||
fi
|
||||
|
||||
if [ -x '/etc/init.d/keephdfsjournal' ];then
|
||||
chkconfig --add keephdfsjournal
|
||||
chkconfig keephdfsjournal on
|
||||
fi
|
||||
}
|
||||
|
||||
case $1 in
|
||||
journal)
|
||||
if [ -x '/etc/init.d/keephdfsjournal' ];then
|
||||
service keephdfsjournal start && sleep 5
|
||||
journal_dae=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
|
||||
if [ $journal_dae -lt 1 ];then
|
||||
nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
master)
|
||||
if [ -x '/etc/init.d/keephdfsmaster' ];then
|
||||
service keephdfsmaster start && sleep 5
|
||||
master_dae=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
|
||||
if [ $master_dae -lt 1 ];then
|
||||
nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
slave)
|
||||
if [ -x '/etc/init.d/keephdfsslave' ];then
|
||||
service keephdfsslave start && sleep 5
|
||||
slave_dae=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
|
||||
if [ $slave_dae -lt 1 ];then
|
||||
nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
worker)
|
||||
if [ -x '/etc/init.d/keephdfsworker' ];then
|
||||
service keephdfsworker start && sleep 5
|
||||
worker_dae=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
|
||||
if [ $worker_dae -lt 1 ];then
|
||||
nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
chkconfig)
|
||||
setChkconfig;;
|
||||
* )
|
||||
;;
|
||||
esac
|
||||
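deploy.yml drives this helper with chkconfig and journal on every host, then master/slave/worker on the matching nodes; a sketch of the manual sequence on the master, with paths taken from the defaults:

    cd /data/olap/hadoop-2.7.1/bin
    ./set_hdfs_env.sh chkconfig   # registers the keephdfs* init scripts and /etc/profile.d/hadoop.sh
    ./set_hdfs_env.sh journal     # starts the JournalNode watchdog
    ./set_hdfs_env.sh master      # starts the NameNode/ZKFC watchdog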
4
Apache Hadoop/2.7.1/hdfs/role/templates/slaves.j2
Normal file
@@ -0,0 +1,4 @@
{% set combined_group = groups.hdfs %}
{% for dev_info in combined_group %}
{{dev_info}}
{% endfor %}
86
Apache Hadoop/2.7.1/hdfs/role/templates/unload_hdfs.sh.j2
Normal file
@@ -0,0 +1,86 @@
|
||||
#!/bin/bash
|
||||
source /etc/profile
|
||||
|
||||
function killService(){
|
||||
keeppath='/etc/init.d/keephdfsjournal'
|
||||
if [ -x $keeppath ];then
|
||||
service keephdfsjournal stop
|
||||
chkconfig keephdfsjournal off
|
||||
systemctl daemon-reload
|
||||
rm -rf /etc/init.d/keephdfsjournal
|
||||
fi
|
||||
|
||||
keeppath='/etc/init.d/keephdfsmaster'
|
||||
if [ -x $keeppath ];then
|
||||
service keephdfsmaster stop
|
||||
chkconfig keephdfsmaster off
|
||||
systemctl daemon-reload
|
||||
rm -rf /etc/init.d/keephdfsmaster
|
||||
fi
|
||||
|
||||
keeppath='/etc/init.d/keephdfsslave'
|
||||
if [ -x $keeppath ];then
|
||||
service keephdfsslave stop
|
||||
chkconfig keephdfsslave off
|
||||
systemctl daemon-reload
|
||||
rm -rf /etc/init.d/keephdfsslave
|
||||
fi
|
||||
|
||||
keeppath='/etc/init.d/keephdfsworker'
|
||||
if [ -x $keeppath ];then
|
||||
service keephdfsworker stop
|
||||
chkconfig keephdfsworker off
|
||||
systemctl daemon-reload
|
||||
rm -rf /etc/init.d/keephdfsworker
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
function killPid(){
|
||||
livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.qjournal.server.JournalNode" | grep -v grep |wc -l`
|
||||
if [ $livenum -ne 0 ];then
|
||||
keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.qjournal.server.JournalNode" | awk '{print $1}'`
|
||||
kill -9 $keeppid
|
||||
fi
|
||||
|
||||
livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.tools.DFSZKFailoverController" | grep -v grep |wc -l`
|
||||
if [ $livenum -ne 0 ];then
|
||||
keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.tools.DFSZKFailoverController" | awk '{print $1}'`
|
||||
kill -9 $keeppid
|
||||
fi
|
||||
|
||||
livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.server.datanode.DataNode" | grep -v grep |wc -l`
|
||||
if [ $livenum -ne 0 ];then
|
||||
keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.server.datanode.DataNode" | awk '{print $1}'`
|
||||
kill -9 $keeppid
|
||||
fi
|
||||
|
||||
livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.server.namenode.NameNode" | grep -v grep |wc -l`
|
||||
if [ $livenum -ne 0 ];then
|
||||
keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.server.namenode.NameNode" | awk '{print $1}'`
|
||||
kill -9 $keeppid
|
||||
fi
|
||||
}
|
||||
|
||||
function drop_folder(){
|
||||
FOLDER_NAME=$1
|
||||
|
||||
if [ -d "$FOLDER_NAME" ];then
|
||||
rm -rf $FOLDER_NAME
|
||||
fi
|
||||
}
|
||||
|
||||
function drop_file(){
|
||||
FILE_NAME=$1
|
||||
|
||||
if [ -f "$FILE_NAME" ];then
|
||||
rm -rf $FILE_NAME
|
||||
fi
|
||||
}
|
||||
|
||||
killService
|
||||
sleep 15
|
||||
killPid
|
||||
drop_folder {{ deploy_dir }}/{{ hadoop_version }}
|
||||
drop_folder {{ data_dir }}/{{ hadoop_version }}
|
||||
drop_file /etc/profile.d/hadoop.sh
|
||||
8
Apache Hadoop/2.7.1/hdfs/role/vars/main.yml
Normal file
@@ -0,0 +1,8 @@
# Hadoop version
hadoop_version: hadoop-2.7.1

# Data directory
hdfs_data_dir: "{{ data_dir }}/{{ hadoop_version }}/data/hadoop"

# JDK version
java_version: 1.8.0_73
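deploy.yml greps the java -version output for this java_version string and expects exactly two matching lines, so the value has to match the JDK actually installed on the targets; a quick manual check:

    source /etc/profile && java -version 2>&1 | grep 1.8.0_73 | wc -l   # deploy.yml expects 2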