Initial version of the Ansible playbooks for deploying each component
67
Apache Hadoop/2.7.1/hdfs/role/templates/core-site.xml.j2
Normal file
@@ -0,0 +1,67 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ns1</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:{{ hdfs_data_dir }}/tmp</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131702</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.logfile.size</name>
        <value>10000000</value>
        <description>The max size of each log file</description>
    </property>
    <property>
        <name>hadoop.logfile.count</name>
        <value>1</value>
        <description>The max number of log files</description>
    </property>
    <property>
        <name>ha.zookeeper.quorum</name>
        {% for dev_info in groups.zookeeper -%}
        {% if loop.last -%}
        {{dev_info}}:2181</value>
        {% elif loop.first %}
        <value>{{dev_info}}:2181,
        {%- else %}
        {{dev_info}}:2181,
        {%- endif %}
        {%- endfor %}
    </property>
    <property>
        <name>ipc.client.connect.timeout</name>
        <value>90000</value>
    </property>
</configuration>
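Note: the {% for %} loop above assembles the comma-separated quorum string branch by branch. A more compact sketch (an alternative, assuming the same groups.zookeeper inventory group and the default client port 2181) would be a single join expression:

    <property>
        <name>ha.zookeeper.quorum</name>
        <value>{{ groups.zookeeper | join(':2181,') }}:2181</value>
    </property>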
@@ -0,0 +1,42 @@
#!/bin/bash
source /etc/profile

BASE_DIR={{ deploy_dir }}

VERSION={{ hadoop_version }}

# Track restart counts and append an entry to restart.log.
# $1 = counter file name, $2 = service name shown in the log.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs

    if [ ! -d "$RES_SUM_FILE" ]
    then
        mkdir -p $RES_SUM_FILE
    fi

    if [ ! -f "$RES_SUM_FILE/$1" ];then
        echo "0" > $RES_SUM_FILE/$1
    fi

    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ];then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
    else
        echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
    fi
}


# Watchdog loop: restart the JournalNode whenever its process disappears.
while true ; do

    HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`

    if [ $HAS_JN -eq "0" ];then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
        set_log jnRes_sum JournalNode
    fi

    sleep 60
done
@@ -0,0 +1,53 @@
#!/bin/bash
source /etc/profile

BASE_DIR={{ deploy_dir }}

VERSION={{ hadoop_version }}

# Track restart counts and append an entry to restart.log.
# $1 = counter file name, $2 = service name shown in the log.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs

    if [ ! -d "$RES_SUM_FILE" ]
    then
        mkdir -p $RES_SUM_FILE
    fi

    if [ ! -f "$RES_SUM_FILE/$1" ];then
        echo "0" > $RES_SUM_FILE/$1
    fi

    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ];then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
    else
        echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
    fi
}


# Watchdog loop: restart the NameNode and ZKFC whenever their processes disappear.
while true ; do

    HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
    HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
    #HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`

    if [ $HAS_NN -eq "0" ];then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
        set_log nnRes_sum NameNode
    fi

    if [ $HAS_ZKFC -eq "0" ];then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
        set_log zkfcRes_sum DFSZKFailoverController
    fi

    #if [ $HAS_NM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
    #    set_log nmRes_sum NodeManager
    #fi

    sleep 60
done
@@ -0,0 +1,60 @@
#!/bin/bash
source /etc/profile

BASE_DIR={{ deploy_dir }}

VERSION={{ hadoop_version }}

# Track restart counts and append an entry to restart.log.
# $1 = counter file name, $2 = service name shown in the log.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs

    if [ ! -d "$RES_SUM_FILE" ]
    then
        mkdir -p $RES_SUM_FILE
    fi

    if [ ! -f "$RES_SUM_FILE/$1" ];then
        echo "0" > $RES_SUM_FILE/$1
    fi

    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ];then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
    else
        echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
    fi
}


# Watchdog loop: restart the NameNode and ZKFC whenever their processes disappear.
while true ; do

    HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
    HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
    #HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
    #HAS_RM=`ps -ef | grep ResourceManager | grep -v grep | wc -l`

    if [ $HAS_NN -eq "0" ];then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
        set_log nnRes_sum NameNode
    fi

    if [ $HAS_ZKFC -eq "0" ];then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
        set_log zkfcRes_sum DFSZKFailoverController
    fi

    #if [ $HAS_NM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
    #    set_log nmRes_sum NodeManager
    #fi

    #if [ $HAS_RM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
    #    set_log RMRes_sum ResourceManager
    #fi

    sleep 60
done
@@ -0,0 +1,47 @@
#!/bin/bash
source /etc/profile

BASE_DIR={{ deploy_dir }}

VERSION={{ hadoop_version }}

# Track restart counts and append an entry to restart.log.
# $1 = counter file name, $2 = service name shown in the log.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs

    if [ ! -d "$RES_SUM_FILE" ]
    then
        mkdir -p $RES_SUM_FILE
    fi

    if [ ! -f "$RES_SUM_FILE/$1" ];then
        echo "0" > $RES_SUM_FILE/$1
    fi

    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ];then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $BASE_DIR/$VERSION/logs/restart.log
    else
        echo "`date +%Y-%m-%d` `date +%H:%M:%S` - Hadoop $2 service down - restart count -> $RESTART_NUM." >> $BASE_DIR/$VERSION/logs/restart.log
    fi
}


# Watchdog loop: restart the DataNode whenever its process disappears.
while true ; do

    HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
    #HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`

    if [ $HAS_DN -eq "0" ];then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
        set_log dnRes_sum DataNode
    fi

    #if [ $HAS_NM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
    #    set_log nmRes_sum NodeManager
    #fi

    sleep 60
done
@@ -0,0 +1,47 @@
#!/bin/bash
#
# keephdfsjournal   Init script that starts/stops the watchdog keeping the HDFS JournalNode running.
#
# chkconfig:123456 40 60
# description: keephdfsjournal
source /etc/profile
PRO_NAME=keephdfsjournal

INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}

case $1 in
start)
    journal=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
    if [ $journal -lt 1 ];then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
    fi

    ;;

stop)
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | awk '{print $2}'`
    if [ $HAS_KEEP_SHELL ];then
        echo "Watchdog daemon PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi

    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop journalnode > /dev/null
    ;;

status)
    num=`ps -ef | grep JournalNode | grep -v grep | wc -l`
    if [ "$num" -eq "1" ];then
        echo "JournalNode process is running"
    else
        echo "JournalNode process is not running"
    fi

    ;;

* )
    echo "use keephdfsjournal [start|stop|status]"
    ;;
esac
@@ -0,0 +1,42 @@
#!/bin/bash
#
# keephdfsmaster   Init script that starts/stops the watchdog keeping the active NameNode and ZKFC running.
#
# chkconfig:123456 40 60
# description: keephdfsmaster
source /etc/profile
PRO_NAME=keephdfsmaster

INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}

case $1 in
start)
    master=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
    if [ $master -lt 1 ];then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
    fi
    ;;

stop)
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | awk '{print $2}'`
    if [ $HAS_KEEP_SHELL ];then
        echo "Watchdog daemon PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi

    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
    ;;

status)
    hdfs haadmin -getServiceState nn1
    hdfs dfsadmin -report
    ;;

* )
    echo "use keephdfsmaster [start|stop|status]"
    ;;
esac
@@ -0,0 +1,42 @@
#!/bin/bash
#
# keephdfsslave   Init script that starts/stops the watchdog keeping the standby NameNode and ZKFC running.
#
# chkconfig:123456 40 60
# description: keephdfsslave
source /etc/profile
PRO_NAME=keephdfsslave

INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}

case $1 in
start)
    slave=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
    if [ $slave -lt 1 ];then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
    fi
    ;;

stop)
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | awk '{print $2}'`
    if [ $HAS_KEEP_SHELL ];then
        echo "Watchdog daemon PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi

    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
    ;;

status)
    hdfs haadmin -getServiceState nn2
    hdfs dfsadmin -report
    ;;

* )
    echo "use keephdfsslave [start|stop|status]"
    ;;
esac
@@ -0,0 +1,47 @@
#!/bin/bash
#
# keephdfsworker   Init script that starts/stops the watchdog keeping the HDFS DataNode running.
#
# chkconfig:123456 40 60
# description: keephdfsworker
source /etc/profile
PRO_NAME=keephdfsworker

INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}

case $1 in
start)
    worker=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
    if [ $worker -lt 1 ];then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
    fi

    ;;

stop)
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | awk '{print $2}'`
    if [ $HAS_KEEP_SHELL ];then
        echo "Watchdog daemon PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi

    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop datanode > /dev/null
    ;;

status)
    num=`ps -ef | grep DataNode | grep -v grep | wc -l`
    if [ "$num" -eq "1" ];then
        echo "DataNode process is running"
    else
        echo "DataNode process is not running"
    fi

    ;;

* )
    echo "use keephdfsworker [start|stop|status]"
    ;;
esac
105
Apache Hadoop/2.7.1/hdfs/role/templates/hadoop-env.sh.j2
Normal file
@@ -0,0 +1,105 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
export HADOOP_NAMENODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9905:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"
export HADOOP_DATANODE_JMX_OPTS="-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -javaagent:{{ deploy_dir }}/{{ hadoop_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9906:{{ deploy_dir }}/{{ hadoop_version }}/monitor/hdfs.yaml"

# The java implementation to use.
#export HADOOP_HEAPSIZE=m
#export JAVA_HOME=/usr/local/jdk/jdk1.8.0_73
export JAVA_HOME=$JAVA_HOME
# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol.  Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
#export JSVC_HOME=${JSVC_HOME}

export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}

# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done

# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""

# Extra Java runtime options.  Empty by default.
export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS {{ hadoop.namenode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-namenode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}"

export HADOOP_DATANODE_OPTS="$HADOOP_DATANODE_OPTS {{ hadoop.datanode.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ deploy_dir }}/{{ hadoop_version }}/logs/gc-datanode-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath={{ deploy_dir }}/{{ hadoop_version }}/logs/ -Dhadoop.security.logger=ERROR,RFAS"

export HADOOP_JOURNALNODE_OPTS="$HADOOP_JOURNALNODE_OPTS {{ hadoop.journalnode.java_opt }}"

export HADOOP_ZKFC_OPTS="$HADOOP_ZKFC_OPTS {{ hadoop.zkfc.java_opt }}"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"

# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol.  This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}

# Where log files are stored.  $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER

# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""

###
# Advanced Users Only!
###

# The directory where pid files are stored. /tmp by default.
# NOTE: this should be set to a directory that can only be written to by
#       the user that will run the hadoop daemons.  Otherwise there is the
#       potential for a symlink attack.
export HADOOP_PID_DIR={{ deploy_dir }}/{{ hadoop_version }}/pids
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER
142
Apache Hadoop/2.7.1/hdfs/role/templates/hdfs-site.xml.j2
Normal file
@@ -0,0 +1,142 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:{{ hdfs_data_dir }}/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:{{ hdfs_data_dir }}/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.nameservices</name>
        <value>ns1</value>
    </property>
    <property>
        <name>dfs.blocksize</name>
        <value>134217728</value>
    </property>
    <property>
        <name>dfs.ha.namenodes.ns1</name>
        <value>nn1,nn2</value>
    </property>
    <!-- RPC address of nn1 (the host where nn1 runs) -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn1</name>
        <value>{{ groups.hdfs[0] }}:9000</value>
    </property>
    <!-- HTTP address of nn1 (externally accessible address) -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn1</name>
        <value>{{ groups.hdfs[0] }}:50070</value>
    </property>
    <!-- RPC address of nn2 (the host where nn2 runs) -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn2</name>
        <value>{{ groups.hdfs[1] }}:9000</value>
    </property>
    <!-- HTTP address of nn2 (externally accessible address) -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn2</name>
        <value>{{ groups.hdfs[1] }}:50070</value>
    </property>
    <!-- Where NameNode metadata (the shared edit log) is stored on the JournalNodes (usually co-located with ZooKeeper) -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://{{groups.hdfs[0]}}:8485;{{groups.hdfs[1]}}:8485;{{groups.hdfs[2]}}:8485/ns1</value>
    </property>
    <!-- Where the JournalNode stores its data on local disk -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>{{ hdfs_data_dir }}/journal</value>
    </property>
    <!-- Java class the HDFS client uses to reach the NameNode and determine which one is currently Active -->
    <property>
        <name>dfs.client.failover.proxy.provider.ns1</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- Fencing methods for automatic failover; several options exist (see the official documentation). Here the old Active is killed over SSH, with shell(true) as a fallback -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence
        shell(true)</value>
    </property>
    <!-- Passwordless SSH key, needed only when the sshfence method is used -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>
    <!-- Timeout for the sshfence method; like the property above, it can be omitted when a shell script handles fencing -->
    <property>
        <name>dfs.ha.fencing.ssh.connect-timeout</name>
        <value>30000</value>
    </property>
    <!-- Enable automatic failover; can be left unset if automatic failover is not used -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.datanode.max.transfer.threads</name>
        <value>8192</value>
    </property>
    <!-- Number of NameNode RPC handler threads; raising it costs little extra resources -->
    <property>
        <name>dfs.namenode.handler.count</name>
        <value>{{ hadoop.namenode['dfs.namenode.handler.count'] }}</value>
    </property>
    <!-- Number of DataNode RPC handler threads; raising it uses more memory -->
    <property>
        <name>dfs.datanode.handler.count</name>
        <value>{{ hadoop.datanode['dfs.datanode.handler.count'] }}</value>
    </property>
    <!-- Bandwidth the balancer may use -->
    <property>
        <name>dfs.balance.bandwidthPerSec</name>
        <value>104857600</value>
    </property>
    <!-- Reserved disk space (in bytes) that HDFS will not use -->
    <property>
        <name>dfs.datanode.du.reserved</name>
        <value>53687091200</value>
    </property>
    <!-- DataNode-to-NameNode timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
    <property>
        <name>heartbeat.recheck.interval</name>
        <value>100000</value>
    </property>
</configuration>
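Note: with heartbeat.recheck.interval set to 100000 ms, the DataNode-dead timeout works out to 2 * 100000 + 30000 = 230000 ms, i.e. about 230 seconds; the 30000 ms term corresponds to ten default 3-second heartbeats.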
46
Apache Hadoop/2.7.1/hdfs/role/templates/ini_hdfs.sh.j2
Normal file
@@ -0,0 +1,46 @@
#!/bin/bash


MASTER_IP={{ groups.hdfs[0] }}
SLAVE1_IP={{ groups.hdfs[1] }}

BASE_DIR={{ deploy_dir }}
VERSION={{ hadoop_version }}

# Format the NameNode metadata (first-time initialization only).
function ini_namenode() {

    cd $BASE_DIR/$VERSION/bin
    yes | ./hadoop namenode -format

    if [ $? -eq "0" ];then
        # scp -r $BASE_DIR/hadoop/ root@$SLAVE1_IP:$BASE_DIR/
        echo yes
    else
        echo no
    fi
}

# Create the HA state znode in ZooKeeper (first-time initialization only).
function ini_zk() {

    cd $BASE_DIR/$VERSION/bin
    yes | ./hdfs zkfc -formatZK

    if [ $? -eq "0" ];then
        echo yes
    else
        echo no
    fi
}

case $1 in
namenode)
    ini_namenode
    ;;
zkfc)
    ini_zk
    ;;
* )
    echo "Please enter a valid command."
    ;;
esac
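A usage sketch for ini_hdfs.sh: run it once on the first NameNode host after the JournalNodes and ZooKeeper are up. The sbin install path below is an assumption; this commit does not show where the rendered template is copied.

    # hypothetical paths; adjust to wherever the rendered script is deployed
    sh {{ deploy_dir }}/{{ hadoop_version }}/sbin/ini_hdfs.sh namenode   # format HDFS metadata
    sh {{ deploy_dir }}/{{ hadoop_version }}/sbin/ini_hdfs.sh zkfc       # create the HA znode in ZooKeeper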
33
Apache Hadoop/2.7.1/hdfs/role/templates/mapred-site.xml.j2
Normal file
@@ -0,0 +1,33 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>{{ groups.hdfs[0] }}:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>{{ groups.hdfs[0] }}:19888</value>
    </property>
</configuration>
71
Apache Hadoop/2.7.1/hdfs/role/templates/set_hdfs_env.sh.j2
Normal file
@@ -0,0 +1,71 @@
#!/bin/bash

source /etc/profile

# Expose Hadoop in the shell environment and register the watchdog init scripts with chkconfig.
function setChkconfig(){
    echo -e "\n#hadoop\nexport HADOOP_HOME={{ deploy_dir }}/{{ hadoop_version }}\nexport PATH=\$HADOOP_HOME/sbin:\$PATH\nexport PATH=\$HADOOP_HOME/bin:\$PATH\nexport HADOOP_CLASSPATH=\`hadoop classpath\`" >> /etc/profile.d/hadoop.sh
    chmod +x /etc/profile.d/hadoop.sh

    if [ -x '/etc/init.d/keephdfsmaster' ];then
        chkconfig --add keephdfsmaster
        chkconfig keephdfsmaster on
    fi

    if [ -x '/etc/init.d/keephdfsslave' ];then
        chkconfig --add keephdfsslave
        chkconfig keephdfsslave on
    fi

    if [ -x '/etc/init.d/keephdfsworker' ];then
        chkconfig --add keephdfsworker
        chkconfig keephdfsworker on
    fi

    if [ -x '/etc/init.d/keephdfsjournal' ];then
        chkconfig --add keephdfsjournal
        chkconfig keephdfsjournal on
    fi
}

case $1 in
journal)
    if [ -x '/etc/init.d/keephdfsjournal' ];then
        service keephdfsjournal start && sleep 5
        journal_dae=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
        if [ $journal_dae -lt 1 ];then
            nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
        fi
    fi
    ;;
master)
    if [ -x '/etc/init.d/keephdfsmaster' ];then
        service keephdfsmaster start && sleep 5
        master_dae=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
        if [ $master_dae -lt 1 ];then
            nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
        fi
    fi
    ;;
slave)
    if [ -x '/etc/init.d/keephdfsslave' ];then
        service keephdfsslave start && sleep 5
        slave_dae=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
        if [ $slave_dae -lt 1 ];then
            nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
        fi
    fi
    ;;
worker)
    if [ -x '/etc/init.d/keephdfsworker' ];then
        service keephdfsworker start && sleep 5
        worker_dae=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
        if [ $worker_dae -lt 1 ];then
            nohup {{ deploy_dir }}/{{ hadoop_version }}/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
        fi
    fi
    ;;
chkconfig)
    setChkconfig;;
* )
    ;;
esac
4
Apache Hadoop/2.7.1/hdfs/role/templates/slaves.j2
Normal file
@@ -0,0 +1,4 @@
{% set combined_group = groups.hdfs %}
{% for dev_info in combined_group %}
{{dev_info}}
{% endfor %}
86
Apache Hadoop/2.7.1/hdfs/role/templates/unload_hdfs.sh.j2
Normal file
@@ -0,0 +1,86 @@
#!/bin/bash
source /etc/profile

# Stop and unregister the HDFS watchdog init scripts.
function killService(){
    keeppath='/etc/init.d/keephdfsjournal'
    if [ -x $keeppath ];then
        service keephdfsjournal stop
        chkconfig keephdfsjournal off
        systemctl daemon-reload
        rm -rf /etc/init.d/keephdfsjournal
    fi

    keeppath='/etc/init.d/keephdfsmaster'
    if [ -x $keeppath ];then
        service keephdfsmaster stop
        chkconfig keephdfsmaster off
        systemctl daemon-reload
        rm -rf /etc/init.d/keephdfsmaster
    fi

    keeppath='/etc/init.d/keephdfsslave'
    if [ -x $keeppath ];then
        service keephdfsslave stop
        chkconfig keephdfsslave off
        systemctl daemon-reload
        rm -rf /etc/init.d/keephdfsslave
    fi

    keeppath='/etc/init.d/keephdfsworker'
    if [ -x $keeppath ];then
        service keephdfsworker stop
        chkconfig keephdfsworker off
        systemctl daemon-reload
        rm -rf /etc/init.d/keephdfsworker
    fi
}


# Kill any HDFS daemons that are still running.
function killPid(){
    livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.qjournal.server.JournalNode" | grep -v grep |wc -l`
    if [ $livenum -ne 0 ];then
        keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.qjournal.server.JournalNode" | awk '{print $1}'`
        kill -9 $keeppid
    fi

    livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.tools.DFSZKFailoverController" | grep -v grep |wc -l`
    if [ $livenum -ne 0 ];then
        keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.tools.DFSZKFailoverController" | awk '{print $1}'`
        kill -9 $keeppid
    fi

    livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.server.datanode.DataNode" | grep -v grep |wc -l`
    if [ $livenum -ne 0 ];then
        keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.server.datanode.DataNode" | awk '{print $1}'`
        kill -9 $keeppid
    fi

    livenum=`jps -l | egrep -w "org.apache.hadoop.hdfs.server.namenode.NameNode" | grep -v grep |wc -l`
    if [ $livenum -ne 0 ];then
        keeppid=`jps -l |egrep -w "org.apache.hadoop.hdfs.server.namenode.NameNode" | awk '{print $1}'`
        kill -9 $keeppid
    fi
}

# Remove a directory if it exists.
function drop_folder(){
    FOLDER_NAME=$1

    if [ -d "$FOLDER_NAME" ];then
        rm -rf $FOLDER_NAME
    fi
}

# Remove a file if it exists.
function drop_file(){
    FILE_NAME=$1

    if [ -f "$FILE_NAME" ];then
        rm -rf $FILE_NAME
    fi
}

killService
sleep 15
killPid
drop_folder {{ deploy_dir }}/{{ hadoop_version }}
drop_folder {{ data_dir }}/{{ hadoop_version }}
drop_file /etc/profile.d/hadoop.sh