Initial version of the Ansible playbooks for deploying each component

qidaijie
2024-01-18 15:35:33 +08:00
parent f0bd05d565
commit 0cc392df5c
262 changed files with 15927 additions and 0 deletions

View File

@@ -0,0 +1,42 @@
#!/bin/bash
source /etc/profile
BASE_DIR={{ deploy_dir }}
VERSION={{ hadoop_version }}

# Record (re)start events and keep a per-daemon restart counter.
# $1 = counter file name, $2 = daemon name used in log messages.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs
    if [ ! -d "$RES_SUM_FILE" ]; then
        mkdir -p $RES_SUM_FILE
    fi
    if [ ! -f "$RES_SUM_FILE/$1" ]; then
        echo "0" > $RES_SUM_FILE/$1
    fi
    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ]; then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $RES_SUM_FILE/restart.log
    else
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service went down - restart count -> $RESTART_NUM." >> $RES_SUM_FILE/restart.log
    fi
}

# Poll every 60 seconds and restart the JournalNode if it is not running.
while true ; do
    HAS_JN=`ps -ef | grep JournalNode | grep -v grep | wc -l`
    if [ $HAS_JN -eq "0" ]; then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start journalnode > /dev/null
        set_log jnRes_sum JournalNode
    fi
    sleep 60
done
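
This watchdog is launched in the background by the matching keephdfsjournal init script later in this commit. As a hedged illustration, a manual smoke test could look like the following; the concrete paths are assumptions, since the playbook fills them in from {{ deploy_dir }} and {{ hadoop_version }}:

# Assumed values standing in for {{ deploy_dir }} and {{ hadoop_version }}.
BASE_DIR=/opt/hadoop
VERSION=3.3.6
# Start the watchdog loop detached from the terminal.
nohup $BASE_DIR/$VERSION/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
# Watch the restart events it records.
tail -f $BASE_DIR/$VERSION/logs/restart.log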

View File

@@ -0,0 +1,53 @@
#!/bin/bash
source /etc/profile
BASE_DIR={{ deploy_dir }}
VERSION={{ hadoop_version }}

# Record (re)start events and keep a per-daemon restart counter.
# $1 = counter file name, $2 = daemon name used in log messages.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs
    if [ ! -d "$RES_SUM_FILE" ]; then
        mkdir -p $RES_SUM_FILE
    fi
    if [ ! -f "$RES_SUM_FILE/$1" ]; then
        echo "0" > $RES_SUM_FILE/$1
    fi
    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ]; then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $RES_SUM_FILE/restart.log
    else
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service went down - restart count -> $RESTART_NUM." >> $RES_SUM_FILE/restart.log
    fi
}

# Poll every 60 seconds and restart the NameNode and ZKFC if they are not running.
while true ; do
    HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
    HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
    #HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
    if [ $HAS_NN -eq "0" ]; then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
        set_log nnRes_sum NameNode
    fi
    if [ $HAS_ZKFC -eq "0" ]; then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
        set_log zkfcRes_sum DFSZKFailoverController
    fi
    #if [ $HAS_NM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
    #    set_log nmRes_sum NodeManager
    #fi
    sleep 60
done

View File

@@ -0,0 +1,60 @@
#!/bin/bash
source /etc/profile
BASE_DIR={{ deploy_dir }}
VERSION={{ hadoop_version }}

# Record (re)start events and keep a per-daemon restart counter.
# $1 = counter file name, $2 = daemon name used in log messages.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs
    if [ ! -d "$RES_SUM_FILE" ]; then
        mkdir -p $RES_SUM_FILE
    fi
    if [ ! -f "$RES_SUM_FILE/$1" ]; then
        echo "0" > $RES_SUM_FILE/$1
    fi
    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ]; then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $RES_SUM_FILE/restart.log
    else
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service went down - restart count -> $RESTART_NUM." >> $RES_SUM_FILE/restart.log
    fi
}

# Poll every 60 seconds and restart the NameNode and ZKFC if they are not running.
while true ; do
    HAS_NN=`ps -ef | grep NameNode | grep -v grep | wc -l`
    HAS_ZKFC=`ps -ef | grep DFSZKFailoverController | grep -v grep | wc -l`
    #HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
    #HAS_RM=`ps -ef | grep ResourceManager | grep -v grep | wc -l`
    if [ $HAS_NN -eq "0" ]; then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start namenode > /dev/null
        set_log nnRes_sum NameNode
    fi
    if [ $HAS_ZKFC -eq "0" ]; then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start zkfc > /dev/null
        set_log zkfcRes_sum DFSZKFailoverController
    fi
    #if [ $HAS_NM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
    #    set_log nmRes_sum NodeManager
    #fi
    #if [ $HAS_RM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start resourcemanager > /dev/null
    #    set_log RMRes_sum ResourceManager
    #fi
    sleep 60
done
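
The respawn counters that set_log maintains are plain text files, so they can be inspected directly. A short sketch, reusing the assumed paths from the earlier example:

# Assumed install paths; the template derives them from {{ deploy_dir }} and {{ hadoop_version }}.
cat /opt/hadoop/3.3.6/logs/nnRes_sum      # times the NameNode has been (re)started
cat /opt/hadoop/3.3.6/logs/zkfcRes_sum    # same counter for the ZKFC
tail /opt/hadoop/3.3.6/logs/restart.log   # human-readable restart history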

View File

@@ -0,0 +1,47 @@
#!/bin/bash
source /etc/profile
BASE_DIR={{ deploy_dir }}
VERSION={{ hadoop_version }}

# Record (re)start events and keep a per-daemon restart counter.
# $1 = counter file name, $2 = daemon name used in log messages.
function set_log(){
    RES_SUM_FILE=$BASE_DIR/$VERSION/logs
    if [ ! -d "$RES_SUM_FILE" ]; then
        mkdir -p $RES_SUM_FILE
    fi
    if [ ! -f "$RES_SUM_FILE/$1" ]; then
        echo "0" > $RES_SUM_FILE/$1
    fi
    OLD_NUM=`cat $RES_SUM_FILE/$1`
    RESTART_NUM=`expr $OLD_NUM + 1`
    echo $RESTART_NUM > $RES_SUM_FILE/$1
    if [ $OLD_NUM -eq "0" ]; then
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service started for the first time" >> $RES_SUM_FILE/restart.log
    else
        echo "`date "+%Y-%m-%d %H:%M:%S"` - Hadoop $2 service went down - restart count -> $RESTART_NUM." >> $RES_SUM_FILE/restart.log
    fi
}

# Poll every 60 seconds and restart the DataNode if it is not running.
while true ; do
    HAS_DN=`ps -ef | grep DataNode | grep -v grep | wc -l`
    #HAS_NM=`ps -ef | grep NodeManager | grep -v grep | wc -l`
    if [ $HAS_DN -eq "0" ]; then
        yes | $BASE_DIR/$VERSION/sbin/hadoop-daemon.sh start datanode > /dev/null
        set_log dnRes_sum DataNode
    fi
    #if [ $HAS_NM -eq "0" ];then
    #    $BASE_DIR/$VERSION/sbin/yarn-daemon.sh start nodemanager > /dev/null
    #    set_log nmRes_sum NodeManager
    #fi
    sleep 60
done

View File

@@ -0,0 +1,47 @@
#!/bin/bash
#
# keephdfsjournal Watchdog service for the Hadoop JournalNode daemon.
#
# chkconfig: 123456 40 60
# description: keephdfsjournal
source /etc/profile
PRO_NAME=keephdfsjournal
INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}
case $1 in
start)
    # Launch the watchdog loop in the background unless it is already running.
    journal=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | wc -l`
    if [ $journal -lt 1 ]; then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsjournal.sh > /dev/null 2>&1 &
    fi
    ;;
stop)
    # Kill the watchdog first so it cannot respawn the daemon, then stop the daemon.
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsjournal.sh | grep -v grep | awk '{print $2}'`
    if [ -n "$HAS_KEEP_SHELL" ]; then
        echo "Watchdog PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop journalnode > /dev/null
    ;;
status)
    num=`ps -ef | grep JournalNode | grep -v grep | wc -l`
    if [ "$num" -ge "1" ]; then
        echo "JournalNode process is running"
    else
        echo "JournalNode process is not running"
    fi
    ;;
*)
    echo "Usage: keephdfsjournal {start|stop|status}"
    ;;
esac
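
The chkconfig header marks this as a SysV init script. A sketch of the manual registration steps on a chkconfig-based host; the Ansible role presumably performs the copy itself, and the install path here is an assumption:

# Install the rendered script as an init service (path assumed).
cp keephdfsjournal /etc/init.d/keephdfsjournal
chmod +x /etc/init.d/keephdfsjournal
chkconfig --add keephdfsjournal    # reads the "chkconfig: 123456 40 60" header
service keephdfsjournal start
service keephdfsjournal status     # reports whether the JournalNode is running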

View File

@@ -0,0 +1,42 @@
#!/bin/bash
#
# keephdfsmaster Watchdog service for the active HDFS master (NameNode + ZKFC).
#
# chkconfig: 123456 40 60
# description: keephdfsmaster
source /etc/profile
PRO_NAME=keephdfsmaster
INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}
case $1 in
start)
    # Launch the watchdog loop in the background unless it is already running.
    master=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | wc -l`
    if [ $master -lt 1 ]; then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsmaster.sh > /dev/null 2>&1 &
    fi
    ;;
stop)
    # Kill the watchdog first so it cannot respawn the daemons, then stop them.
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsmaster.sh | grep -v grep | awk '{print $2}'`
    if [ -n "$HAS_KEEP_SHELL" ]; then
        echo "Watchdog PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
    ;;
status)
    # Report the HA state of this NameNode (nn1) and overall HDFS health.
    hdfs haadmin -getServiceState nn1
    hdfs dfsadmin -report
    ;;
*)
    echo "Usage: keephdfsmaster {start|stop|status}"
    ;;
esac

View File

@@ -0,0 +1,42 @@
#!/bin/bash
#
# keephdfsslave Watchdog service for the standby HDFS master (NameNode + ZKFC).
#
# chkconfig: 123456 40 60
# description: keephdfsslave
source /etc/profile
PRO_NAME=keephdfsslave
INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}
case $1 in
start)
    # Launch the watchdog loop in the background unless it is already running.
    slave=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | wc -l`
    if [ $slave -lt 1 ]; then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsslave.sh > /dev/null 2>&1 &
    fi
    ;;
stop)
    # Kill the watchdog first so it cannot respawn the daemons, then stop them.
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsslave.sh | grep -v grep | awk '{print $2}'`
    if [ -n "$HAS_KEEP_SHELL" ]; then
        echo "Watchdog PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop namenode > /dev/null
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop zkfc > /dev/null
    ;;
status)
    # Report the HA state of this NameNode (nn2) and overall HDFS health.
    hdfs haadmin -getServiceState nn2
    hdfs dfsadmin -report
    ;;
*)
    echo "Usage: keephdfsslave {start|stop|status}"
    ;;
esac

View File

@@ -0,0 +1,47 @@
#!/bin/bash
#
# keephdfsworker Watchdog service for the Hadoop DataNode daemon.
#
# chkconfig: 123456 40 60
# description: keephdfsworker
source /etc/profile
PRO_NAME=keephdfsworker
INS_DIR={{ deploy_dir }}
# version
VERSION={{ hadoop_version }}
case $1 in
start)
    # Launch the watchdog loop in the background unless it is already running.
    worker=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | wc -l`
    if [ $worker -lt 1 ]; then
        nohup $INS_DIR/$VERSION/sbin/dae-hdfsworker.sh > /dev/null 2>&1 &
    fi
    ;;
stop)
    # Kill the watchdog first so it cannot respawn the daemon, then stop the daemon.
    HAS_KEEP_SHELL=`ps -ef | grep dae-hdfsworker.sh | grep -v grep | awk '{print $2}'`
    if [ -n "$HAS_KEEP_SHELL" ]; then
        echo "Watchdog PID: $HAS_KEEP_SHELL"
        kill -9 $HAS_KEEP_SHELL
    fi
    sh $INS_DIR/$VERSION/sbin/hadoop-daemon.sh stop datanode > /dev/null
    ;;
status)
    num=`ps -ef | grep DataNode | grep -v grep | wc -l`
    if [ "$num" -ge "1" ]; then
        echo "DataNode process is running"
    else
        echo "DataNode process is not running"
    fi
    ;;
*)
    echo "Usage: keephdfsworker {start|stop|status}"
    ;;
esac