場景:
CentOS6.3的二次封裝版本,安裝hortonworks的hadoop發行版,並按照其官方文檔實現高可用,但無fence設備支持,因此導致斷網和斷電測試時,備用HA節點無法得到資源釋放的通知。
因此,筆者簡單寫了一個腳本,讓HA的備節點在主節點斷網和斷電時能夠通過簡單測試得知並獲取資源,以實現真正意義的HA。
思考:
如何判斷網絡的連通性?
如何判斷VIP的可用性?
如何判斷自身的故障?
如何盡可能防止腦裂?
實現:
1、當自身節點IP均不正常時,使本節點重啟
2、當檢測自身網絡正常,且VIP不在線,且日誌指出對方HA節點故障時獲取資源
3、ganglia的VIP連接修復(不在本文主要思考范圍)
代碼如下:
#!/bin/bash STD=$$ Cron_if=`ps aux|grep $0|grep -vE "grep|$STD"|/usr/bin/wc -l` [[ "$Cron_if" -ge 2 ]] && exit 2 sleep 30 VIP=${VIP:-192.168.1.198} RE_PRD=${RE_PRD:-10} SAFE_TIME=${SAFE_TIME:-60} NMK=${NMK:-8} NUL=/dev/null Date="/bin/date +%k:%M:%S/%Y-%m-%d" [ ! -f /etc/sysconfig/hdp.conf ] && echo "`$Date` Error: No such config fil." >> $Mlog && exit 1 [ ! -f /etc/init.d/cman ] && echo "`$Date` War: `uname -n` Invalid HA node." >> $Mlog && exit 2 [ ! -f /etc/init.d/rgmanager ] && echo "`$Date` War: `uname -n` Invalid HA node." >> $Mlog && exit 2 [ -f /etc/sysconfig/hdp.conf ] && . /etc/sysconfig/hdp.conf while :;do RQE1=`/etc/init.d/rgmanager status 2> $Mlog|grep "is running."` RQE2=`/etc/init.d/cman status 2> $Mlog|grep "is running."` RQE3=`/etc/init.d/ricci status 2> $Mlog|grep "is running."` RQE4=`/etc/init.d/modclusterd status 2> $Mlog|grep "is running."` [ -z "$RQE2" ] && /etc/init.d/cman start &> $NUL [ -z "$RQE1" ] && /etc/init.d/rgmanager start &> $NUL [ -z "$RQE3" ] && /etc/init.d/ricci start &> $NUL [ -z "$RQE4" ] && /etc/init.d/modclusterd start &> $NUL if [[ -n "$RQE1" && -n "$RQE2" ]];then break else sleep $SAFE_TIME continue fi done NODE=(`grep clusternode /etc/cluster/cluster.conf|grep nodeid|awk -F\" '{print $2}'`) for i in ${NODE[@]};do NODE_IP=`grep $i /etc/hosts|awk '{print $1}'` JUDG_VAR=`/sbin/ip a|grep $NODE_IP` [ -n "$JUDG_VAR" ] && N_NAME=$i [ -z "$JUDG_VAR" ] && R_NAME=$i done Node_Reboot(){ W_VIP=`/sbin/ip a show $Ne|grep $VIP` Nic_File=/etc/sysconfig/network-scripts/ifcfg-$Ne PHY_IP_FILE=`grep IPADDR $Nic_File|awk -F\= '{print $2}'` IP_FILE_IF=`/sbin/ifconfig $Ne|grep $PHY_IP_FILE` if [[ -z "$W_VIP" && -z "$IP_FILE_IF" ]];then KILL_PID=(`ps aux|grep -E "rgmanager|fenced|dlm_controld|gfs_controld|corosync"|grep -v grep|awk '{print $2}'`) for i in ${KILL_PID[@]};do kill -9 $i &> $NUL done /etc/init.d/rgmanager stop &> $NUL /etc/init.d/cman stop &> $NUL /sbin/reboot &> $NUL fi } FREE_RESOURCE(){ DFGW=`/sbin/route -n|grep $Ne|awk '{print 
$2}'|grep -v "0.0.0.0"` NFS_IP=`grep netfs /etc/cluster/cluster.conf|awk -F\" '{print $8}'|grep -v "^$"` P_CMD="/bin/ping -c 3 -W 1" if ! $P_CMD $VIP &>$NUL;then if $P_CMD $DFGW &>$NUL || $P_CMD $R_NAME &>$NUL || $P_CMD $NFS_IP &>$NUL ;then if ! $P_CMD $VIP &>$NUL;then DOWN_LOG=`/usr/bin/tail -1 /var/log/cluster/rgmanager.log|grep "$R_NAME DOWN"` FENCE_LOG=`tail -1 /var/log/cluster/fenced.log|grep "$R_NAME failed"` if [[ -n "$DOWN_LOG" && -n "$FENCE_LOG" ]];then echo absolutely|/usr/sbin/fence_ack_manual $R_NAME fi fi else echo "`$Date` Gateway:$DFGW and HA_node:$R_NAME and Nfs:$NFS_IP Offline!!" >> $Mlog fi fi } GGA(){ RE=`netstat -anup|grep gmond|grep $VIP|/usr/bin/wc -l` [ "$RE" -eq 4 ] && return 0 MGF=/etc/ganglia/gmond.conf RE=`grep -A 2 -E "udp_send_channel|tcp_accept_channel" $MGF|grep $VIP|/usr/bin/wc -l` if [ "$RE" -ne 2 ];then sed -i "/^udp_accept_channel/a\ \bind = $VIP" $MGF sed -i "/^tcp_accept_channel/a\ \bind = $VIP" $MGF fi GFL=(`find /etc/ganglia/hdp -name "gmond.slave.conf"`) for g in ${GFL[@]};do if grep "bind = $VIP" $g &> $NUL;then continue fi sed -i "/\<host\>/i\ \ bind = $VIP" $g done /etc/init.d/gmond restart &> $NUL /etc/init.d/hdp-gmond restart &> $NUL } while :;do GGA FREE_RESOURCE Node_Reboot sleep $RE_PRD done