本文共 3142 字,大约阅读时间需要 10 分钟。
HDFS服务启停,下面脚本需要在master节点运行,且需要在hdfs账号下执行:su hdfs
执行下面脚本
#!/bin/bash
# Start HDFS: NameNode(s) on the master node(s), DataNodes on the workers.
# Run on the master node as user "hdfs".

# Number of worker nodes, derived from the cluster host entries in /etc/hosts.
worker_cnt=$(grep emr-worker /etc/hosts | grep -c cluster)
master_cnt=1

# Non-empty when the cluster was deployed in high-availability mode.
ha_flag=$(grep -r high_availability_enable=true /usr/local/emr/emr-bin/script/)
nn_file=/usr/local/emr/emr-bin/script/hdfs/pre_start.sh
dn_file=/usr/local/emr/emr-bin/script/hdfs/start_up.sh
if [[ -n "$ha_flag" ]]; then
  # HA clusters have two masters and use the ha_hdfs helper scripts.
  master_cnt=2
  nn_file=/usr/local/emr/emr-bin/script/ha_hdfs/pre_start.sh
  dn_file=/usr/local/emr/emr-bin/script/ha_hdfs/start_up.sh
fi

# Extract the real "start namenode"/"start datanode" command lines from the
# EMR helper scripts: first non-echo match, text between the first pair of
# double quotes.
nn_cmd="export app_yarn_home=/usr/lib/hadoop-current;"$(grep -v 'echo' "$nn_file" | grep 'start namenode' | head -n 1 | awk -F '"' '{print $2;}')
dn_cmd="export app_yarn_home=/usr/lib/hadoop-current;"$(grep -v 'echo' "$dn_file" | grep 'start datanode' | head -n 1 | awk -F '"' '{print $2;}')

# Start NameNode(s).
for ((i=1; i<=master_cnt; i++)); do
  echo "master--$i"
  echo "$nn_cmd"
  if [[ $i -eq 2 ]]; then
    # The standby NameNode in an HA pair must sync metadata from the active
    # one before its first start; answer "N" to the overwrite prompt.
    ssh "emr-header-$i" "/usr/lib/hadoop-current/bin/hdfs namenode -bootstrapStandby <<< N"
  fi
  ssh "emr-header-$i" "$nn_cmd"
done

# Start DataNodes.
for ((i=1; i<=worker_cnt; i++)); do
  echo "$dn_cmd"
  ssh "emr-worker-$i" "$dn_cmd"
done
执行下面脚本
#!/bin/bash
# Stop HDFS: NameNode(s) on the master node(s), DataNodes on the workers.
# Run on the master node as user "hdfs".

# Number of worker nodes, derived from the cluster host entries in /etc/hosts.
worker_cnt=$(grep emr-worker /etc/hosts | grep -c cluster)
master_cnt=1

# Non-empty when the cluster was deployed in high-availability mode.
ha_flag=$(grep -r high_availability_enable=true /usr/local/emr/emr-bin/script/)
if [[ -n "$ha_flag" ]]; then
  # HA clusters run a NameNode on each of the two masters.
  master_cnt=2
fi

nn_cmd='/usr/lib/hadoop-current/sbin/hadoop-daemon.sh stop namenode'
dn_cmd='/usr/lib/hadoop-current/sbin/hadoop-daemon.sh stop datanode'

# Stop NameNode(s).
for ((i=1; i<=master_cnt; i++)); do
  ssh "emr-header-$i" "$nn_cmd"
done

# Stop DataNodes.
for ((i=1; i<=worker_cnt; i++)); do
  ssh "emr-worker-$i" "$dn_cmd"
done
启停YARN服务,下面的脚本需要在master节点运行,且需要在hadoop账号下,su hadoop。
执行下面脚本
#!/bin/bash
# Start YARN: ResourceManager(s) on the master node(s), NodeManagers on the
# workers. Run on the master node as user "hadoop".

# Number of worker nodes, derived from the cluster host entries in /etc/hosts.
worker_cnt=$(grep emr-worker /etc/hosts | grep -c cluster)
master_cnt=1

# Non-empty when the cluster was deployed in high-availability mode.
ha_flag=$(grep -r high_availability_enable=true /usr/local/emr/emr-bin/script/)
yarn_file=/usr/local/emr/emr-bin/script/yarn/start_up.sh
if [[ -n "$ha_flag" ]]; then
  # HA clusters have two masters and use the ha_yarn helper script.
  master_cnt=2
  yarn_file=/usr/local/emr/emr-bin/script/ha_yarn/start_up.sh
fi

# Extract the real "start resourcemanager"/"start nodemanager" command lines
# from the EMR helper script: first non-echo match, text between the first
# pair of double quotes.
rm_cmd="export app_yarn_home=/usr/lib/hadoop-current;"$(grep -v 'echo' "$yarn_file" | grep 'start resourcemanager' | head -n 1 | awk -F '"' '{print $2;}')
nm_cmd="export app_yarn_home=/usr/lib/hadoop-current;"$(grep -v 'echo' "$yarn_file" | grep 'start nodemanager' | head -n 1 | awk -F '"' '{print $2;}')

# Start ResourceManager(s).
for ((i=1; i<=master_cnt; i++)); do
  ssh "emr-header-$i" "$rm_cmd"
done

# Start NodeManagers.
for ((i=1; i<=worker_cnt; i++)); do
  ssh "emr-worker-$i" "$nm_cmd"
done
执行下面脚本
#!/bin/bash
# Stop YARN: ResourceManager(s) on the master node(s), NodeManagers on the
# workers. Run on the master node as user "hadoop".
#
# BUG FIX: the original script was copy-pasted from the HDFS stop script and
# ran "hadoop-daemon.sh stop namenode/datanode" — i.e. it tried to kill HDFS
# daemons (under the wrong account) instead of stopping YARN. It now stops
# the ResourceManager/NodeManager daemons via yarn-daemon.sh as the comments
# always intended.

# Number of worker nodes, derived from the cluster host entries in /etc/hosts.
worker_cnt=$(grep emr-worker /etc/hosts | grep -c cluster)
master_cnt=1

# Non-empty when the cluster was deployed in high-availability mode.
ha_flag=$(grep -r high_availability_enable=true /usr/local/emr/emr-bin/script/)
if [[ -n "$ha_flag" ]]; then
  # HA clusters run a ResourceManager on each of the two masters.
  master_cnt=2
fi

rm_cmd='/usr/lib/hadoop-current/sbin/yarn-daemon.sh stop resourcemanager'
nm_cmd='/usr/lib/hadoop-current/sbin/yarn-daemon.sh stop nodemanager'

# Stop ResourceManager(s).
for ((i=1; i<=master_cnt; i++)); do
  ssh "emr-header-$i" "$rm_cmd"
done

# Stop NodeManagers.
for ((i=1; i<=worker_cnt; i++)); do
  ssh "emr-worker-$i" "$nm_cmd"
done
转载地址:http://ddwal.baihongyu.com/