Hadoop HA Setup
Start from a copy of the Hadoop cluster environment installed in the previous post:
1. cd /opt/module
mkdir HA
chmod 777 HA/
cp -r /opt/module/hadoop-2.7.2 /opt/module/HA/
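A less permissive alternative to chmod 777, assuming the cluster runs as user hadoop (the fencing key configured below lives under /home/hadoop):
sudo chown -R hadoop:hadoop /opt/module/HA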
2. Work inside the HA directory
============core-site.xml==================
<configuration>
<!-- Combine the addresses of the two NameNodes into a single cluster, mycluster -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<!-- Declare the local filesystem directory where the JournalNode service stores its data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/module/HA/hadoop-2.7.2/data/jn</value>
</property>
<!-- Specify the directory for files Hadoop generates at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/HA/hadoop-2.7.2/data/tmp</value>
</property>
<property>
<name>ipc.client.connect.max.retries</name>
<value>20</value>
<description>
Indicates the number of retries a client will make to establish a server connection.
</description>
</property>
<property>
<name>ipc.client.connect.retry.interval</name>
<value>5000</value>
<description>
Indicates the number of milliseconds a client will wait for before retrying to establish a server connection.
</description>
</property>
<!-- ZooKeeper quorum used for automatic failover -->
<property>
<name>ha.zookeeper.quorum</name>
<value>hadoop108:2181,hadoop109:2181,hadoop110:2181</value>
</property>
</configuration>
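With fs.defaultFS pointing at the nameservice, clients address the cluster logically instead of by a single NameNode host. A quick check once HDFS is running (install path as above):
/opt/module/HA/hadoop-2.7.2/bin/hdfs dfs -ls hdfs://mycluster/
/opt/module/HA/hadoop-2.7.2/bin/hdfs dfs -ls /     # equivalent, resolved via fs.defaultFS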
============dfs.hosts==================
hadoop108
hadoop109
hadoop110
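Note: this include file only takes effect if hdfs-site.xml references it. A minimal sketch, assuming the file sits in the Hadoop config directory:
<!-- Hosts allowed to connect as DataNodes (path is an assumption) -->
<property>
<name>dfs.hosts</name>
<value>/opt/module/HA/hadoop-2.7.2/etc/hadoop/dfs.hosts</value>
</property>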
============slaves==================
hadoop108
hadoop109
hadoop110
============hdfs-site.xml==================
<configuration>
<!-- Name of the fully distributed cluster (the nameservice) -->
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<!-- List the NameNodes in the cluster -->
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>hadoop108:8020</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>hadoop109:8020</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>hadoop108:50070</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>hadoop109:50070</value>
</property>
<!-- Where NameNode metadata (edit logs) is stored on the JournalNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://hadoop108:8485;hadoop109:8485;hadoop110:8485/mycluster</value>
</property>
<!-- Configure fencing so that only one NameNode serves clients at any moment -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- ssh fencing requires passwordless SSH login -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- Disable permission checking -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<!-- Failover proxy provider: the class HDFS clients use to find the active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Enable automatic failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
</configuration>
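To check which NameNode holds the active role (standard haadmin commands in Hadoop 2.x):
/opt/module/HA/hadoop-2.7.2/bin/hdfs haadmin -getServiceState nn1
/opt/module/HA/hadoop-2.7.2/bin/hdfs haadmin -getServiceState nn2
One should print active and the other standby.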
============mapred-site.xml==================
<configuration>
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- JobHistory server -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop108:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop108:19888</value>
</property>
<!-- Enable compression of map output -->
<property>
<name>mapreduce.map.output.compress</name>
<value>true</value>
</property>
<!-- Codec to use for map output -->
<property>
<name>mapreduce.map.output.compress.codec</name>
<value>org.apache.hadoop.io.compress.DefaultCodec</value>
</property>
<!-- Enable compression of reducer (job) output -->
<property>
<name>mapreduce.output.fileoutputformat.compress</name>
<value>true</value>
</property>
<!-- Codec to use for job output -->
<property>
<name>mapreduce.output.fileoutputformat.compress.codec</name>
<value>org.apache.hadoop.io.compress.DefaultCodec</value>
</property>
<!-- Compression type for SequenceFile output: NONE, RECORD, or BLOCK -->
<property>
<name>mapreduce.output.fileoutputformat.compress.type</name>
<value>RECORD</value>
</property>
</configuration>
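None of the scripts below starts the JobHistory server; a minimal sketch to start it on hadoop108, assuming the same install path:
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/mr-jobhistory-daemon.sh start historyserver'
The web UI is then at http://hadoop108:19888.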
============yarn-site.xml==================
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Declare the two ResourceManagers -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>cluster-yarn1</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop108</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop109</value>
</property>
<!-- ZooKeeper quorum address -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>hadoop108:2181,hadoop109:2181,hadoop110:2181</value>
</property>
<!-- Enable automatic recovery -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Store ResourceManager state in the ZooKeeper cluster -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
</configuration>
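To verify ResourceManager HA after startup (standard rmadmin commands in Hadoop 2.x; one should print active, the other standby):
/opt/module/HA/hadoop-2.7.2/bin/yarn rmadmin -getServiceState rm1
/opt/module/HA/hadoop-2.7.2/bin/yarn rmadmin -getServiceState rm2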
=============After editing, sync to the other two machines=====================
xsync /opt/module/HA/hadoop-2.7.2
=====================xsync script=============================
#!/bin/bash
#1 Get the number of arguments; exit immediately if there are none
pcount=$#
if((pcount==0));then
echo no args;
exit;
fi
#2 Get the file name
p1=$1
fname=`basename $p1`
echo fname=$fname
#3 Resolve the parent directory to an absolute path
pdir=`cd -P $(dirname $p1); pwd`
echo pdir=$pdir
#4 Get the current user name
user=`whoami`
#5 Loop over the remaining hosts
for((host=109;host<=110;host++));do
#echo $pdir/$fname $user@hadoop$host:$pdir
echo --------------hadoop$host----------------
rsync -rvl $pdir/$fname $user@hadoop$host:$pdir
done
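For the xsync call above to work, the script is assumed to live on the PATH and be executable, e.g.:
sudo cp xsync /usr/local/bin/xsync
sudo chmod +x /usr/local/bin/xsync
# Pushes /opt/module/HA/hadoop-2.7.2 to the same path on hadoop109 and hadoop110
xsync /opt/module/HA/hadoop-2.7.2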
=================Initialization script============================
#!/bin/bash
echo "================ 初始化 ==========="
#ssh hadoop108 'cd /opt/module/HA/hadoop-2.7.2 ; rm -rfv data/ logs/ '
#ssh hadoop109 'cd /opt/module/HA/hadoop-2.7.2 ; rm -rfv data/ logs/ '
#ssh hadoop110 'cd /opt/module/HA/hadoop-2.7.2 ; rm -rfv data/ logs/ '
echo "=========在各个JournalNode节点上,输入以下命令启动journalnode服务=========="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
ssh hadoop110 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
echo "============在[nn1]上,对其进行格式化,并启动===================="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/bin/hdfs namenode -format'
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start namenode'
echo "=====================在[nn2]上,同步nn1的元数据信息=========================="
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/bin/hdfs namenode -bootstrapStandby'
echo "================启动[nn2]==================================="
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start namenode'
echo "======================关闭namenode=============================="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop namenode'
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop namenode'
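A quick sanity check after initialization: the JournalNode edits directory configured in core-site.xml should now contain the mycluster journal on every node:
for i in hadoop108 hadoop109 hadoop110
do
ssh $i 'ls /opt/module/HA/hadoop-2.7.2/data/jn/mycluster'
done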
======================Shutdown script===========================
#!/bin/bash
echo "================ 开始关闭所有节点服务 ==========="
echo "================ 正在关闭YARN ==========="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/stop-yarn.sh'
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/yarn-daemon.sh stop resourcemanager'
#echo "============各个JournalNode节点上,输入以下命令关闭journalnode服务===================="
#ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop journalnode'
#ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop journalnode'
#ssh hadoop110 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop journalnode'
echo "================ 正在关闭Zookeeper ==========="
for i in hadoop108 hadoop109 hadoop110
do
ssh $i 'source /etc/profile ;cd /opt/module/zookeeper-3.4.10/bin/ ; ./zkServer.sh stop'
echo "$i done"
done
echo "==============关闭DFSZK Failover Controller============="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop zkfc'
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh stop zkfc'
echo "==================关闭HDFS==========================="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/stop-dfs.sh'
echo "======================查看服务状态==================="
for i in hadoop108 hadoop109 hadoop110
do
echo "=========== $i ==========="
ssh $i '/opt/module/jdk1.8.0_191/bin/jps'
done
echo "-------------------------架构-------------------------------- ------------"
echo "-----hadoop108------------hadoop109---------------hadoop110- -------------"
echo "-----NameNode-------------NameNode----------------------------------------"
echo "-----JournalNode----------JournalNode-------------JournalNode-------------"
echo "-----DataNode-------------DataNode----------------DataNode----------------"
echo "-----ZK-------------------ZK----------------------ZK----------------------"
echo "-----ResourceManager------ResourceManager---------------------------------"
echo "-----NodeManager----------NodeManager-------------NodeManager-------------"
=====================Cluster startup script=========================
#!/bin/bash
echo "================ 开始启动所有节点服务 ==========="
#echo "============各个JournalNode节点上,输入以下命令启动journalnode服务===================="
#ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
#ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
#ssh hadoop110 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
#echo "==================关闭HDFS==========================="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/stop-dfs.sh'
echo "================ 正在启动Zookeeper ==========="
for i in hadoop108 hadoop109 hadoop110
do
ssh $i 'source /etc/profile ;cd /opt/module/zookeeper-3.4.10/bin/ ; ./zkServer.sh start'
echo "$i done"
done
echo "================初始化HA在Zookeeper中状态=========================="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/bin/hdfs zkfc -formatZK'
echo "================ 正在启动HDFS ==========="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/start-dfs.sh'
echo "==============启动DFSZK Failover Controller============="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start zkfc'
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/hadoop-daemon.sh start zkfc'
echo "================ 正在启动YARN ==========="
ssh hadoop108 '/opt/module/HA/hadoop-2.7.2/sbin/start-yarn.sh'
ssh hadoop109 '/opt/module/HA/hadoop-2.7.2/sbin/yarn-daemon.sh start resourcemanager'
echo "======================查看服务状态==================="
for i in hadoop108 hadoop109 hadoop110
do
echo "=========== $i ==========="
ssh $i '/opt/module/jdk1.8.0_191/bin/jps'
done
echo "-------------------------架构-------------------------------- ------------"
echo "-----hadoop108------------hadoop109---------------hadoop110- -------------"
echo "-----NameNode-------------NameNode----------------------------------------"
echo "-----JournalNode----------JournalNode-------------JournalNode-------------"
echo "-----DataNode-------------DataNode----------------DataNode----------------"
echo "-----ZK-------------------ZK----------------------ZK----------------------"
echo "-----ResourceManager------ResourceManager---------------------------------"
echo "-----NodeManager----------NodeManager-------------NodeManager-------------"
==========================Status check script=================================
#!/bin/bash
for i in hadoop108 hadoop109 hadoop110
do
echo "=========== $i ==========="
ssh $i '/opt/module/jdk1.8.0_191/bin/jps'
done
===============================================