1 Environment Preparation
1.1 Configure the IP address
1.2 Set the hostname and the hostname-to-IP mapping
1.3 Disable the firewall
1.4 Set up passwordless SSH login
1.5 Install the JDK and configure environment variables
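The exact commands vary by distribution; the following is a minimal sketch assuming CentOS 7, a hadoop user with sudo rights, and example IPs 192.168.1.101-103 (the IPs and the JDK path are assumptions, not values from this guide):
[root@linux1 ~]# hostnamectl set-hostname linux1
# map every node in /etc/hosts on every machine (assumed example IPs)
[root@linux1 ~]# cat >> /etc/hosts <<EOF
192.168.1.101 linux1
192.168.1.102 linux2
192.168.1.103 linux3
EOF
# stop and disable the firewall
[root@linux1 ~]# systemctl stop firewalld && systemctl disable firewalld
# passwordless SSH: generate a key and copy it to every node, including this one
[hadoop@linux1 ~]$ ssh-keygen -t rsa
[hadoop@linux1 ~]$ for h in linux1 linux2 linux3; do ssh-copy-id hadoop@$h; done
# JDK environment variables (installation path is an assumption)
[hadoop@linux1 ~]$ sudo vim /etc/profile
export JAVA_HOME=/opt/module/jdk1.8.0_144
export PATH=$JAVA_HOME/bin:$PATH
[hadoop@linux1 ~]$ source /etc/profile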
2 Cluster Planning
linux1: NameNode, JournalNode, DataNode, ZKFC, ZooKeeper, NodeManager
linux2: NameNode, JournalNode, DataNode, ZKFC, ZooKeeper, ResourceManager, NodeManager
linux3: JournalNode, DataNode, ZooKeeper, ResourceManager, NodeManager
3 Install the ZooKeeper Cluster
For a detailed installation walkthrough, see: ZooKeeper cluster setup https://www.linuxidc.com/Linux/2020-03/162767.htm
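For reference, the ensemble section of zoo.cfg for this three-node plan might look like the sketch below (dataDir is an assumption; each node also needs a myid file whose number matches its server.N entry):
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/opt/module/zookeeper/data
clientPort=2181
server.1=linux1:2888:3888
server.2=linux2:2888:3888
server.3=linux3:2888:3888
# on linux1 (use 2 on linux2, 3 on linux3):
[hadoop@linux1 ~]$ echo 1 > /opt/module/zookeeper/data/myid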
4 Configure Hadoop
4.1 Modify core-site.xml
<configuration>
<!-- Assemble the NameNode addresses into a single nameservice, mycluster -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<!-- Directory where Hadoop stores files generated at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop/data/ha/tmp</value>
</property>
<!-- ZooKeeper quorum used by ZKFC for automatic failover -->
<property>
<name>ha.zookeeper.quorum</name>
<value>linux1:2181,linux2:2181,linux3:2181</value>
</property>
</configuration>
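Before depending on this quorum, it is worth confirming that each ZooKeeper node is actually serving (assuming zkServer.sh is on the PATH):
[hadoop@linux1 ~]$ zkServer.sh status   # should report Mode: leader on one node, Mode: follower on the others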
4.2 Modify hdfs-site.xml
<configuration>
<!-- Number of HDFS replicas; the default is 3 -->
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<!-- Logical name (nameservice ID) of the fully distributed cluster -->
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<!-- NameNodes that make up the cluster -->
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>linux1:8020</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>linux2:8020</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>linux1:50070</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>linux2:50070</value>
</property>
<!-- Where the NameNode edit log is stored on the JournalNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://linux1:8485;linux2:8485;linux3:8485/mycluster</value>
</property>
<!-- Fencing method: ensures only one NameNode responds to clients at any time -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- sshfence requires passwordless SSH login -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- Local storage directory of the JournalNode -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/module/hadoop/data/ha/jn</value>
</property>
<!-- Disable HDFS permission checking -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<!-- Proxy provider the HDFS client uses to find the active NameNode and fail over -->
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Enable automatic failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
</configuration>
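These settings only take effect after the HA components are initialized in the right order. A minimal first-start sketch for Hadoop 2.x, run after the configuration has been copied to all nodes:
# start a JournalNode on each of linux1, linux2, linux3
[hadoop@linux1 ~]$ hadoop-daemon.sh start journalnode
# format the first NameNode and start it
[hadoop@linux1 ~]$ hdfs namenode -format
[hadoop@linux1 ~]$ hadoop-daemon.sh start namenode
# copy its metadata to the standby NameNode
[hadoop@linux2 ~]$ hdfs namenode -bootstrapStandby
# initialize the HA state znode in ZooKeeper (once, from either NameNode)
[hadoop@linux1 ~]$ hdfs zkfc -formatZK
# then bring up HDFS, including both ZKFCs
[hadoop@linux1 ~]$ start-dfs.sh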
4.3 Modify mapred-site.xml
[hadoop@linux1 hadoop]$ mv mapred-site.xml.template mapred-site.xml
[hadoop@linux1 hadoop]$ vi mapred-site.xml
<configuration>
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- Host and port of the MapReduce JobHistory server -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>linux1:10020</value>
</property>
<!-- Host and port of the JobHistory server web UI -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>linux1:19888</value>
</property>
<!-- Show at most 20000 historical jobs in the JobHistory web UI -->
<property>
<name>mapreduce.jobhistory.joblist.cache.size</name>
<value>20000</value>
</property>
<!-- Directories for job history logs -->
<property>
<name>mapreduce.jobhistory.done-dir</name>
<value>${yarn.app.mapreduce.am.staging-dir}/history/done</value>
</property>
<property>
<name>mapreduce.jobhistory.intermediate-done-dir</name>
<value>${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate</value>
</property>
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/tmp/hadoop-yarn/staging</value>
</property>
</configuration>
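With these addresses in place, the JobHistory server is started separately on linux1 using the standard Hadoop 2.x daemon script; its web UI is then reachable at http://linux1:19888:
[hadoop@linux1 ~]$ mr-jobhistory-daemon.sh start historyserver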
4.4 Modify slaves
linux1
linux2
linux3
4.5 Modify yarn-site.xml
[hadoop@linux1 hadoop]$ vi yarn-site.xml
<configuration>
<!-- How reducers fetch map output (shuffle service) -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Enable ResourceManager HA -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- Declare the two ResourceManagers -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>rmCluster</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>linux2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>linux3</value>
</property>
<!-- Address of the ZooKeeper ensemble -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>linux1:2181,linux2:2181,linux3:2181</value>
</property>
<!-- Enable automatic recovery -->
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- Store ResourceManager state in the ZooKeeper cluster -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
</configuration>
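Note that in Hadoop 2.x, start-yarn.sh only starts the ResourceManager on the node where it is run, so the second ResourceManager must be started by hand; afterwards the HA state can be checked with yarn rmadmin:
[hadoop@linux2 ~]$ start-yarn.sh
[hadoop@linux3 ~]$ yarn-daemon.sh start resourcemanager
# each command prints "active" or "standby"
[hadoop@linux2 ~]$ yarn rmadmin -getServiceState rm1
[hadoop@linux2 ~]$ yarn rmadmin -getServiceState rm2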
4.6 Copy Hadoop to the other nodes
[hadoop@linux1 module]$ scp -r hadoop/ hadoop@linux2:/opt/module/
[hadoop@linux1 module]$ scp -r hadoop/ hadoop@linux3:/opt/module/
4.7 Configure Hadoop environment variables
[hadoop@linux1 module]$ sudo vim /etc/profile
export HADOOP_HOME=/opt/module/hadoop
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
Apply the changes:
[hadoop@linux1 module]$ source /etc/profile
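Repeat the profile change on linux2 and linux3, then verify that Hadoop is on the PATH:
[hadoop@linux1 module]$ hadoop version   # prints the installed Hadoop version and build info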