文章目录
- Hadoop2.7.2 HA部署
- 解压
- 改名
- 配置环境变量
- 配置 hadoop-env.sh
- 配置yarn-env.sh
- 配置mapred-env.sh
- 配置slaves
- 配置 core-site.xml
- 配置 hdfs-site.xml
- 配置yarn-site.xml
- 初始化
tar -zxvf hadoop-2.7.2.tar.gz -C /opt/
改名
cd /opt
mv hadoop-2.7.2 hadoop
配置环境变量
sudo vi /etc/profile
添加
#HADOOP_HOME
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
使环境变量生效
source /etc/profile
配置 hadoop-env.sh
export JAVA_HOME=/opt/jdk
配置yarn-env.sh
export JAVA_HOME=/opt/jdk
配置mapred-env.sh
export JAVA_HOME=/opt/jdk
配置slaves
server1
server2
server3
配置 core-site.xml
fs.defaultFS hdfs://mycluster
hadoop.tmp.dir /opt/hadoop/data/tmp
ha.zookeeper.quorum server1:2181,server2:2181,server3:2181
ipc.client.connect.max.retries 10
ipc.client.connect.retry.interval 1000
配置 hdfs-site.xml
dfs.nameservices mycluster
dfs.ha.namenodes.mycluster nn1,nn2
dfs.namenode.rpc-address.mycluster.nn1 server1:9000
dfs.namenode.rpc-address.mycluster.nn2 server2:9000
dfs.namenode.http-address.mycluster.nn1 server1:50070
dfs.namenode.http-address.mycluster.nn2 server2:50070
dfs.namenode.shared.edits.dir qjournal://server1:8485;server2:8485;server3:8485/mycluster
dfs.ha.fencing.methods sshfence
dfs.ha.fencing.ssh.private-key-files /home/server/.ssh/id_rsa
dfs.journalnode.edits.dir /opt/hadoop/data/jn
dfs.permissions.enabled false
dfs.client.failover.proxy.provider.mycluster org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.ha.automatic-failover.enabled true
配置yarn-site.xml
yarn.nodemanager.aux-services mapreduce_shuffle
yarn.resourcemanager.ha.enabled true
yarn.resourcemanager.cluster-id cluster-yarn1
yarn.resourcemanager.ha.rm-ids rm1,rm2
yarn.resourcemanager.hostname.rm1 server1
yarn.resourcemanager.hostname.rm2 server2
yarn.resourcemanager.zk-address server1:2181,server2:2181,server3:2181
yarn.resourcemanager.recovery.enabled true
yarn.resourcemanager.store.class org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
初始化
各个节点启动journalnode
hadoop-daemon.sh start journalnode
节点一初始化namenode,然后启动namenode
hdfs namenode -format
hadoop-daemon.sh start namenode
节点二同步namenode的信息
hdfs namenode -bootstrapStandby
第二个节点启动第二个namenode
hadoop-daemon.sh start namenode
在节点一启动所有节点的datanode
hadoop-daemons.sh start datanode
检查完之后关闭
stop-dfs.sh
各个节点开启zookeeper
zkServer.sh start
第一个节点初始化hadoop在zookeeper里面的状态
hdfs zkfc -formatZK
再次启动
start-dfs.sh
第一个节点启动
start-yarn.sh
第二个节点启动
yarn-daemon.sh start resourcemanager
查看服务状态
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2



