这次安装是用4台虚拟机进行安装的,分别是gree129,gree131,gree132,gree134
1. vi ./core-site.xml
fs.defaultFS
hdfs://mycluster/
hadoop.tmp.dir
/opt/soft/hadoop260/hadooptmp
ha.zookeeper.quorum
gree131:2181,gree132:2181,gree134:2181
hadoop.proxyuser.bigdata.hosts
*
hadoop.proxyuser.bigdata.groups
*
2. vi ./hdfs-site.xml
dfs.nameservices
mycluster
dfs.ha.namenodes.mycluster
nn1,nn2
dfs.namenode.rpc-address.mycluster.nn1
gree129:9000
dfs.namenode.http-address.mycluster.nn1
gree129:50070
dfs.namenode.rpc-address.mycluster.nn2
gree131:9000
dfs.namenode.http-address.mycluster.nn2
gree131:50070
dfs.journalnode.edits.dir
/opt/soft/hadoop260/journaldata
dfs.namenode.shared.edits.dir
qjournal://gree129:8485;gree131:8485;gree132:8485/mycluster
dfs.ha.automatic-failover.enabled
true
dfs.client.failover.proxy.provider.mycluster
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.ha.fencing.methods
sshfence
shell(/bin/true)
dfs.ha.fencing.ssh.private-key-files
/root/.ssh/id_rsa
dfs.ha.fencing.ssh.connect-timeout
30000
dfs.webhdfs.enabled
true
3. vi ./yarn-site.xml
yarn.resourcemanager.ha.enabled
true
yarn.resourcemanager.cluster-id
yrc
yarn.resourcemanager.ha.rm-ids
rm1,rm2
yarn.resourcemanager.hostname.rm1
gree129
yarn.resourcemanager.hostname.rm2
gree131
yarn.resourcemanager.zk-address
gree131:2181,gree132:2181,gree134:2181
yarn.nodemanager.aux-services
mapreduce_shuffle
yarn.log-aggregation-enable
true
yarn.log-aggregation.retain-seconds
86400
yarn.resourcemanager.recovery.enabled
true
yarn.resourcemanager.store.class
org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
4. vi ./slaves
gree129
gree131
gree132
gree134