在zhiyong2:
┌──────────────────────────────────────────────────────────────────────┐
│ • MobaXterm Personal Edition v21.4 • │
│ (SSH client, X server and network tools) │
│ │
│ ➤ SSH session to root@192.168.88.101 │
│ • Direct SSH : ✔ │
│ • SSH compression : ✔ │
│ • SSH-browser : ✔ │
│ • X11-forwarding : ✔ (remote display is forwarded through SSH) │
│ │
│ ➤ For more info, ctrl+click on help or visit our website. │
└──────────────────────────────────────────────────────────────────────┘
Last login: Wed Mar 2 22:16:34 2022
/usr/bin/xauth: file /root/.Xauthority does not exist
[root@zhiyong2 ~]# cd /opt/usdp-srv/srv/udp/2.0.0.0/hdfs/bin
[root@zhiyong2 bin]# ll
总用量 804
-rwxr-xr-x. 1 hadoop hadoop 98 3月 1 23:06 bootstrap-namenode.sh
-rwxr-xr-x. 1 hadoop hadoop 372928 11月 15 2020 container-executor
-rwxr-xr-x. 1 hadoop hadoop 88 3月 1 23:06 format-namenode.sh
-rwxr-xr-x. 1 hadoop hadoop 86 3月 1 23:06 format-zkfc.sh
-rwxr-xr-x. 1 hadoop hadoop 8580 11月 15 2020 hadoop
-rwxr-xr-x. 1 hadoop hadoop 11417 3月 1 23:06 hdfs
-rwxr-xr-x. 1 hadoop hadoop 6237 11月 15 2020 mapred
-rwxr-xr-x. 1 hadoop hadoop 387368 11月 15 2020 test-container-executor
-rwxr-xr-x. 1 hadoop hadoop 11888 11月 15 2020 yarn
[root@zhiyong2 bin]# ./hadoop fs -ls /
Found 6 items
drwxr-xr-x - hadoop supergroup 0 2022-03-02 22:27 /hbase
drwxr-xr-x - hadoop supergroup 0 2022-03-01 23:08 /tez
drwxrwxr-x - hadoop supergroup 0 2022-03-01 23:08 /tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 /tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 /user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:12 /zhiyong-1
也可以直接通过IP或者映射访问:
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong2:8020/
ls: Operation category READ is not supported in state standby. Visit https://s.apache.org/sbnn-error
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong3:8020/
Found 6 items
drwxr-xr-x - hadoop supergroup 0 2022-03-02 22:27 hdfs://zhiyong3:8020/hbase
drwxr-xr-x - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong3:8020/tez
drwxrwxr-x - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong3:8020/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 hdfs://zhiyong3:8020/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 hdfs://zhiyong3:8020/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:12 hdfs://zhiyong3:8020/zhiyong-1
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong4:8020/
ls: Call From zhiyong2/192.168.88.101 to zhiyong4:8020 failed on connection exception: java.net.ConnectException: 拒绝连接; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
也可以跨集群访问:
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong5:8020/
ls: Operation category READ is not supported in state standby. Visit https://s.apache.org/sbnn-error
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong6:8020/
Found 6 items
drwxr-xr-x - hadoop supergroup 0 2022-03-02 22:39 hdfs://zhiyong6:8020/hbase
drwxr-xr-x - hadoop supergroup 0 2022-03-01 23:34 hdfs://zhiyong6:8020/tez
drwxrwxr-x - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong6:8020/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong6:8020/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong6:8020/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:38 hdfs://zhiyong6:8020/zhiyong-2
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong7:8020/
ls: Call From zhiyong2/192.168.88.101 to zhiyong7:8020 failed on connection exception: java.net.ConnectException: 拒绝连接; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
可以直接访问域:
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong-1/
Found 6 items
drwxr-xr-x - hadoop supergroup 0 2022-03-02 22:27 hdfs://zhiyong-1/hbase
drwxr-xr-x - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong-1/tez
drwxrwxr-x - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong-1/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 hdfs://zhiyong-1/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 hdfs://zhiyong-1/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:12 hdfs://zhiyong-1/zhiyong-1
[root@zhiyong2 bin]# ./hadoop fs -ls hdfs://zhiyong-2/
-ls: java.net.UnknownHostException: zhiyong-2
Usage: hadoop fs [generic options]
[-appendToFile ... ]
[-cat [-ignoreCrc] ...]
[-checksum ...]
[-chgrp [-R] GROUP PATH...]
[-chmod [-R] PATH...]
[-chown [-R] [OWNER][:[GROUP]] PATH...]
[-copyFromLocal [-f] [-p] [-l] [-d] [-t ] ... ]
[-copyToLocal [-f] [-p] [-ignoreCrc] [-crc] ... ]
[-count [-q] [-h] [-v] [-t []] [-u] [-x] [-e] ...]
[-cp [-f] [-p | -p[topax]] [-d] ... ]
[-createSnapshot []]
[-deleteSnapshot ]
[-df [-h] [ ...]]
[-du [-s] [-h] [-v] [-x] ...]
[-expunge]
[-find ... ...]
[-get [-f] [-p] [-ignoreCrc] [-crc] ... ]
[-getfacl [-R] ]
[-getfattr [-R] {-n name | -d} [-e en] ]
[-getmerge [-nl] [-skip-empty-file] ]
[-head ]
[-help [cmd ...]]
[-ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] [-e] [ ...]]
[-mkdir [-p] ...]
[-moveFromLocal ... ]
[-moveToLocal ]
[-mv ... ]
[-put [-f] [-p] [-l] [-d] ... ]
[-renameSnapshot ]
[-rm [-f] [-r|-R] [-skipTrash] [-safely] ...]
[-rmdir [--ignore-fail-on-non-empty] ...]
[-setfacl [-R] [{-b|-k} {-m|-x } ]|[--set ]]
[-setfattr {-n name [-v value] | -x name} ]
[-setrep [-R] [-w] ...]
[-stat [format] ...]
[-tail [-f] ]
[-test -[defsz] ]
[-text [-ignoreCrc] ...]
[-touchz ...]
[-truncate [-w] ...]
[-usage [cmd ...]]
Generic options supported are:
-conf specify an application configuration file
-D define a value for a given property
-fs specify default filesystem URL to use, overrides 'fs.defaultFS' property from configurations.
-jt specify a ResourceManager
-files specify a comma-separated list of files to be copied to the map reduce cluster
-libjars specify a comma-separated list of jar files to be included in the classpath
-archives specify a comma-separated list of archives to be unarchived on the compute machines
The general command line syntax is:
command [genericOptions] [commandOptions]
Usage: hadoop fs [generic options] -ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] [-e] [ ...]
但是集群之间默认没有打通,故USDP新集群不能直接通过域进行跨集群操作。
打通集群：查找hdfs-site.xml配置
顺便提一句，这个监控界面还是很讨我喜欢的……
可以在hdfs-site.xml看到这些内容:
dfs.replication 3 dfs.name.dir /data/udp/2.0.0.0/hdfs/dfs/nn dfs.data.dir /data/udp/2.0.0.0/hdfs/dfs/data dfs.journalnode.edits.dir /data/udp/2.0.0.0/hdfs/jnData dfs.ha.namenodes.zhiyong-1 nn1,nn2 dfs.namenode.rpc-address.zhiyong-1.nn1 zhiyong2:8020 dfs.namenode.rpc-address.zhiyong-1.nn2 zhiyong3:8020 dfs.namenode.http-address.zhiyong-1.nn1 zhiyong2:50070 dfs.namenode.http-address.zhiyong-1.nn2 zhiyong3:50070 ha.zookeeper.quorum zhiyong2:2181,zhiyong3:2181,zhiyong4:2181 dfs.namenode.shared.edits.dir qjournal://zhiyong2:8485;zhiyong3:8485;zhiyong4:8485/zhiyong-1 dfs.client.failover.proxy.provider.zhiyong-1 org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider dfs.ha.fencing.methods sshfence(hadoop:22) dfs.ha.fencing.ssh.connect-timeout 30000 dfs.ha.fencing.ssh.private-key-files /home/hadoop/.ssh/id_rsa dfs.ha.automatic-failover.enabled true dfs.datanode.max.xcievers 4096 dfs.permissions.enable false dfs.webhdfs.enabled true dfs.namenode.heartbeat.recheck-interval 45000 fs.trash.interval 7320 dfs.datanode.max.transfer.threads 8192 dfs.image.compress true dfs.namenode.num.checkpoints.retained 12 dfs.datanode.data.dir.perm 750 dfs.datanode.handler.count 50 dfs.namenode.handler.count 50 dfs.socket.timeout 900000 dfs.hosts.exclude /srv/udp/2.0.0.0/hdfs/etc/hadoop/excludes dfs.namenode.replication.max-streams 32 dfs.namenode.replication.max-streams-hard-limit 200 dfs.namenode.replication.work.multiplier.per.iteration 200 dfs.datanode.balance.bandwidthPerSec 10485760 dfs.disk.balancer.enabled true dfs.disk.balancer.max.disk.throughputInMBperSec 50 dfs.disk.balancer.plan.threshold.percent 2 dfs.disk.balancer.block.tolerance.percent 5
zhiyong-2集群的这个xml内容:
筛选有用的配置：
dfs.replication 3 dfs.name.dir /data/udp/2.0.0.0/hdfs/dfs/nn dfs.data.dir /data/udp/2.0.0.0/hdfs/dfs/data dfs.journalnode.edits.dir /data/udp/2.0.0.0/hdfs/jnData dfs.ha.namenodes.zhiyong-2 nn1,nn2 dfs.namenode.rpc-address.zhiyong-2.nn1 zhiyong5:8020 dfs.namenode.rpc-address.zhiyong-2.nn2 zhiyong6:8020 dfs.namenode.http-address.zhiyong-2.nn1 zhiyong5:50070 dfs.namenode.http-address.zhiyong-2.nn2 zhiyong6:50070 ha.zookeeper.quorum zhiyong5:2181,zhiyong6:2181,zhiyong7:2181 dfs.namenode.shared.edits.dir qjournal://zhiyong5:8485;zhiyong6:8485;zhiyong7:8485/zhiyong-2 dfs.client.failover.proxy.provider.zhiyong-2 org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider dfs.ha.fencing.methods sshfence(hadoop:22) dfs.ha.fencing.ssh.connect-timeout 30000 dfs.ha.fencing.ssh.private-key-files /home/hadoop/.ssh/id_rsa dfs.ha.automatic-failover.enabled true dfs.datanode.max.xcievers 4096 dfs.permissions.enable false dfs.webhdfs.enabled true dfs.namenode.heartbeat.recheck-interval 45000 fs.trash.interval 7320 dfs.datanode.max.transfer.threads 8192 dfs.image.compress true dfs.namenode.num.checkpoints.retained 12 dfs.datanode.data.dir.perm 750 dfs.datanode.handler.count 50 dfs.namenode.handler.count 50 dfs.socket.timeout 900000 dfs.hosts.exclude /srv/udp/2.0.0.0/hdfs/etc/hadoop/excludes dfs.namenode.replication.max-streams 32 dfs.namenode.replication.max-streams-hard-limit 200 dfs.namenode.replication.work.multiplier.per.iteration 200 dfs.datanode.balance.bandwidthPerSec 10485760 dfs.disk.balancer.enabled true dfs.disk.balancer.max.disk.throughputInMBperSec 50 dfs.disk.balancer.plan.threshold.percent 2 dfs.disk.balancer.block.tolerance.percent 5
zhiyong-1中与跨nameservice域访问相关的配置为:
dfs.ha.namenodes.zhiyong-1 nn1,nn2 dfs.namenode.rpc-address.zhiyong-1.nn1 zhiyong2:8020 dfs.namenode.rpc-address.zhiyong-1.nn2 zhiyong3:8020 dfs.namenode.http-address.zhiyong-1.nn1 zhiyong2:50070 dfs.namenode.http-address.zhiyong-1.nn2 zhiyong3:50070 dfs.client.failover.proxy.provider.zhiyong-1 org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
zhiyong-2中与跨nameservice域访问相关的配置为:
添加配置：
dfs.ha.namenodes.zhiyong-2 nn1,nn2 dfs.namenode.rpc-address.zhiyong-2.nn1 zhiyong5:8020 dfs.namenode.rpc-address.zhiyong-2.nn2 zhiyong6:8020 dfs.namenode.http-address.zhiyong-2.nn1 zhiyong5:50070 dfs.namenode.http-address.zhiyong-2.nn2 zhiyong6:50070 dfs.client.failover.proxy.provider.zhiyong-2 org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
所以,在zhiyong-1中需要添加:
dfs.nameservices zhiyong-1,zhiyong-2 dfs.ha.namenodes.zhiyong-2 nn1,nn2 dfs.namenode.rpc-address.zhiyong-2.nn1 zhiyong5:8020 dfs.namenode.rpc-address.zhiyong-2.nn2 zhiyong6:8020 dfs.namenode.http-address.zhiyong-2.nn1 zhiyong5:50070 dfs.namenode.http-address.zhiyong-2.nn2 zhiyong6:50070 dfs.client.failover.proxy.provider.zhiyong-2 org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
同理,在zhiyong-2中需要添加:
同步配置、滚动重启HDFS：
dfs.nameservices zhiyong-1,zhiyong-2 dfs.ha.namenodes.zhiyong-1 nn1,nn2 dfs.namenode.rpc-address.zhiyong-1.nn1 zhiyong2:8020 dfs.namenode.rpc-address.zhiyong-1.nn2 zhiyong3:8020 dfs.namenode.http-address.zhiyong-1.nn1 zhiyong2:50070 dfs.namenode.http-address.zhiyong-1.nn2 zhiyong3:50070 dfs.client.failover.proxy.provider.zhiyong-1 org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
然后滚动重启2套集群的HDFS:
还需要重启节点:
事实上光是这样还不能访问。不出意外的话会看到-ls: java.net.UnknownHostException。这种报错的解决方式如下:
[root@zhiyong5 /]# ./hadoop fs -ls hdfs://zhiyong-1/
-bash: ./hadoop: 没有那个文件或目录
[root@zhiyong5 /]# cd /opt/usdp-srv/srv/udp/2.0.0.0/hdfs/bin
[root@zhiyong5 bin]# ll
总用量 804
-rwxr-xr-x. 1 hadoop hadoop 98 3月 1 23:32 bootstrap-namenode.sh
-rwxr-xr-x. 1 hadoop hadoop 372928 11月 15 2020 container-executor
-rwxr-xr-x. 1 hadoop hadoop 88 3月 1 23:32 format-namenode.sh
-rwxr-xr-x. 1 hadoop hadoop 86 3月 1 23:32 format-zkfc.sh
-rwxr-xr-x. 1 hadoop hadoop 8580 11月 15 2020 hadoop
-rwxr-xr-x. 1 hadoop hadoop 11417 3月 11 23:11 hdfs
-rwxr-xr-x. 1 hadoop hadoop 6237 11月 15 2020 mapred
-rwxr-xr-x. 1 hadoop hadoop 387368 11月 15 2020 test-container-executor
-rwxr-xr-x. 1 hadoop hadoop 11888 11月 15 2020 yarn
[root@zhiyong5 bin]# hadoop fs -ls hdfs://zhiyong-1/
-ls: java.net.UnknownHostException: zhiyong-1
Usage: hadoop fs [generic options]
[-appendToFile ... ]
[-cat [-ignoreCrc] ...]
[-checksum ...]
[-chgrp [-R] GROUP PATH...]
[-chmod [-R] PATH...]
[-chown [-R] [OWNER][:[GROUP]] PATH...]
[-copyFromLocal [-f] [-p] [-l] [-d] [-t ] ... ]
[-copyToLocal [-f] [-p] [-ignoreCrc] [-crc] ... ]
[-count [-q] [-h] [-v] [-t []] [-u] [-x] [-e] ...]
[-cp [-f] [-p | -p[topax]] [-d] ... ]
[-createSnapshot []]
[-deleteSnapshot ]
[-df [-h] [ ...]]
[-du [-s] [-h] [-v] [-x] ...]
[-expunge]
[-find ... ...]
[-get [-f] [-p] [-ignoreCrc] [-crc] ... ]
[-getfacl [-R] ]
[-getfattr [-R] {-n name | -d} [-e en] ]
[-getmerge [-nl] [-skip-empty-file] ]
[-head ]
[-help [cmd ...]]
[-ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] [-e] [ ...]]
[-mkdir [-p] ...]
[-moveFromLocal ... ]
[-moveToLocal ]
[-mv ... ]
[-put [-f] [-p] [-l] [-d] ... ]
[-renameSnapshot ]
[-rm [-f] [-r|-R] [-skipTrash] [-safely] ...]
[-rmdir [--ignore-fail-on-non-empty] ...]
[-setfacl [-R] [{-b|-k} {-m|-x } ]|[--set ]]
[-setfattr {-n name [-v value] | -x name} ]
[-setrep [-R] [-w] ...]
[-stat [format] ...]
[-tail [-f] ]
[-test -[defsz] ]
[-text [-ignoreCrc] ...]
[-touchz ...]
[-truncate [-w] ...]
[-usage [cmd ...]]
Generic options supported are:
-conf specify an application configuration file
-D define a value for a given property
-fs specify default filesystem URL to use, overrides 'fs.defaultFS' property from configurations.
-jt specify a ResourceManager
-files specify a comma-separated list of files to be copied to the map reduce cluster
-libjars specify a comma-separated list of jar files to be included in the classpath
-archives specify a comma-separated list of archives to be unarchived on the compute machines
The general command line syntax is:
command [genericOptions] [commandOptions]
Usage: hadoop fs [generic options] -ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] [-e] [ ...]
[root@zhiyong5 bin]# hadoop fs -ls hdfs://zhiyong-2/
Found 7 items
drwxr-xr-x - root supergroup 0 2022-03-11 23:04 hdfs://zhiyong-2/a1
drwxrwxrwx - hadoop supergroup 0 2022-03-02 22:39 hdfs://zhiyong-2/hbase
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:34 hdfs://zhiyong-2/tez
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong-2/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong-2/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-11 22:28 hdfs://zhiyong-2/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:38 hdfs://zhiyong-2/zhiyong-2
[root@zhiyong5 bin]# find /** -iname 'hdfs-site.xml'
/opt/usdp-srv/usdp/templated/2.0.0.0/hdfs/hdfs-site.xml
/opt/usdp-srv/usdp/templated/2.0.0.0/yarn/hdfs-site.xml
/opt/usdp-srv/srv/udp/2.0.0.0/hdfs/etc/hadoop/hdfs-site.xml
/opt/usdp-srv/srv/udp/2.0.0.0/yarn/etc/hadoop/hdfs-site.xml
/opt/usdp-srv/srv/udp/2.0.0.0/hbase/conf/hdfs-site.xml
/opt/usdp-srv/srv/udp/2.0.0.0/phoenix/bin/hdfs-site.xml
/opt/usdp-srv/srv/udp/2.0.0.0/dolphinscheduler/conf/hdfs-site.xml
[root@zhiyong5 bin]# cat /opt/usdp-srv/srv/udp/2.0.0.0/hdfs/etc/hadoop/hdfs-site.xml
dfs.nameservices
zhiyong-1,zhiyong-2
dfs.ha.namenodes.zhiyong-1
nn1,nn2
dfs.namenode.rpc-address.zhiyong-1.nn1
zhiyong2:8020
dfs.namenode.rpc-address.zhiyong-1.nn2
zhiyong3:8020
dfs.namenode.http-address.zhiyong-1.nn1
zhiyong2:50070
dfs.namenode.http-address.zhiyong-1.nn2
zhiyong3:50070
dfs.client.failover.proxy.provider.zhiyong-1
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.replication
3
dfs.name.dir
/data/udp/2.0.0.0/hdfs/dfs/nn
dfs.data.dir
/data/udp/2.0.0.0/hdfs/dfs/data
dfs.journalnode.edits.dir
/data/udp/2.0.0.0/hdfs/jnData
dfs.ha.namenodes.zhiyong-2
nn1,nn2
dfs.namenode.rpc-address.zhiyong-2.nn1
zhiyong5:8020
dfs.namenode.rpc-address.zhiyong-2.nn2
zhiyong6:8020
dfs.namenode.http-address.zhiyong-2.nn1
zhiyong5:50070
dfs.namenode.http-address.zhiyong-2.nn2
zhiyong6:50070
ha.zookeeper.quorum
zhiyong5:2181,zhiyong6:2181,zhiyong7:2181
dfs.namenode.shared.edits.dir
qjournal://zhiyong5:8485;zhiyong6:8485;zhiyong7:8485/zhiyong-2
dfs.client.failover.proxy.provider.zhiyong-2
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
dfs.ha.fencing.methods
sshfence(hadoop:22)
dfs.ha.fencing.ssh.connect-timeout
30000
dfs.ha.fencing.ssh.private-key-files
/home/hadoop/.ssh/id_rsa
dfs.ha.automatic-failover.enabled
true
dfs.datanode.max.xcievers
4096
dfs.permissions.enable
false
dfs.webhdfs.enabled
true
dfs.namenode.heartbeat.recheck-interval
45000
fs.trash.interval
7320
dfs.datanode.max.transfer.threads
8192
dfs.image.compress
true
dfs.namenode.num.checkpoints.retained
12
dfs.datanode.data.dir.perm
750
dfs.datanode.handler.count
50
dfs.namenode.handler.count
50
dfs.socket.timeout
900000
dfs.hosts.exclude
/srv/udp/2.0.0.0/hdfs/etc/hadoop/excludes
dfs.namenode.replication.max-streams
32
dfs.namenode.replication.max-streams-hard-limit
200
dfs.namenode.replication.work.multiplier.per.iteration
200
dfs.datanode.balance.bandwidthPerSec
10485760
dfs.disk.balancer.enabled
true
dfs.disk.balancer.max.disk.throughputInMBperSec
50
dfs.disk.balancer.plan.threshold.percent
2
dfs.disk.balancer.block.tolerance.percent
5
[root@zhiyong5 bin]# cp /opt/usdp-srv/srv/udp/2.0.0.0/hdfs/etc/hadoop/hdfs-site.xml /opt/usdp-srv/srv/udp/2.0.0.0/yarn/etc/hadoop/hdfs-site.xml
cp:是否覆盖"/opt/usdp-srv/srv/udp/2.0.0.0/yarn/etc/hadoop/hdfs-site.xml"? y
[root@zhiyong5 bin]# hadoop fs -ls hdfs://zhiyong-2/
Found 7 items
drwxr-xr-x - root supergroup 0 2022-03-11 23:04 hdfs://zhiyong-2/a1
drwxrwxrwx - hadoop supergroup 0 2022-03-02 22:39 hdfs://zhiyong-2/hbase
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:34 hdfs://zhiyong-2/tez
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong-2/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong-2/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-11 22:28 hdfs://zhiyong-2/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:38 hdfs://zhiyong-2/zhiyong-2
[root@zhiyong5 bin]# hadoop fs -ls hdfs://zhiyong-1/
Found 7 items
-rw-r--r-- 3 root supergroup 14444 2022-03-11 22:37 hdfs://zhiyong-1/a1
drwxrwxrwx - hadoop supergroup 0 2022-03-03 00:35 hdfs://zhiyong-1/hbase
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong-1/tez
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong-1/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 hdfs://zhiyong-1/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-02 23:51 hdfs://zhiyong-1/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:12 hdfs://zhiyong-1/zhiyong-1
[root@zhiyong5 bin]#
zhiyong-1也需要这样操作,之后就可以正常访问:
[root@zhiyong2 bin]# cd /
[root@zhiyong2 /]# hadoop fs -ls hdfs://zhiyong-2/
Found 7 items
drwxr-xr-x - root supergroup 0 2022-03-11 23:04 hdfs://zhiyong-2/a1
drwxrwxrwx - hadoop supergroup 0 2022-03-02 22:39 hdfs://zhiyong-2/hbase
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:34 hdfs://zhiyong-2/tez
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong-2/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:35 hdfs://zhiyong-2/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-11 22:28 hdfs://zhiyong-2/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:38 hdfs://zhiyong-2/zhiyong-2
[root@zhiyong2 /]# hadoop fs -ls hdfs://zhiyong-1/
Found 7 items
-rw-r--r-- 3 root supergroup 14444 2022-03-11 22:37 hdfs://zhiyong-1/a1
drwxrwxrwx - hadoop supergroup 0 2022-03-03 00:35 hdfs://zhiyong-1/hbase
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong-1/tez
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:08 hdfs://zhiyong-1/tez-0.10.0
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:09 hdfs://zhiyong-1/tmp
drwxrwxrwx - hadoop supergroup 0 2022-03-02 23:51 hdfs://zhiyong-1/user
drwxrwxrwx - hadoop supergroup 0 2022-03-01 23:12 hdfs://zhiyong-1/zhiyong-1



