
Configuration Notes for Accessing the Hadoop Distributed File System (HDFS) from Java


Configuration files

In the configuration files below, replace m103 with the address of your HDFS service.

To access files on HDFS from a Java client, the key file is hadoop-0.20.2/conf/core-site.xml. This is where I initially tripped up badly: I could not connect to HDFS at all, and files could be neither created nor read.







<?xml version="1.0"?>
<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/zhangzk/hadoop</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://linux-zzk-113:9000</value>
  </property>
</configuration>



The hadoop.tmp.dir property is the directory where the NameNode stores its metadata; on a DataNode, it is the directory where that node stores file data.

The fs.default.name property is the NameNode's address and port; the default is file:///. A Java API client must connect to HDFS through the URL configured here, and DataNodes likewise use this URL to reach the NameNode.
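To make this concrete, here is a minimal connectivity check, a sketch assuming the core-site.xml above is in effect (the address hdfs://linux-zzk-113:9000 comes from fs.default.name above; the class name HdfsPing is mine):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal check that a Java client can reach the NameNode: connect with the
// URL configured in fs.default.name and list the root directory.
public class HdfsPing {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://linux-zzk-113:9000/"), conf);
    for (FileStatus status : fs.listStatus(new Path("/"))) {
      System.out.println(status.getPath() + "\t" + status.getLen());
    }
    fs.close();
  }
}

If this fails, recheck fs.default.name before touching anything else; a wrong URL here was the cause of my original connection failures.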

hdfs-site.xml





 
<?xml version="1.0"?>
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///mnt/sdc1/dfs/nn</value>
  </property>
  <property>
    <name>dfs.namenode.servicerpc-address</name>
    <value>m103:8022</value>
  </property>
  <property>
    <name>dfs.https.address</name>
    <value>m103:50470</value>
  </property>
  <property>
    <name>dfs.https.port</name>
    <value>50470</value>
  </property>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>m103:50070</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <property>
    <name>dfs.client.use.datanode.hostname</name>
    <value>false</value>
  </property>
  <property>
    <name>fs.permissions.umask-mode</name>
    <value>022</value>
  </property>
  <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.block.local-path-access.user</name>
    <value>cloudera-scm</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hdfs-sockets/dn</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.client.domain.socket.data.traffic</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>fs.http.impl</name>
    <value>com.scistor.datavision.fs.HTTPFileSystem</value>
  </property>
</configuration>
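A client running outside the cluster does not pick these files up automatically. One way to load them, sketched here with hypothetical local paths (point them at wherever you copied the cluster's config files), is to add them as Configuration resources:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Load the cluster's config files explicitly instead of relying on the
// classpath. The /etc/hadoop/conf paths are placeholders.
public class LoadClusterConf {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
    conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
    // Can help when the client sits outside the cluster network and the
    // DataNodes' internal IPs are not routable from the client machine.
    conf.setBoolean("dfs.client.use.datanode.hostname", true);
    FileSystem fs = FileSystem.get(conf);
    System.out.println("Connected to: " + fs.getUri());
    fs.close();
  }
}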
 


mapred-site.xml





 
<?xml version="1.0"?>
<configuration>
  <property>
    <name>mapreduce.job.split.metainfo.maxsize</name>
    <value>10000000</value>
  </property>
  <property>
    <name>mapreduce.job.counters.max</name>
    <value>120</value>
  </property>
  <property>
    <name>mapreduce.output.fileoutputformat.compress</name>
    <value>true</value>
  </property>
  <property>
    <name>mapreduce.output.fileoutputformat.compress.type</name>
    <value>BLOCK</value>
  </property>
  <property>
    <name>mapreduce.output.fileoutputformat.compress.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>
  <property>
    <name>mapreduce.map.output.compress.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>
  <property>
    <name>mapreduce.map.output.compress</name>
    <value>true</value>
  </property>
  <property>
    <name>zlib.compress.level</name>
    <value>DEFAULT_COMPRESSION</value>
  </property>
  <property>
    <name>mapreduce.task.io.sort.factor</name>
    <value>64</value>
  </property>
  <property>
    <name>mapreduce.map.sort.spill.percent</name>
    <value>0.8</value>
  </property>
  <property>
    <name>mapreduce.reduce.shuffle.parallelcopies</name>
    <value>10</value>
  </property>
  <property>
    <name>mapreduce.task.timeout</name>
    <value>600000</value>
  </property>
  <property>
    <name>mapreduce.client.submit.file.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.job.reduces</name>
    <value>24</value>
  </property>
  <property>
    <name>mapreduce.task.io.sort.mb</name>
    <value>256</value>
  </property>
  <property>
    <name>mapreduce.map.speculative</name>
    <value>false</value>
  </property>
  <property>
    <name>mapreduce.reduce.speculative</name>
    <value>false</value>
  </property>
  <property>
    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
    <value>0.8</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>m103:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>m103:19888</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.https.address</name>
    <value>m103:19890</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.admin.address</name>
    <value>m103:10033</value>
  </property>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/user</value>
  </property>
  <property>
    <name>mapreduce.am.max-attempts</name>
    <value>2</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.resource.mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.command-opts</name>
    <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
  </property>
  <property>
    <name>mapreduce.map.java.opts</name>
    <value>-Djava.net.preferIPv4Stack=true -Xmx1717986918</value>
  </property>
  <property>
    <name>mapreduce.reduce.java.opts</name>
    <value>-Djava.net.preferIPv4Stack=true -Xmx2576980378</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.admin.user.env</name>
    <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
  </property>
  <property>
    <name>mapreduce.map.memory.mb</name>
    <value>2048</value>
  </property>
  <property>
    <name>mapreduce.map.cpu.vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>3072</value>
  </property>
  <property>
    <name>mapreduce.reduce.cpu.vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.application.classpath</name>
    <value>$HADOOP_MAPRED_HOME</value>
  </property>
</configuration>
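These settings take effect at job submission time. A sketch of a driver that inherits them, assuming mapred-site.xml is on the client classpath (mapper and reducer are left to Hadoop's identity defaults, and the input/output paths are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Sketch of a job driver: with mapred-site.xml on the classpath (or added
// via conf.addResource), Job picks up mapreduce.framework.name=yarn and the
// memory and compression settings above.
public class JobDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "example-job");
    job.setJarByClass(JobDriverSketch.class);
    FileInputFormat.addInputPath(job, new Path("/user/zhangzk/input"));
    FileOutputFormat.setOutputPath(job, new Path("/user/zhangzk/output"));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}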
Java client code

The class below exercises the common HDFS operations through the Java API: upload, read, append, delete, and directory listing.

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

public class FileCopyToHdfs {

  public static void main(String[] args) {
    try {
      // Uncomment the operations you want to run; append requires an existing file.
      //uploadToHdfs();
      //deleteFromHdfs();
      //getDirectoryFromHdfs();
      appendToHdfs();
      readFromHdfs();
      System.out.println("SUCCESS");
    } catch (Exception e) {
      e.printStackTrace();
    }
  }

 

  /** Upload a local file to HDFS, printing a dot on each progress callback. */
  private static void uploadToHdfs() throws FileNotFoundException, IOException {
    String localSrc = "d:/qq.txt";
    String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
    InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
    Configuration conf = new Configuration();

    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    OutputStream out = fs.create(new Path(dst), new Progressable() {
      public void progress() {
        System.out.print(".");
      }
    });
    // copyBytes with close=true closes both streams when finished.
    IOUtils.copyBytes(in, out, 4096, true);
  }





 
  /** Read a file from HDFS and copy it to the local file system. */
  private static void readFromHdfs() throws FileNotFoundException, IOException {
    String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    FSDataInputStream hdfsInStream = fs.open(new Path(dst));

    OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
    byte[] ioBuffer = new byte[1024];
    int readLen = hdfsInStream.read(ioBuffer);
    while (-1 != readLen) {
      out.write(ioBuffer, 0, readLen);
      readLen = hdfsInStream.read(ioBuffer);
    }
    out.close();
    hdfsInStream.close();
    fs.close();
  }
 

 
  /** Append a line of text to an existing HDFS file (requires append support). */
  private static void appendToHdfs() throws FileNotFoundException, IOException {
    String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq.txt";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    FSDataOutputStream out = fs.append(new Path(dst));

    // Write the payload once; the original looped forever here because
    // the loop condition never changed.
    byte[] data = "zhangzk add by hdfs java api".getBytes();
    out.write(data, 0, data.length);
    out.close();
    fs.close();
  }
 

 
  /** Mark an HDFS file for deletion; it is removed when the FileSystem closes. */
  private static void deleteFromHdfs() throws FileNotFoundException, IOException {
    String dst = "hdfs://192.168.0.113:9000/user/zhangzk/qq-bak.txt";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    fs.deleteOnExit(new Path(dst));
    fs.close();
  }
 

 
  /** List the entries of an HDFS directory together with their sizes. */
  private static void getDirectoryFromHdfs() throws FileNotFoundException, IOException {
    String dst = "hdfs://192.168.0.113:9000/user/zhangzk";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    FileStatus[] fileList = fs.listStatus(new Path(dst));
    for (FileStatus file : fileList) {
      System.out.println("name:" + file.getPath().getName() + "\t\tsize:" + file.getLen());
    }
    fs.close();
  }

}

Note: append support is version-dependent. On the hadoop-0.20 release line used here it is disabled by default (fs.append fails unless dfs.support.append is enabled), and a reworked append implementation only arrived in hadoop-0.21. For more on append, see the write-up on Javaeye.
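A sketch of enabling the flag on the client before calling fs.append (the flag must also be set cluster-side, and append on this release line was known to be fragile, so treat this as experimental):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: turn on dfs.support.append client-side before appending. On
// hadoop-0.20.x this flag defaults to false and fs.append is rejected.
public class AppendWithFlag {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.support.append", true);
    FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.0.113:9000/"), conf);
    FSDataOutputStream out = fs.append(new Path("/user/zhangzk/qq.txt"));
    out.write("appended line\n".getBytes());
    out.close();
    fs.close();
  }
}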