import java.io.FileInputStream;
import java.io.FileOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
public class HDFSTest {

    public static void main(String[] args) throws Exception {
        // 1. Create the client configuration
        Configuration conf = new Configuration();
        // 2. Point the client at the NameNode RPC address
        conf.set("fs.defaultFS", "hdfs://127.0.0.1:9000");
        // 3. Obtain a FileSystem handle for that address
        FileSystem fs = FileSystem.get(conf);
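
        // Note: FileSystem.get can also take the cluster URI plus an explicit
        // user name, which avoids permission errors when the local OS user
        // differs from the HDFS user. A sketch ("zhangsf" is an illustrative
        // user name; requires importing java.net.URI):
        // FileSystem fs = FileSystem.get(new URI("hdfs://127.0.0.1:9000"), conf, "zhangsf");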
        // Upload a local file to HDFS with the high-level API:
        // fs.copyFromLocalFile(new Path("/Users/zhangsf/bigdata/spark-2.4.7-bin-hadoop2.7/logs/spark-zhangsf-org.apache.spark.deploy.worker.Worker-1-ever.local.out"),
        //         new Path("/zhangvalue/input/1111local.out"));
        // fs.close();

        // Upload a file via raw streams instead:
        // open the local file for reading
        FileInputStream in = new FileInputStream(
                "/Users/zhangsf/bigdata/spark-2.4.7-bin-hadoop2.7/logs/spark-zhangsf-org.apache.spark.deploy.worker.Worker-1-ever.local.out");
        // create the target file on HDFS
        FSDataOutputStream out = fs.create(new Path("/zhangvalue/input/1111local.out"));
        // copy in 1 MB chunks until EOF (read() returns -1)
        byte[] b = new byte[1024 * 1024];
        int read;
        while ((read = in.read(b)) != -1) {
            out.write(b, 0, read);
        }
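
        // The imported org.apache.hadoop.io.IOUtils offers a one-line equivalent
        // of the loop above (shown commented out; 4096 is an assumed buffer size,
        // and close=true makes copyBytes close both streams when it finishes):
        // IOUtils.copyBytes(in, out, 4096, true);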

        // Release resources
        in.close();
        out.close();
        fs.close();
    }
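
    // Sketch of the reverse direction, streaming an HDFS file back to local
    // disk, using the FSDataInputStream and FileOutputStream imports above.
    // The /tmp target path and the method itself are illustrative additions,
    // not part of the upload flow in main().
    static void download(FileSystem fs) throws Exception {
        // open the file on HDFS that main() uploaded
        FSDataInputStream hdfsIn = fs.open(new Path("/zhangvalue/input/1111local.out"));
        // write it to the local filesystem
        FileOutputStream localOut = new FileOutputStream("/tmp/1111local.out");
        // copy until EOF; close=true closes both streams afterwards
        IOUtils.copyBytes(hdfsIn, localOut, 4096, true);
    }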
}