import com.alibaba.fastjson.JSONObject;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.*;
import java.util.List;
public class test2 {
public static void main(String[] args) {
//hbase 连接
try {
Configuration Hbase_CONF;
Hbase_CONF = HbaseConfiguration.create();
Hbase_CONF.set("hbase.zookeeper.property.clientPort", "2181");
Hbase_CONF.set("hbase.zookeeper.quorum", "192.168.xx.xx");
Hbase_CONF.set("hbase.master", "192.168.xx.xx:60000");
Hbase_CONF.set("zookeeper.znode.parent", "/hbase");
Hbase_CONF.setInt("hbase.hconnection.threads.max", 100);
Hbase_CONF.setInt("hbase.hconnection.threads.core", 50);
Hbase_CONF.setLong("hbase.hconnection.threads.keepalivetime", 1000);
ConnectionFactory.createConnection(Hbase_CONF);
Connection hbaseConnection = ConnectionFactory.createConnection(Hbase_CONF);
Table table = hbaseConnection.getTable(TableName.valueOf("0_library_token"));
System.out.println("Table Name: " + table.getName());
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("F"), Bytes.toBytes("F"));
scan.setMaxVersions(1111111111);//不设置默认,hbase只取row中的1个cell
ResultScanner resultScanner = table.getScanner(scan);
for (Result result : resultScanner) {
List| cells = result.listCells();//hbase的一个row对应的所有cell
byte[] row = result.getRow();
String rowKey = Bytes.toString(row);//rowkey
System.out.println("rowKey为"+rowKey);
for (Cell cell : cells) {//value
String jsonstr = Bytes.toString(CellUtil.clonevalue(cell));
//单个value中的cell
JSonObject jsonObject = JSONObject.parseObject(jsonstr);
System.out.println("path为" + jsonObject.toString );
}
System.out.println("************************");
}
} catch (IOException e) {
e.printStackTrace();
}
} |
插入（Put）示例：
Configuration Hbase_CONF;
Hbase_CONF = HbaseConfiguration.create();
Hbase_CONF.set("hbase.zookeeper.property.clientPort", "2181");
Hbase_CONF.set("hbase.zookeeper.quorum", "192.168.xx.xx");
Hbase_CONF.set("hbase.master", "192.168.xx.xx:60000");
Hbase_CONF.set("zookeeper.znode.parent", "/hbase");
Hbase_CONF.setInt("hbase.hconnection.threads.max", 100);
Hbase_CONF.setInt("hbase.hconnection.threads.core", 50);
Hbase_CONF.setLong("hbase.hconnection.threads.keepalivetime", 1000);
try {
ConnectionFactory.createConnection(Hbase_CONF);
Connection hbaseConnection = ConnectionFactory.createConnection(Hbase_CONF);
Table table = hbaseConnection.getTable(TableName.valueOf("0_file_pv"));
Put put = new Put("rk0001".getBytes()); //指定rowkey
put.addColumn("F".getBytes(), "F".getBytes(), "shandong".getBytes());
//插入数据
table.put(put);
table.close();
hbaseConnection.close();
System.out.println("结束***");
} catch (IOException e) {
e.printStackTrace();
}
Maven 依赖（pom.xml）：

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>${hbase.version}</version>
        </dependency>
    </dependencies>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <hadoop.version>2.6.5</hadoop.version>
        <hbase.version>1.2.5</hbase.version>
    </properties>
注意hosts文件中的地址与连接地址保持一致



