The key to testing HBase is referencing the correct HBase jar packages (and keeping the versions compatible: from HBase 1.0 on, JDK 1.7 or above is required, while LoadRunner 11 does not support JDK 1.7, so this article uses LoadRunner 12 for its examples; also make sure the HBase jars you reference match the HBase version on the server side, otherwise compatibility problems will appear as well).
1. Create a new script in LoadRunner (this article uses LoadRunner 12.02), choosing Java Vuser as the protocol type.
2. Under Replay --> Runtime Settings, set the Java VM path. Because LoadRunner 12's support for JDK 1.8 is not good, this test uses a copy of a portable, install-free (32-bit) jdk1.7.0_67, so choose the fixed-path mode (Use the Specified JDK) and point it at the JDK 1.7 path.
3. Operating HBase from Java requires the related jar packages (they can be downloaded from the official site, or you can download the package I uploaded, i.e. the HBase dependency jars referenced by the performance-testing tool). Put them under the include directory or another directory, and configure the Classpath in Run-time Settings; an indicative list of the jars typically involved is sketched below.
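For reference only, a typical HBase 1.x client setup pulls in roughly the following jars (an indicative list based on a common client installation, not an exact inventory; the precise names and versions depend on the HBase/Hadoop release you are testing against):
hbase-client, hbase-common, hbase-protocol, hadoop-common, hadoop-auth, zookeeper, guava, protobuf-java, netty-all, htrace-core, metrics-core, commons-logging, commons-lang, commons-configuration, commons-collections, commons-codec, slf4j-api, slf4j-log4j12, log4j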
4. Write the HBase test code in the Java Vuser script (an example follows; the configuration details need to be adapted to your environment):
/*
* LoadRunner Java script. (Build: _build_number_)
*
* Script Description:
*
*/
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import org.apache.log4j.xml.DOMConfigurator;
import java.io.File;
import lrapi.lr;
public class Actions
{
    String tableName = "student";
    Configuration configuration = null;

    public int init() throws Throwable {
        // Configure the log output
        File directory = new File(".");
        DOMConfigurator.configure(directory.getCanonicalPath() + "\\log4j.xml"); // load the log4j.xml file
        //PropertyConfigurator.configure("E:/study/log4j/log4j.properties"); // or load a .properties file instead
        Logger log = Logger.getLogger("org.zblog.test");
        System.setProperty("hadoop.home.dir", directory.getCanonicalPath() + "\\hadoop-common-2.2.0-bin-master");
        configuration = HBaseConfiguration.create();
        //configuration.set("hbase.zookeeper.quorum", "hellotest"); // standalone deployment
        configuration.set("hbase.zookeeper.quorum", "agent01.org.cn,agent02.org.cn,master.org.cn");
        // ZooKeeper quorum (one master node and two slave nodes)
        configuration.set("hbase.zookeeper.property.clientPort", "2181"); // ZooKeeper port
        configuration.set("hbase.regionserver.port", "16020");
        configuration.set("zookeeper.znode.parent", "/hbase-unsecure");
        configuration.set("hbase.rootdir", "hdfs://master.org.cn:8020/apps/hbase/data");
        //connection = ConnectionFactory.createConnection(configuration);
        return 0;
    }//end of init
    public int action() throws Throwable {
        createTable(configuration, tableName);
        addData(configuration, tableName);
        getData(configuration, tableName);
        getAllData(configuration, tableName);
        deleteData(configuration, tableName);
        dropTable(configuration, tableName);
        return 0;
    }//end of action

    public int end() throws Throwable {
        return 0;
    }//end of end
    /**
     * Create a new table (dropping it first if it already exists)
     * @param configuration Configuration
     * @param tableName String, the new table's name
     * */
    public static void createTable(Configuration configuration, String tableName){
        HBaseAdmin admin;
        try {
            admin = new HBaseAdmin(configuration);
            if(admin.tableExists(tableName)){
                admin.disableTable(tableName);
                admin.deleteTable(tableName);
                System.out.println(tableName + " already exists, deleting......");
            }
            HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
            tableDescriptor.addFamily(new HColumnDescriptor("info"));
            tableDescriptor.addFamily(new HColumnDescriptor("address"));
            admin.createTable(tableDescriptor);
            admin.close();
            System.out.println("end create table");
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Delete an existing table
     * @param configuration Configuration
     * @param tableName String, the table's name
     * */
    public static void dropTable(Configuration configuration, String tableName){
        HBaseAdmin admin;
        try {
            admin = new HBaseAdmin(configuration);
            if(admin.tableExists(tableName)){
                admin.disableTable(tableName);
                admin.deleteTable(tableName);
                System.out.println(tableName + " deleted successfully!");
            }else{
                System.out.println(tableName + " does not exist!");
            }
            admin.close();
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Insert a row of data
     * @param configuration Configuration
     * @param tableName String, the table's name
     * */
    public static void addData(Configuration configuration, String tableName){
        HBaseAdmin admin;
        try {
            admin = new HBaseAdmin(configuration);
            if(admin.tableExists(tableName)){
                HTable table = new HTable(configuration, tableName);
                Put put = new Put(Bytes.toBytes("zhangsan"));
                put.add(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes("28"));
                table.put(put);
                table.close();
                System.out.println("add success!");
            }else{
                System.out.println(tableName + " does not exist!");
            }
            admin.close();
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Delete a row of data
     * @param configuration Configuration
     * @param tableName String, the table's name
     * */
    public static void deleteData(Configuration configuration, String tableName){
        HBaseAdmin admin;
        try {
            admin = new HBaseAdmin(configuration);
            if(admin.tableExists(tableName)){
                HTable table = new HTable(configuration, tableName);
                Delete delete = new Delete(Bytes.toBytes("zhangsan"));
                table.delete(delete);
                table.close();
                System.out.println("delete success!");
            }else{
                System.out.println("Table does not exist!");
            }
            admin.close();
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Get a single row of data
     * @param configuration Configuration
     * @param tableName String, the table's name
     * */
    public static void getData(Configuration configuration, String tableName){
        HTable table;
        try {
            table = new HTable(configuration, tableName);
            Get get = new Get(Bytes.toBytes("zhangsan"));
            Result result = table.get(get);
            for(Cell cell : result.rawCells()){
                System.out.println("Row Name:" + new String(CellUtil.cloneRow(cell)) + " ");
                System.out.println("Timestamp:" + cell.getTimestamp() + " ");
                System.out.println("Column Family:" + new String(CellUtil.cloneFamily(cell)) + " ");
                System.out.println("Column Qualifier:" + new String(CellUtil.cloneQualifier(cell)) + " ");
                System.out.println("Value:" + new String(CellUtil.cloneValue(cell)) + " ");
            }
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Scan all data in the table
     * @param configuration Configuration
     * @param tableName String, the table's name
     * */
    public static void getAllData(Configuration configuration, String tableName){
        HTable table;
        try {
            table = new HTable(configuration, tableName);
            Scan scan = new Scan();
            ResultScanner results = table.getScanner(scan);
            for(Result result : results){
                for(Cell cell : result.rawCells()){
                    System.out.println("Row Name:" + new String(CellUtil.cloneRow(cell)) + " ");
                    System.out.println("Timestamp:" + cell.getTimestamp() + " ");
                    System.out.println("Column Family:" + new String(CellUtil.cloneFamily(cell)) + " ");
                    System.out.println("Column Qualifier:" + new String(CellUtil.cloneQualifier(cell)) + " ");
                    System.out.println("Value:" + new String(CellUtil.cloneValue(cell)) + " ");
                }
            }
            results.close();
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
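Since init() loads a log4j.xml from the script's root directory, that file needs to exist there as well. A minimal sketch of such a file (console appender only; adjust appenders and log levels as needed for your environment) could look like this:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
    <appender name="console" class="org.apache.log4j.ConsoleAppender">
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="%d{yyyy-MM-dd HH:mm:ss} %-5p %c - %m%n"/>
        </layout>
    </appender>
    <root>
        <priority value="info"/>
        <appender-ref ref="console"/>
    </root>
</log4j:configuration>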
5. Save the script, and put the hadoop-common-2.2.0-bin-master directory in the script's root directory (the code above builds the file path from the script root). The reason for this is that calling Hadoop components on Windows reports an error if winutils.exe is missing (the hadoop-common-2.2.0-bin-master package can be downloaded online, and it is also included in the HBase dependency package mentioned above).
6. Run the script. In LoadRunner's Replay Log you can see that the data-operation tests executed successfully:
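If you also want per-operation response times in the results rather than just a successful replay, the individual calls in action() can be wrapped in LoadRunner transactions; the lrapi.lr class is already imported by the generated script. A minimal sketch (the transaction name is arbitrary):
// Example: wrap one HBase operation in a LoadRunner transaction to measure its response time
lr.start_transaction("hbase_addData");
addData(configuration, tableName);
lr.end_transaction("hbase_addData", lr.PASS);
The same pattern applies to the other calls (createTable, getData, and so on), giving one measured transaction per HBase operation.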
7. The biggest challenge in driving HBase from a performance-testing tool is missing jars and version incompatibilities, so this article can only serve as a reference. In actual use you need to configure and reference the relevant jars according to the Hadoop version deployed on the server side and the needs of the application under test, and keep working through the errors with LoadRunner's Replay debugging (a process that tests your patience and attention to detail; some of the problems you hit may only be explained correctly after repeated searches of foreign-language sites and community forums, because open-source technology rarely comes with ready-made documentation, so you have to explore and keep searching on your own).