Generating and Reading ONI Files

I previously didn't know how to save RGB-D data in real time. Writing every frame to a PCD file isn't practical: at 30 fps that's 30 PCD files per second, and the hard disk simply can't keep up. Saving separate color and depth videos felt clumsy, and compressing/decompressing the depth stream loses precision.

Later a foreigner pointed this out to me:

/OpenNI2/Source/Tools/NiViewer    press s to record oni file


That is very convenient, so I followed the OpenNI Cookbook and wrote the code below. There is little explanation here; if anything is unclear, consult the book.

Generating an ONI file containing RGB-D data:

#include <OpenNI.h> 
#include <stdio.h>
using namespace openni;
char ReadLastCharOfLine()
{
	int newChar = 0;
	int lastChar;

	fflush(stdout);

	do
	{
		lastChar = newChar;
		newChar = getchar();
	}while ((newChar != '\n')&& (newChar != EOF));

	return (char)lastChar;
}
bool HandleStatus(Status status)
{
	if (status == STATUS_OK)
		return true;
	printf("ERROR: #%d, %s", status,OpenNI::getExtendedError());
	ReadLastCharOfLine();
	return false;
}

int main()
{
	Status status = STATUS_OK;
	printf("Scanning machine for devices and loading " "modules/drivers ...\r\n");
	status = OpenNI::initialize();
	if (!HandleStatus(status))
		return 1;
	printf("Completed.\r\n");
	Device device;
	printf("Opening first device ...\r\n");
	
	status = device.open(ANY_DEVICE);
	if (!HandleStatus(status)) 
		return 1;
	printf("%s Opened, Completed.\r\n",
	device.getDeviceInfo().getName());
	printf("Checking if stream is supported ...\r\n");
	if (!device.hasSensor(SENSOR_DEPTH))
	{
		printf("Stream not supported by this device.\r\n");
		return 1;
	}
	printf("Asking device to create a depth stream ...\r\n");

	VideoStream depthSensor;
	VideoStream colorSensor;

	status = depthSensor.create(device, SENSOR_DEPTH);
	if (!HandleStatus(status)) 
		return 1;

	status = colorSensor.create(device, SENSOR_COLOR);
	if (!HandleStatus(status)) 
		return 1;
	printf("Starting stream ...\r\n");

	status = depthSensor.start();
	if (!HandleStatus(status)) 
		return 1;
	status = colorSensor.start();
	if (!HandleStatus(status)) 
		return 1;
	printf("Done.\r\n");
	printf("Creating a recorder ...\r\n");

	Recorder recorder;
	status = recorder.create("sample.oni");
	if (!HandleStatus(status))
		return 1;
	printf("Done.\r\n");
	printf("Attaching to depth sensor ...\r\n");
	status = recorder.attach(depthSensor);
	if (!HandleStatus(status)) 
		return 1;
        status = recorder.attach(colorSensor);
	if (!HandleStatus(status)) 
		return 1;
	printf("Done.\r\n");
	printf("Starting recorder ...\r\n");
	status = recorder.start();
	if (!HandleStatus(status)) 
		return 1;
	printf("Done. Now recording ...\r\n");
	
	ReadLastCharOfLine();
	recorder.destroy();
	depthSensor.destroy();
	colorSensor.destroy();
	device.close();
	OpenNI::shutdown();
	return 0;
}
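
The recorder simply captures whatever video mode each stream is running in. To pin both streams to a specific mode (e.g. 640x480 at 30 fps, as assumed in the motivation above), a sketch along these lines can go after the create() calls and before start(). The resolutions and pixel formats below are assumptions, not something the original code sets; verify them against getSensorInfo().getSupportedVideoModes() for your device.

	// Hedged sketch: request 640x480 @ 30 fps on both streams before start().
	// Supported modes vary per device; verify with getSensorInfo().getSupportedVideoModes().
	VideoMode depthMode;
	depthMode.setResolution(640, 480);
	depthMode.setFps(30);
	depthMode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	status = depthSensor.setVideoMode(depthMode);
	if (!HandleStatus(status))
		return 1;

	VideoMode colorMode;
	colorMode.setResolution(640, 480);
	colorMode.setFps(30);
	colorMode.setPixelFormat(PIXEL_FORMAT_RGB888);
	status = colorSensor.setVideoMode(colorMode);
	if (!HandleStatus(status))
		return 1;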

Reading the RGB data from the ONI file above and saving each frame as a PNG (this code is reposted from elsewhere; to read the depth data as well, just add a similar branch for openni::SENSOR_DEPTH, as shown in the sketch after the listing):

#include <iostream>
#include <cstdio>
#include <OpenNI.h>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
  
using namespace std;  
  
int main()  
{  
    //Total number of frames in the ONI video and the directory where the extracted images are saved
    int total = 0;
    const char* imagefile = "/home/jst/Data";
      
    //Initialize the OpenNI environment
    openni::OpenNI::initialize();  
  
    //Declare a device and open the ONI file
    openni::Device fromonifile;  
    fromonifile.open("sample.oni");  
  
    //Playback control object; it is the key to controlling the recorded video stream
    openni::PlaybackControl* pController = fromonifile.getPlaybackControl();  
  
    //Video stream and frame objects
    openni::VideoStream streamColor;  
    openni::VideoFrameRef frameColor;  
  
    //Check that a color sensor (color video) is present and create a stream bound to the device
    if(fromonifile.hasSensor(openni::SENSOR_COLOR))  
    {         
        if(streamColor.create( fromonifile, openni::SENSOR_COLOR ) == openni::STATUS_OK )  
        {  
            cout<<"Color stream created successfully"<<endl;  
        }  
        else  
        {  
            cerr<<"ERROR: failed to create the color stream"<<endl;  
            return -1;  
        }  
    }  
    else  
    {  
        cerr << "ERROR: this device/file has no color sensor" << endl;  
        return -1;  
    }  
  
    //Create a display window
    cv::namedWindow("Image");  
  
    //Get the total number of frames and set the playback speed to -1, so playback is paced by readFrame() and there is enough time to process, display and save every frame
    total = pController->getNumberOfFrames(streamColor);  
    pController->setSpeed(-1);  
  
    //Start the video stream
    streamColor.start();  
    for (int i = 1;i <= total; ++ i)  
    {  
        //Read the next frame from the stream
        streamColor.readFrame(&frameColor);  
  
        cout<<"当前正在读的帧数是:"<<frameColor.getFrameIndex()<<endl;  
        cout<<"当前的循环次数是:  "<<i<<endl;  
  
        //Wrap the frame data in a Mat and convert RGB to BGR, since OpenCV stores images as BGR
        cv::Mat rgbImg(frameColor.getHeight(), frameColor.getWidth(), CV_8UC3, (void*)frameColor.getData());  
        cv::Mat bgrImg;  
        cv::cvtColor(rgbImg, bgrImg, cv::COLOR_RGB2BGR);  
  
        //Save each frame as a numbered PNG in the image directory
        char imagefullname[255];  
        snprintf(imagefullname, sizeof(imagefullname), "%s/%03d.png", imagefile, i);  
        cv::imwrite(imagefullname, bgrImg);  
  
        //Show the current frame
        cv::imshow("Image",bgrImg);  
        if (cv::waitKey(30) == 27)  
        {  
            break;  
        }  
    }  
  
    //Destroy the display window
    cv::destroyWindow("Image");   
  
    //Destroy the video stream
    streamColor.destroy();  
  
    //Close the device
    fromonifile.close();  
  
    //Shut down OpenNI
    openni::OpenNI::shutdown();  
      
    return 0;  
}  
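
As mentioned above, depth works the same way: add a SENSOR_DEPTH branch next to the color one. Below is a minimal sketch, meant to sit inside the same main() before fromonifile.close(); it reuses pController and imagefile from the listing, and the streamDepth/frameDepth names and the 16-bit PNG output are my own illustrative choices, not part of the original post.

    //Hedged sketch of the SENSOR_DEPTH branch mentioned above.
    openni::VideoStream streamDepth;
    openni::VideoFrameRef frameDepth;
    if (fromonifile.hasSensor(openni::SENSOR_DEPTH) &&
        streamDepth.create(fromonifile, openni::SENSOR_DEPTH) == openni::STATUS_OK)
    {
        streamDepth.start();
        int depthTotal = pController->getNumberOfFrames(streamDepth);
        for (int i = 1; i <= depthTotal; ++i)
        {
            streamDepth.readFrame(&frameDepth);

            //Depth pixels are 16-bit values in millimetres (typically PIXEL_FORMAT_DEPTH_1_MM),
            //so wrap them in a CV_16UC1 Mat and write a 16-bit PNG, which is lossless.
            cv::Mat depthImg(frameDepth.getHeight(), frameDepth.getWidth(),
                             CV_16UC1, (void*)frameDepth.getData());
            char depthname[255];
            snprintf(depthname, sizeof(depthname), "%s/depth_%03d.png", imagefile, i);
            cv::imwrite(depthname, depthImg);
        }
        streamDepth.destroy();
    }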


