It looks like those are just different mappings of the same depth data.
You can try putting the libfreenect depth data into a cv::Mat and scaling it down to 8 bit for display:
const float scaleFactor = 0.05f;                      // maps the 16-bit depth range into 0..255
depth.convertTo(depthMat8UC1, CV_8UC1, scaleFactor);  // depth is the CV_16UC1 Mat holding the raw values
imshow("depth gray", depthMat8UC1);
You can also check out this article on building OpenNI2 on a Jetson TK1.
Once you have OpenNI set up and working, you should be able to compile OpenCV from source with the WITH_OPENNI CMake option enabled (e.g. cmake -D WITH_OPENNI=ON ..). After that you should be able to grab the depth data directly in OpenCV:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
const float scaleFactor = 0.05f;
int main(){
cout << "opening device(s)" << endl;
VideoCapture sensor;
sensor.open(CV_CAP_OPENNI);
if( !sensor.isOpened() ){
cout << "Can not open capture object 1." << endl;
return -1;
}
for(;;){
Mat depth,depthScaled;
if( !sensor.grab() ){
cout << "Sensor1 can not grab images." << endl;
return -1;
}else if( sensor.retrieve( depth, CV_CAP_OPENNI_DEPTH_MAP ) ) {
depth.convertTo(depthScaled, CV_8UC1, scaleFactor);
imshow("depth",depth);
imshow("depth scaled",depthScaled);
}
if( waitKey( 30 ) == 27 ) break;
}
}
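The "different mappings" you are seeing are also exposed as separate OpenNI outputs, so you can retrieve them directly instead of converting yourself. For example, inside the same grab loop you could additionally pull the disparity map (a small optional addition to the sketch above):

        Mat disparity;
        if( sensor.retrieve( disparity, CV_CAP_OPENNI_DISPARITY_MAP ) ) // CV_8UC1 disparity in pixels
            imshow("disparity", disparity);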