  • Views: 561 · Posted 2021-06-25 11:26:26

    Scanning a very flat plane with an Orbbec depth camera: why do the depth values show centimeter-level errors?
    #include <iostream>
    #include <vector>
    #include <cstring>  // memset
    #include <cmath>    // std::fabs
    #include "OpenNI.h"
    #include "OniSampleUtilities.h"
    
    using namespace openni;
    using namespace std;
    
    #define MIN_DISTANCE 20    // in millimeters
    #define MAX_DISTANCE 1200  // in millimeters
    #define RESOULTION_X 640.0 // resolution used at calibration
    #define RESOULTION_Y 480.0 // resolution used at calibration
    
    #define MAX_FRAME_COUNT 50
    
    typedef struct SPoint
    {
        float x, y, z;
        SPoint(float u = 0.0f, float v = 0.0f, float w = 0.0f)
            : x(u), y(v), z(w) 
        { }
    } Point;
    
    typedef struct xnIntrinsic_Params
    {
        xnIntrinsic_Params() :
            c_x(320.0), c_y(240.0), f_x(480.0), f_y(480.0)
        {}
    
        xnIntrinsic_Params(float c_x_, float c_y_, float f_x_, float f_y_) :
            c_x(c_x_), c_y(c_y_), f_x(f_x_),f_y(f_y_)
        {}
    
        float c_x; // principal point x coordinate
        float c_y; // principal point y coordinate
        float f_x; // normalized focal length along the u axis
        float f_y; // normalized focal length along the v axis
    } xIntrinsic_Params;
    
    xIntrinsic_Params g_IntrinsicParam; // global variable holding the camera intrinsics
    
    void getCameraParams(openni::Device& Device, xIntrinsic_Params& IrParam)
    {
        OBCameraParams cameraParam;
        int dataSize = sizeof(cameraParam);
        memset(&cameraParam, 0, sizeof(cameraParam));
        openni::Status rc = Device.getProperty(openni::OBEXTENSION_ID_CAM_PARAMS, (uint8_t *)&cameraParam, &dataSize);
        if (rc != openni::STATUS_OK)
        {
            std::cout << "Error:" << openni::OpenNI::getExtendedError() << std::endl;
            return;
        }
        IrParam.f_x = cameraParam.l_intr_p[0]; // normalized focal length along the u axis
        IrParam.f_y = cameraParam.l_intr_p[1]; // normalized focal length along the v axis
        IrParam.c_x = cameraParam.l_intr_p[2]; // principal point x coordinate
        IrParam.c_y = cameraParam.l_intr_p[3]; // principal point y coordinate
    }
    
    void convertDepthToPointCloud(const uint16_t *pDepth, int width, int height, int middleIndex)
    {
        if (NULL == pDepth)
        {
            printf("depth frame is NULL!");
            return;
        }
    
        // Scale the intrinsics, assuming the camera was calibrated at RESOULTION_X x RESOULTION_Y
        float fdx = g_IntrinsicParam.f_x * ((float)(width) / RESOULTION_X);
        float fdy = g_IntrinsicParam.f_y * ((float)(height) / RESOULTION_Y);
        float u0  = g_IntrinsicParam.c_x * ((float)(width) / RESOULTION_X);
        float v0  = g_IntrinsicParam.c_y * ((float)(height) / RESOULTION_Y);
        // 3D coordinates of the scanned plane's center pixel
        SPoint midPoint(pDepth[middleIndex] * (width / 2 - u0) / fdx, pDepth[middleIndex] * ((height + 1) / 2 - v0) / fdy, pDepth[middleIndex]);
    
        vector<Point> points;
        uint16_t max_depth = MAX_DISTANCE;
        uint16_t min_depth = MIN_DISTANCE;
        float x, y, z;
    
        for (int v = 0; v < height; ++v)
        {
            for (int u = 0; u < width; ++u)
            {
                uint16_t depth = pDepth[v * width + u];
                if (depth < min_depth || depth > max_depth) // depth == 0 (invalid) is already below min_depth
                    continue;
    
                float tx = (u - u0) / fdx;
                float ty = (v - v0) / fdy;
                // define the center point as the origin of the world coordinate system
                x = depth * tx - midPoint.x, y = depth * ty - midPoint.y, z = depth;

                // keep only points within ±100 mm of the plane center
                if (std::fabs(x) > 100.0f) continue;
                if (std::fabs(y) > 100.0f) continue;
                points.push_back(SPoint(x, y, z));
    
            }
        }
        for (size_t i = 0; i < points.size(); ++i)
            cout << points[i].x << " " << points[i].y << " " <<points[i].z << endl;
    }
    
    int g_imageCount = 0;
    
    void analyzeFrame(const VideoFrameRef& frame)
    {
        DepthPixel* pDepth;
    
        g_imageCount++;
        if (MAX_FRAME_COUNT < g_imageCount)
        {
            return;
        }
    
        //int middleIndex = (frame.getHeight() + 1)*frame.getWidth() / 2;
    
        switch (frame.getVideoMode().getPixelFormat())
        {
        case PIXEL_FORMAT_DEPTH_1_MM:
            pDepth = (DepthPixel*)frame.getData();
            //printf("[%08llu] %8d\n", (long long)frame.getTimestamp(),
            //    pDepth[middleIndex]);
    
            // convert this depth frame to a point cloud centered on the middle pixel (printed to stdout, one frame per call)
            convertDepthToPointCloud(pDepth, frame.getWidth(), frame.getHeight(), (frame.getHeight() + 1) * frame.getWidth() / 2);
            break;
        default:
            printf("Unknown format\n");
        }
    }
    
    class PrintCallback : public VideoStream::NewFrameListener
    {
    public:
        void onNewFrame(VideoStream& stream)
        {
            stream.readFrame(&m_frame);
    
            analyzeFrame(m_frame);
        }
    private:
        VideoFrameRef m_frame;
    };
    
    int main(int argc, char* argv[])
    {
        //initialize openNI sdk
        Status rc = OpenNI::initialize();
        if (rc != STATUS_OK)
        {
            printf("Initialize failed\n%s\n", OpenNI::getExtendedError());
            return 1;
        }
    
    //open device
        Device device;
        rc = device.open(ANY_DEVICE);
        if (rc != STATUS_OK)
        {
            printf("Couldn't open device\n%s\n", OpenNI::getExtendedError());
            return 2;
        }
    
        VideoStream depth;
    
        //create depth stream
        if (device.getSensorInfo(SENSOR_DEPTH) != NULL)
        {
            rc = depth.create(device, SENSOR_DEPTH);
            if (rc != STATUS_OK)
            {
                printf("Couldn't create depth stream\n%s\n", OpenNI::getExtendedError());
            }
        }
    
        //start depth stream
        rc = depth.start();
        if (rc != STATUS_OK)
        {
            printf("Couldn't start the depth stream\n%s\n", OpenNI::getExtendedError());
        }
    
        PrintCallback depthPrinter;
    
        // Register frame listener
        depth.addNewFrameListener(&depthPrinter);
    
        //get intrinsic parameter from device
        getCameraParams(device, g_IntrinsicParam);
    
        // Wait while we're getting frames through the printer
        while (MAX_FRAME_COUNT > g_imageCount)
        {
            Sleep(100);
        }
    
        depth.removeNewFrameListener(&depthPrinter);
    
        //stop depth stream
        depth.stop();
    
        //destroy depth stream
        depth.destroy();
    
        //close device
        device.close();
    
        //shutdown OpenNI
        OpenNI::shutdown();
    
        return 0;
    }
    
    Output:
    point cloud coordinates after converting to the world coordinate system
    

  • Posted 2021-06-25 11:27:38


    There is indeed an error of about 4 millimeters.

  • Posted 2021-06-25 11:29:12


    Where does this error come from? Is it because the camera plane is tilted relative to my given plane, or is it due to the camera's own accuracy?

  • Posted 2021-06-25 11:34:12

    float fdx = g_IntrinsicParam.f_x * ((float)(width) / RESOULTION_X);
        float fdy = g_IntrinsicParam.f_y * ((float)(height) / RESOULTION_Y);
        float u0  = g_IntrinsicParam.c_x * ((float)(width) / RESOULTION_X);
        float v0  = g_IntrinsicParam.c_y * ((float)(height) / RESOULTION_Y);
        Follow-up: is this conversion in meters or in millimeters? OpenNI also provides a conversion function -- what unit does it use?
    

  • Posted 2021-06-26 10:52:02

    133****0812 posted on 2021-06-25 11:29:12

    Where does this error come from? Is it because the camera plane is tilted relative to my given plane, or is it due to the camera's own accuracy?

    1. Is it caused by tilt between the camera plane and the given plane?
    ---- This is indeed a possible factor: it is hard to guarantee that the camera's optical axis is exactly perpendicular to the wall, so you can only minimize the influence of the tilt as much as possible.
    2. Or is it caused by the camera's own accuracy?
    ---- The camera works on the structured-light principle, and its measurement error grows as the object distance increases; this error is generally proportional to the square of the distance (differentiating Z = B*F/d shows that a fixed disparity error dd yields a depth error of roughly Z^2 * dd / (B * F)), as the sketch below illustrates.
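
    A minimal sketch of that growth, assuming the baseline B = 75 mm, focal length F = 555 px, and 1/8-px disparity step quoted in a later reply in this thread (illustrative values only, not read from any device):

    #include <cstdio>

    int main()
    {
        // step between adjacent representable depths:
        // 1/Z1 - 1/Z2 = dd / (B * F)  =>  Z2 = 1 / (1/Z1 - dd / (B * F))
        const double B = 75.0, F = 555.0, dd = 0.125;
        const double zs[] = { 1000.0, 2000.0, 4000.0, 8000.0 };
        for (double Z1 : zs)
        {
            double Z2 = 1.0 / (1.0 / Z1 - dd / (B * F));
            std::printf("Z1 = %.0f mm -> step = %.1f mm\n", Z1, Z2 - Z1);
        }
        return 0;
    }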

  • Posted 2021-06-26 10:47:35

    133****0812 posted on 2021-06-25 11:34:12

    float fdx = g_IntrinsicParam.f_x * ((float)(width) / RESOULTION_X);
        float fdy = g_IntrinsicParam.f_y * ((float)(height) / RESOULTION_Y);
        float u0  = g_IntrinsicParam.c_x * ((float)(width) / RESOULTION_X);
        float v0  = g_IntrinsicParam.c_y * ((float)(height) / RESOULTION_Y);
        Follow-up: is this conversion in meters or in millimeters? OpenNI also provides a conversion function -- what unit does it use?
    

    Hello, the coordinate conversion here is in millimeters.
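
    OpenNI's own conversion function behaves the same way: with PIXEL_FORMAT_DEPTH_1_MM the world coordinates come back in millimeters. A minimal sketch (assuming "OpenNI.h" and <cstdio> are included, and depthStream is an already-started openni::VideoStream in the 1 mm depth format):

    void printCenterWorldPoint(openni::VideoStream& depthStream)
    {
        openni::VideoFrameRef frame;
        if (depthStream.readFrame(&frame) != openni::STATUS_OK)
            return;
        const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();

        // convert the center pixel to world coordinates
        int u = frame.getWidth() / 2, v = frame.getHeight() / 2;
        float wx, wy, wz;
        openni::CoordinateConverter::convertDepthToWorld(
            depthStream, u, v, pDepth[v * frame.getWidth() + u], &wx, &wy, &wz);
        // wx, wy, wz are in millimeters; for the 1 mm format, wz equals the raw depth value
        printf("world point: (%.1f, %.1f, %.1f) mm\n", wx, wy, wz);
    }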

  • Posted 2021-06-26 10:50:38

    童虎 posted on 2021-06-26 10:47:35

    Hello, the coordinate conversion here is in millimeters.

    Fitting a plane reduces the error. The camera plane is never exactly parallel to the wall, so first estimate the angle between the camera plane and the wall, then use the given (x, y) to compute a corrected depth value; after that the error is small (a least-squares sketch follows below). Thanks a lot for the guidance.
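
    A minimal sketch of such a fit, assuming the points come from the convertDepthToPointCloud listing above (Point is the struct defined there, with float x, y, z in millimeters). It solves the least-squares plane z = a*x + b*y + c via the 3x3 normal equations and Cramer's rule:

    #include <vector>

    // Least-squares fit of z = a*x + b*y + c over the cloud; the residual
    // z - (a*x + b*y + c) at each point is the flatness error with the
    // plane tilt removed. Returns false if the system is degenerate.
    bool fitPlane(const std::vector<Point>& pts, double& a, double& b, double& c)
    {
        double sxx = 0, sxy = 0, syy = 0, sx = 0, sy = 0, n = 0;
        double sxz = 0, syz = 0, sz = 0;
        for (size_t i = 0; i < pts.size(); ++i)
        {
            double x = pts[i].x, y = pts[i].y, z = pts[i].z;
            sxx += x * x; sxy += x * y; syy += y * y;
            sx += x; sy += y; n += 1.0;
            sxz += x * z; syz += y * z; sz += z;
        }
        // normal equations:  | sxx sxy sx | |a|   |sxz|
        //                    | sxy syy sy | |b| = |syz|
        //                    | sx  sy  n  | |c|   |sz |
        double det = sxx * (syy * n - sy * sy) - sxy * (sxy * n - sy * sx)
                   + sx  * (sxy * sy - syy * sx);
        if (det == 0.0) return false; // degenerate input (e.g. all points collinear)
        a = (sxz * (syy * n - sy * sy) - sxy * (syz * n - sz * sy)
           + sx  * (syz * sy - syy * sz)) / det;
        b = (sxx * (syz * n - sz * sy) - sxz * (sxy * n - sy * sx)
           + sx  * (sxy * sz - syz * sx)) / det;
        c = (sxx * (syy * sz - sy * syz) - sxy * (sxy * sz - sx * syz)
           + sxz * (sxy * sy - syy * sx)) / det;
        return true;
    }

    With (a, b, c) in hand, the corrected depth at a given (x, y) is z_fit = a*x + b*y + c, and comparing each measured z against z_fit removes the tilt component from the error.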

  • Posted 2021-08-27 16:58:46


    This is determined by the principle of stereo vision: a structured-light camera is essentially a binocular camera. Binocular depth estimation works by estimating how far a left-eye pixel is offset in the right eye, which is called disparity. If d1 and d2 are the disparities measured at depths Z1 and Z2, the change in inverse depth is proportional to the change in disparity:
    1/Z1 - 1/Z2 = (d1 - d2) / (Baseline * FocalLength)

    Suppose disparity can only be estimated to 1/8-pixel precision, with baseline B = 75 mm and focal length F = 555 px at VGA resolution. Plugging in, the two sides come out roughly equal:
    1/1152 - 1/1156 ≈ 0.125 / (75 * 555)
    That is, at 1152 mm the depth quantization step is 4 mm.

    Given Z1, the next representable depth Z2 follows from:
    Z2 = 1 / (1/Z1 - 0.125 / (75 * 555))
    Z1 = 2000 gives Z2 = 2012 mm, a step of 12 mm.
    Z1 = 4000 gives Z2 = 4049 mm, a step of 49 mm.
    Z1 = 8000 gives Z2 = 8197 mm, a step of 197 mm.
    So the ranging error of a binocular camera does not grow linearly with distance; the farther the target, the faster it grows.

    In fact, even though a structured-light depth camera may advertise a 0 to 10000 mm range, it can only output around 800 distinct integer depth values. If you don't believe it, work it out yourself -- for instance with the sketch below.
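
    A minimal way to check, counting one representable depth per 1/8-px disparity step between the far and near ends of the range. B = 75 mm and F = 555 px are the values above; the 400 mm near limit is an assumed typical value, and moving it shifts the count somewhat:

    #include <cstdio>

    int main()
    {
        const double B = 75.0, F = 555.0, step = 0.125; // mm, px, px
        const double zNear = 400.0, zFar = 10000.0;     // assumed working range, mm
        double dNear = B * F / zNear; // ~104.06 px of disparity at the near end
        double dFar  = B * F / zFar;  // ~4.16 px of disparity at the far end
        // one representable depth per 1/8-px disparity step
        long count = (long)((dNear - dFar) / step);
        std::printf("representable depths in [%.0f, %.0f] mm: %ld\n",
                    zNear, zFar, count); // ~799 with these assumptions
        return 0;
    }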

  • Posted 2021-07-05 13:54:22


    Awesome, worth learning from :+1:

  • Posted 2021-07-14 23:34:02

    #include <iostream> 
    #include <OpenNI.h>
    #include <pcl/common/common_headers.h>       
    #include <pcl/visualization/pcl_visualizer.h>
    
    openni::Device mDevice;
    openni::VideoStream mColorStream;
    openni::VideoStream mDepthStream;
    
    bool init() 
    {
        // Initial OpenNI
        if (openni::OpenNI::initialize() != openni::STATUS_OK) 
        {
            std::cerr << "OpenNI Initial Error: " << openni::OpenNI::getExtendedError() << std::endl;
            return false;
        }
        // Open Device
        if (mDevice.open(openni::ANY_DEVICE) != openni::STATUS_OK) 
        {
            std::cerr << "Can't Open Device: " << openni::OpenNI::getExtendedError() << std::endl;
            return false;
        }
        return true;
    }
    
    bool createColorStream() 
    {
        if (mDevice.hasSensor(openni::SENSOR_COLOR)) 
        {
            if (mColorStream.create(mDevice, openni::SENSOR_COLOR) == openni::STATUS_OK) 
            {
                // set video mode
                openni::VideoMode mMode;
                mMode.setResolution(640, 480);
                mMode.setFps(30);
                mMode.setPixelFormat(openni::PIXEL_FORMAT_RGB888);
    
                if (mColorStream.setVideoMode(mMode) != openni::STATUS_OK) {
                    std::cout << "Can't apply VideoMode: " << openni::OpenNI::getExtendedError() << std::endl;
                    return false;
                }
            }
            else 
            {
                std::cerr << "Can't create color stream on device: " << openni::OpenNI::getExtendedError() << std::endl;
                return false;
            }
    
            // start color stream
            mColorStream.start();
            return true;
        }
        return false;
    }
    
    bool createDepthStream() 
    {
        if (mDevice.hasSensor(openni::SENSOR_DEPTH)) 
        {
            if (mDepthStream.create(mDevice, openni::SENSOR_DEPTH) == openni::STATUS_OK) 
            {
                // set video mode
                openni::VideoMode mMode;
                mMode.setResolution(640, 480);
                mMode.setFps(30);
                mMode.setPixelFormat(openni::PIXEL_FORMAT_DEPTH_1_MM);
    
                if (mDepthStream.setVideoMode(mMode) != openni::STATUS_OK) {
                    std::cout << "Can't apply VideoMode to depth stream: " << openni::OpenNI::getExtendedError() << std::endl;
                    return false;
                }
            }
            else 
            {
                std::cerr << "Can't create depth stream on device: " << openni::OpenNI::getExtendedError() << std::endl;
                return false;
            }
            // start depth stream
            mDepthStream.start();
            // image registration
            if (mDevice.isImageRegistrationModeSupported(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR))
                mDevice.setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
            else
                std::cerr << "Don't support registration" << std::endl;
            return true;
        }
        else 
        {
            std::cerr << "ERROR: This device does not have depth sensor" << std::endl;
            return false;
        }
    }
    
    // convert the OpenNI color + depth streams into a colored point cloud
    bool getCloudXYZCoordinate(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_XYZRGB) 
    {
        openni::VideoFrameRef colorFrame;
        if (mColorStream.readFrame(&colorFrame) != openni::STATUS_OK)
        {
            std::cout << "getCloudXYZCoordinate: fail to read frame from color stream" << std::endl;
            return false;
        }
        openni::RGB888Pixel* pColor = (openni::RGB888Pixel*)colorFrame.getData();
    
        openni::VideoFrameRef  mDepthFrame;
        if (mDepthStream.readFrame(&mDepthFrame) == openni::STATUS_OK) 
        {
            float fx, fy, fz;
            int i = 0;
            // scale from millimeters to meters
            double fScale = 0.001;
            openni::DepthPixel* pDepthArray = (openni::DepthPixel*)mDepthFrame.getData();
            for (int y = 0; y < mDepthFrame.getHeight(); y++) 
            {
                for (int x = 0; x < mDepthFrame.getWidth(); x++) 
                {
                    int idx = x + y * mDepthFrame.getWidth();
                    const openni::DepthPixel rDepth = pDepthArray[idx];
                    openni::CoordinateConverter::convertDepthToWorld(mDepthStream, x, y, rDepth, &fx, &fy, &fz);
                    // flip X and Y to match the viewer's coordinate convention
                    fx = -fx;
                    fy = -fy;
                    cloud_XYZRGB->points[i].x = fx * fScale;
                    cloud_XYZRGB->points[i].y = fy * fScale;
                    cloud_XYZRGB->points[i].z = fz * fScale;
                    cloud_XYZRGB->points[i].r = pColor[i].r;
                    cloud_XYZRGB->points[i].g = pColor[i].g;
                    cloud_XYZRGB->points[i].b = pColor[i].b;
                    i++;
                }
            }
            return true;
        }
        else 
        {
            std::cout << "getCloudXYZCoordinate: fail to read frame from depth stream" << std::endl;
            return false;
        }
    }
    
    int main() 
    {
        // initialize OpenNI and open the device
        if (!init()) {
            std::cout << "Fail to init ..." << std::endl;
            return -1;
        }
        // create the color and depth streams
        if (createColorStream() && createDepthStream())
        {
            std::cout << "displayPointCloud: create color stream and depth stream ..." << std::endl;
        }
        else 
        {
            std::cout << "displayPointCloud: can not create color stream and depth stream ..." << std::endl;
            return -1;
        }
        // create an organized 640x480 PCL cloud
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_XYZRGB(new pcl::PointCloud<pcl::PointXYZRGB>());
        cloud_XYZRGB->width = 640;
        cloud_XYZRGB->height = 480;
        cloud_XYZRGB->points.resize(static_cast<size_t>(cloud_XYZRGB->width) * static_cast<size_t>(cloud_XYZRGB->height));
        // PCL visualization
        pcl::visualization::PCLVisualizer::Ptr m_pViewer(new pcl::visualization::PCLVisualizer("Viewer"));
        m_pViewer->setCameraPosition(0, 0, -2, 0, -1, 0, 0);
        m_pViewer->addCoordinateSystem(0.3);
        // render loop: update the cloud in place instead of re-adding it each frame
        pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgb(cloud_XYZRGB);
        m_pViewer->addPointCloud<pcl::PointXYZRGB>(cloud_XYZRGB, rgb, "cloud");
        while (!m_pViewer->wasStopped()) {
            getCloudXYZCoordinate(cloud_XYZRGB);
            m_pViewer->updatePointCloud<pcl::PointXYZRGB>(cloud_XYZRGB, rgb, "cloud");
            m_pViewer->spinOnce();
        }
        mColorStream.destroy();
        mDepthStream.destroy();
        mDevice.close();
        openni::OpenNI::shutdown();
    
        return 0;
    }
    
