From 4b6e7789e25519d90b49034a77c2b8ae782a6380 Mon Sep 17 00:00:00 2001 From: Marc Toussaint Date: Tue, 31 Oct 2023 17:39:14 +0100 Subject: [PATCH] calibPoints with joint optimization; rai renamings --- CMakeLists.txt | 13 +- config.mk | 2 +- rai | 2 +- retired/perception/calibration/main.cpp | 8 +- retired/perception/objectDetection/main.cpp | 2 +- retired/perception/registration/main.cpp | 22 +- retired/perception/simulateCam/main.cpp | 6 +- src/BotOp/bot.cpp | 8 +- src/BotOp/bot.h | 2 +- src/BotOp/{pyBot.cxx => py-BotOp.cxx} | 6 +- src/BotOp/{pyBot.h => py-BotOp.h} | 0 src/BotOp/simulation.cpp | 5 +- src/BotOp/tools.cpp | 4 +- src/FlatVision/flatVision.cpp | 26 +- src/FlatVision/helpers.cpp | 16 +- src/FlatVision/helpers.h | 2 +- src/FlatVision/objectManager.cpp | 6 +- src/FlatVision/objectManager.h | 2 +- src/FlatVision/retired/_objectManager.cpp | 14 +- .../cv_depth_backgroundSubstraction.cpp | 8 +- .../retired/cv_depth_backgroundSubstraction.h | 6 +- src/MarkerVision/cvTools.cpp | 28 +- src/MarkerVision/cvTools.h | 4 +- src/RealSense/MultiRealSenseThread.cpp | 8 +- src/RealSense/MultiRealSenseThread.h | 2 +- src/RealSense/RealSenseThread.cpp | 8 +- src/RealSense/RealSenseThread.h | 4 +- test/02-camera/main.cpp | 4 +- test/03-blobTracking/main.cpp | 8 +- test/14-calibPoints/main.cpp | 243 +++++++++++++----- test/14-calibPoints/rai.cfg | 6 +- test/14-calibPoints/station.g | 2 +- 32 files changed, 307 insertions(+), 170 deletions(-) rename src/BotOp/{pyBot.cxx => py-BotOp.cxx} (96%) rename src/BotOp/{pyBot.h => py-BotOp.h} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 73af8b9..c3b249d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -200,12 +200,12 @@ if(USE_PYBIND) add_definitions(-DRAI_PYBIND) include_directories(${pybind11_INCLUDE_DIRS}) - pybind11_add_module(ry SHARED + pybind11_add_module(_robotic SHARED rai/src/ry/unity.cxx - src/BotOp/pyBot.cxx + src/BotOp/py-BotOp.cxx ) - target_link_libraries(ry PRIVATE + target_link_libraries(_robotic PRIVATE rai ) endif() @@ -219,6 +219,11 @@ message(STATUS "[rai] compiler flags: " "${_defs}") ################################################################################ +add_custom_target(docstrings #ALL + DEPENDS _robotic + COMMAND pybind11-stubgen --ignore-invalid=all _robotic + COMMAND mv stubs/_robotic*/__init__.pyi _robotic.pyi) + add_executable(kinEdit rai/bin/src_kinEdit/main.cpp) target_link_libraries(kinEdit rai) @@ -235,7 +240,7 @@ set(PYTHON_SITE "lib/python${PYBIND11_PYTHON_VERSION}/site-packages") message(STATUS "[rai] installing python packages in " ${CMAKE_INSTALL_PREFIX}/${PYTHON_SITE}/robotic ) install( - TARGETS ry + TARGETS _robotic DESTINATION ${CMAKE_INSTALL_PREFIX}/${PYTHON_SITE}/robotic) file(GLOB sources rai/src/* src/*) diff --git a/config.mk b/config.mk index 41975bb..bfea396 100644 --- a/config.mk +++ b/config.mk @@ -27,7 +27,7 @@ CERES = 0 NLOPT = 0 IPOPT = 0 -## we use the following collision/physics libraries by default, but can be disabled +## we can use the following collision/physics libraries, some disabled by default #FCL = 0 #PHYSX = 0 BULLET = 0 diff --git a/rai b/rai index 37289c8..eafb5ff 160000 --- a/rai +++ b/rai @@ -1 +1 @@ -Subproject commit 37289c804dde0cfce1ab879fc98b6aa28273a638 +Subproject commit eafb5ffb948984b685aa7177be15b9c3ef238cc2 diff --git a/retired/perception/calibration/main.cpp b/retired/perception/calibration/main.cpp index c1482c4..be1b9c4 100644 --- a/retired/perception/calibration/main.cpp +++ b/retired/perception/calibration/main.cpp @@ -82,11 +82,11 @@ void 
online(){ cout <<"CALIB:" <setZero(); - armCalib->operator()(0,0) = calib(1)/fxypxy(1); - armCalib->operator()(0,1) = calib(0)/fxypxy(0); + armCalib->operator()(0,0) = calib(1)/fxycxy(1); + armCalib->operator()(0,1) = calib(0)/fxycxy(0); // armCalib->operator()(0,2) = .2* calib(2); armCalib->operator()(0,5) = calib(2); } @@ -151,7 +151,7 @@ void online(){ <<" mean depth=" < cameraDepth(RS.depth); diff --git a/retired/perception/registration/main.cpp b/retired/perception/registration/main.cpp index 5d11bc9..9274f22 100644 --- a/retired/perception/registration/main.cpp +++ b/retired/perception/registration/main.cpp @@ -16,7 +16,7 @@ void online(){ Var cam_depth; //camera output Var cam_color; //camera output Var cam_pose; //camera pose - Var cam_Fxypxy; //camera parameters + Var cam_fxycxy; //camera parameters Var regError; @@ -25,8 +25,8 @@ void online(){ //-- camera thread auto cam = make_shared(cam_color, cam_depth); cam_depth.waitForNextRevision(); - cam_Fxypxy.set() = cam->depth_fxypxy; - cout <<"RS calib=" <depth_fxycxy; + cout <<"RS calib=" <sub(cT,-cB-1,cL,-cR-1,0,-1); _cam_depth = cam_depth.get()->sub(cT,-cB-1,cL,-cR-1); - _cam_fxypxy = cam_Fxypxy.get(); + _cam_fxycxy = cam_fxycxy.get(); _cam_pose = cam_pose.get(); - _cam_fxypxy(2) -= cL; - _cam_fxypxy(3) -= cT; + _cam_fxycxy(2) -= cL; + _cam_fxycxy(3) -= cT; pixelColorNormalizeIntensity(_cam_color); @@ -68,7 +68,7 @@ void online(){ //extract object from cluster FlatPercept& flat = exNovel.flats.first(); - ptr obj = createObjectFromPercept(flat, labels, _cam_color, _cam_depth, _cam_pose, _cam_fxypxy, OT_box); + ptr obj = createObjectFromPercept(flat, labels, _cam_color, _cam_depth, _cam_pose, _cam_fxycxy, OT_box); cout <<"CREATED OBJECT: " <<*obj <sub(cT,-cB-1,cL,-cR-1,0,-1); _cam_depth = cam_depth.get()->sub(cT,-cB-1,cL,-cR-1); - _cam_fxypxy = cam_Fxypxy.get(); + _cam_fxycxy = cam_fxycxy.get(); _cam_pose = cam_pose.get(); - _cam_fxypxy(2) -= cL; - _cam_fxypxy(3) -= cT; + _cam_fxycxy(2) -= cL; + _cam_fxycxy(3) -= cT; pixelColorNormalizeIntensity(_cam_color); diff --git a/retired/perception/simulateCam/main.cpp b/retired/perception/simulateCam/main.cpp index d46478d..0110e00 100644 --- a/retired/perception/simulateCam/main.cpp +++ b/retired/perception/simulateCam/main.cpp @@ -83,7 +83,7 @@ void test_pickAndPlace(){ K.optimizeTree(); Var cameraPose; cameraPose.set() = K["camera"]->X.getArr7d(); - Var cameraFxypxy; + Var cameraFxycxy; Var ctrl; @@ -108,7 +108,7 @@ void test_pickAndPlace(){ //-- camera simulation rai::Sim_CameraView CV(RK, .05, "camera"); auto sen = CV.C.currentSensor; - cameraFxypxy.set() = ARR(sen->cam.focalLength*sen->height, sen->cam.focalLength*sen->height, .5*sen->width, .5*sen->height); + cameraFxycxy.set() = ARR(sen->cam.focalLength*sen->height, sen->cam.focalLength*sen->height, .5*sen->width, .5*sen->height); //-- pipeline: depth -> point cloud // Depth2PointCloud depth2pc(CV.depth, CV.C.currentSensor->cam.focalLength*300); @@ -150,7 +150,7 @@ void test_pickAndPlace(){ // return 0; // }); - CV_BackgroundSubstraction_Thread BS(CV.color, CV.depth, robotMask.color, cameraPose, cameraFxypxy, 1); + CV_BackgroundSubstraction_Thread BS(CV.color, CV.depth, robotMask.color, cameraPose, cameraFxycxy, 1); cout <<"WAITING FOR BACKGROUND..." 
<("bot/useGripper", true); bool robotiq = rai::getParameter("bot/useRobotiq", false); rai::String useArm = rai::getParameter("bot/useArm", "left"); + bool blockRealRobot = rai::getParameter("bot/blockRealRobot", false); C.ensure_indexedJoints(); qHome = C.getJointState(); state.set()->initZero(qHome.N); + if(blockRealRobot && useRealRobot){ + LOG(0) <<"-- blocking useRealRobot -- "; + useRealRobot=false; + } + //-- launch robots & grippers if(useRealRobot){ LOG(0) <<"CONNECTING TO FRANKAS"; @@ -388,7 +394,7 @@ void BotOp::getImageAndDepth(byteA& image, floatA& depth, const char* sensor){ cam->getImageAndDepth(image, depth); } -arr BotOp::getCameraFxypxy(const char* sensor){ +arr BotOp::getCameraFxycxy(const char* sensor){ auto cam = getCamera(sensor); return cam->getFxycxy(); } diff --git a/src/BotOp/bot.h b/src/BotOp/bot.h index 05f7c88..495b669 100644 --- a/src/BotOp/bot.h +++ b/src/BotOp/bot.h @@ -65,7 +65,7 @@ struct BotOp{ //-- camera commands void getImageAndDepth(byteA& image, floatA& depth, const char* sensor); void getImageDepthPcl(byteA& image, floatA& depth, arr& points, const char* sensor, bool globalCoordinates=false); - arr getCameraFxypxy(const char* sensor); + arr getCameraFxycxy(const char* sensor); //-- sync the user's C with the robot, update the display, return pressed key int sync(rai::Configuration& C, double waitTime=.1); diff --git a/src/BotOp/pyBot.cxx b/src/BotOp/py-BotOp.cxx similarity index 96% rename from src/BotOp/pyBot.cxx rename to src/BotOp/py-BotOp.cxx index 83f3a52..0c41a56 100644 --- a/src/BotOp/pyBot.cxx +++ b/src/BotOp/py-BotOp.cxx @@ -8,7 +8,7 @@ #ifdef RAI_PYBIND -#include "pyBot.h" +#include "py-BotOp.h" #include @@ -24,7 +24,7 @@ void init_BotOp(pybind11::module& m) { - pybind11::class_>(m, "BotOp", "needs some docu!") + pybind11::class_>(m, "BotOp", "Robot Operation interface -- see https://marctoussaint.github.io/robotics-course/tutorials/1b-botop.html") .def(pybind11::init(), "constructor", @@ -114,7 +114,7 @@ void init_BotOp(pybind11::module& m) { "returns if gripper is done", pybind11::arg("leftRight")) - .def("getCameraFxypxy", &BotOp::getCameraFxypxy, + .def("getCameraFxycxy", &BotOp::getCameraFxycxy, "returns camera intrinsics", pybind11::arg("sensorName")) diff --git a/src/BotOp/pyBot.h b/src/BotOp/py-BotOp.h similarity index 100% rename from src/BotOp/pyBot.h rename to src/BotOp/py-BotOp.h diff --git a/src/BotOp/simulation.cpp b/src/BotOp/simulation.cpp index 0532008..6581282 100644 --- a/src/BotOp/simulation.cpp +++ b/src/BotOp/simulation.cpp @@ -117,8 +117,11 @@ void BotThreadedSim::step(){ P_compliance = cmdGet->P_compliance; } + if(cmd_q_ref.N && cmd_qDot_ref.N){ sim->step((cmd_q_ref, cmd_qDot_ref), tau, sim->_posVel); -// sim->step({}, tau, sim->_none); + }else{ + sim->step({}, tau, sim->_none); + } q_real = simConfig.getJointState(); if(cmd_qDot_ref.N==qDot_real.N) qDot_real = cmd_qDot_ref; diff --git a/src/BotOp/tools.cpp b/src/BotOp/tools.cpp index 845a352..5134712 100644 --- a/src/BotOp/tools.cpp +++ b/src/BotOp/tools.cpp @@ -39,11 +39,11 @@ bool sense_HsvBlob(BotOp& bot, rai::Configuration& C, const char* camName, const arr x; if(Pinv.N){ - makeHomogeneousImageCoordinate(u, img.d0); + makeHomogeneousImageCoordinate(u); x = Pinv*u; }else{ x = u; - depthData2point(x, bot.getCameraFxypxy(camName)); + depthData2point(x, bot.getCameraFxycxy(camName)); } if(verbose>0) LOG(0) <<"dot in cam coords: " <get_X().applyOnPoint(x); diff --git a/src/FlatVision/flatVision.cpp b/src/FlatVision/flatVision.cpp index 381be4b..4535365 100644 --- 
a/src/FlatVision/flatVision.cpp +++ b/src/FlatVision/flatVision.cpp @@ -55,8 +55,8 @@ void FlatVisionThread::step(){ // TODO this assumes that the calibration was done with the already cropped image! // This of course can be fixed easily in the project method, which should get the cropping parameters -// _cam_fxypxy(2) -= cL; -// _cam_fxypxy(3) -= cT; +// _cam_fxycxy(2) -= cL; +// _cam_fxycxy(3) -= cT; //-- background @@ -70,32 +70,32 @@ void FlatVisionThread::step(){ exRobot.compute(labels, _cam_color, _cam_depth, _model_segments, _model_depth); { // cout <<"calib_L=" <clear(); // for(FlatPercept& flat : exNovel.flats){ -// P->append( novelPerceptToObject(flat, labels, _cam_depth, _cam_pose, _cam_fxypxy) ); +// P->append( novelPerceptToObject(flat, labels, _cam_depth, _cam_pose, _cam_fxycxy) ); // } // } } diff --git a/src/FlatVision/helpers.cpp b/src/FlatVision/helpers.cpp index 0275eba..464c3a3 100644 --- a/src/FlatVision/helpers.cpp +++ b/src/FlatVision/helpers.cpp @@ -73,7 +73,7 @@ void recomputeObjMinMaxAvgDepthSize(std::shared_ptr obj){ } } -void create3DfromFlat(std::shared_ptr obj, NovelObjectType type, const arr& fxypxy){ +void create3DfromFlat(std::shared_ptr obj, NovelObjectType type, const arr& fxycxy){ //get (top) center arr center = zeros(3); double sum=0.; @@ -88,7 +88,7 @@ void create3DfromFlat(std::shared_ptr obj, NovelObjectType type, const a } } center /= sum; - depthData2point(center, fxypxy); + depthData2point(center, fxycxy); obj->pose.pos = center; if(type==OT_pcl){ @@ -96,7 +96,7 @@ void create3DfromFlat(std::shared_ptr obj, NovelObjectType type, const a floatA _depth = obj->depth.sub(obj->rect(1), obj->rect(3)-1, obj->rect(0), obj->rect(2)-1); floatA _mask = obj->mask.sub(obj->rect(1), obj->rect(3)-1, obj->rect(0), obj->rect(2)-1); arr V; - depthData2pointCloud(V, _depth, fxypxy(0), fxypxy(1), fxypxy(2)-obj->rect(0), fxypxy(3)-obj->rect(1)); + depthData2pointCloud(V, _depth, fxycxy(0), fxycxy(1), fxycxy(2)-obj->rect(0), fxycxy(3)-obj->rect(1)); V.reshape(V.N/3, 3); uint maskN=0; for(uint i=0;i<_mask.N;i++) if(_mask.elem(i)>.5) maskN++; @@ -129,7 +129,7 @@ void create3DfromFlat(std::shared_ptr obj, NovelObjectType type, const a } }else NIY; - for(uint i=0;imesh.clear(); @@ -155,7 +155,7 @@ void create3DfromFlat(std::shared_ptr obj, NovelObjectType type, const a ptr createObjectFromPercept(const FlatPercept& flat, const byteA& labels, const byteA& cam_color, const floatA& cam_depth, - const arr& cam_pose, const arr& fxypxy, + const arr& cam_pose, const arr& fxycxy, NovelObjectType type){ cv::Mat cv_cam_depth = CV(cam_depth); @@ -174,7 +174,7 @@ ptr createObjectFromPercept(const FlatPercept& flat, //-- create object's 3D shape obj->pose.setZero(); - create3DfromFlat(obj, type, fxypxy); + create3DfromFlat(obj, type, fxycxy); obj->mesh.clear(); @@ -189,7 +189,7 @@ ptr createObjectFromPercept(const FlatPercept& flat, arr getPCLforLabels(PixelLabel label, const byteA& labels, const floatA& cam_depth, - const arr& cam_pose, const arr& fxypxy){ + const arr& cam_pose, const arr& fxycxy){ uint n=0; for(byte l:labels) if(l==label) n++; @@ -208,7 +208,7 @@ arr getPCLforLabels(PixelLabel label, } CHECK_EQ(n, V.d0, ""); - for(uint i=0;i& flats, } } -void ObjectManager::displayLabelsAsPCL(PixelLabel label, const byteA& labels, const floatA& cam_depth, const arr& cam_pose, const arr& cam_fxypxy, rai::KinematicWorld& config){ - arr V = getPCLforLabels(label, labels, cam_depth, cam_pose, cam_fxypxy); +void ObjectManager::displayLabelsAsPCL(PixelLabel label, const byteA& labels, 
const floatA& cam_depth, const arr& cam_pose, const arr& cam_fxycxy, rai::KinematicWorld& config){ + arr V = getPCLforLabels(label, labels, cam_depth, cam_pose, cam_fxycxy); rai::Frame *f = getFrame(config, 0, STRING("pcl_"<<(int)label)); f->shape->visual = true; f->X.set(cam_pose); @@ -408,7 +408,7 @@ void ObjectManager::displayLabelsAsPCL(PixelLabel label, const byteA& labels, co //if(&directSync){ -// ptr obj = createObjectFromPercept(flat, labels, cam_color, cam_depth, cam_pose, cam_fxypxy, OT_pcl); +// ptr obj = createObjectFromPercept(flat, labels, cam_color, cam_depth, cam_pose, cam_fxycxy, OT_pcl); // obj->pixelLabel = PixelLabel(PL_objects + obj->object_ID); // rai::Frame *f=getFrame(directSync, 0, STRING("perc_"<shape->visual = false; diff --git a/src/FlatVision/objectManager.h b/src/FlatVision/objectManager.h index 821d631..f089ddd 100644 --- a/src/FlatVision/objectManager.h +++ b/src/FlatVision/objectManager.h @@ -47,7 +47,7 @@ struct ObjectManager{ void displayLabelsAsPCL(PixelLabel label, const byteA& labels, const floatA& cam_depth, - const arr& cam_pose, const arr& cam_fxypxy, + const arr& cam_pose, const arr& cam_fxycxy, rai::KinematicWorld& config); void displayLabels(const byteA& labels, diff --git a/src/FlatVision/retired/_objectManager.cpp b/src/FlatVision/retired/_objectManager.cpp index 71c75eb..629e3b1 100644 --- a/src/FlatVision/retired/_objectManager.cpp +++ b/src/FlatVision/retired/_objectManager.cpp @@ -90,8 +90,8 @@ bool ObjectManager::mergePerceptIntoObjects(FlatPercept& perc, void ObjectManager::createNewObjectFromPercept(FlatPercept& p, const byteA& labels, const byteA& cam_color, const floatA& cam_depth, - const arr& cam_pose, const arr& cam_fxypxy){ - ptr obj = novelPerceptToObject(p, labels, cam_color, cam_depth, cam_pose, cam_fxypxy, OT_box); + const arr& cam_pose, const arr& cam_fxycxy){ + ptr obj = novelPerceptToObject(p, labels, cam_color, cam_depth, cam_pose, cam_fxycxy, OT_box); obj->object_ID = objIdCount++; obj->pixelLabel = PixelLabel(PL_objects + obj->object_ID); objects.set()->append(obj); @@ -102,7 +102,7 @@ void ObjectManager::processNovelPercepts(rai::Array& flats, const byteA& labels, const byteA& cam_color, const floatA& cam_depth, const byteA& model_segments, const floatA& model_depth, - const arr& cam_pose, const arr& cam_fxypxy){ + const arr& cam_pose, const arr& cam_fxycxy){ for(FlatPercept& p:flats){ bool success = mergePerceptIntoObjects(p, labels, cam_color, cam_depth, model_segments, model_depth); @@ -110,14 +110,14 @@ void ObjectManager::processNovelPercepts(rai::Array& flats, //create novel object if it can't be merged if(!success){ if(objIdCount<=0){ - createNewObjectFromPercept(p,labels, cam_color, cam_depth, cam_pose, cam_fxypxy); + createNewObjectFromPercept(p,labels, cam_color, cam_depth, cam_pose, cam_fxycxy); } } } } -void ObjectManager::updateObjectPose(PixelLabel label, const arr& calib, const arr& cam_fxypxy){ +void ObjectManager::updateObjectPose(PixelLabel label, const arr& calib, const arr& cam_fxycxy){ ptr obj; auto O = objects.set(); for(ptr& o:O()) if(o->pixelLabel==label){ obj=o; break; } @@ -127,8 +127,8 @@ void ObjectManager::updateObjectPose(PixelLabel label, const arr& calib, const a } double step=.5; - obj->pose.pos.x += step * calib(0)/cam_fxypxy(0); - obj->pose.pos.y += step * calib(1)/cam_fxypxy(1); + obj->pose.pos.x += step * calib(0)/cam_fxycxy(0); + obj->pose.pos.y += step * calib(1)/cam_fxycxy(1); // obj->pose.pos.z += 0.2*step * calib(0,2); // obj->pose.rot.addX(0.2*step*calib(0,3)); // 
obj->pose.rot.addY(0.2*step*calib(0,4)); diff --git a/src/FlatVision/retired/cv_depth_backgroundSubstraction.cpp b/src/FlatVision/retired/cv_depth_backgroundSubstraction.cpp index e30b906..63b44bb 100644 --- a/src/FlatVision/retired/cv_depth_backgroundSubstraction.cpp +++ b/src/FlatVision/retired/cv_depth_backgroundSubstraction.cpp @@ -194,8 +194,8 @@ void CV_BackgroundSubstraction_Thread::step(){ compute(_color, _depth, _inputMask); - arr fxypxy = cameraFxypxy.get(); - CHECK(fxypxy.N, "need camera calibration parameters"); + arr fxycxy = cameraFxycxy.get(); + CHECK(fxycxy.N, "need camera calibration parameters"); if(false){ auto P = percepts.set(); P->clear(); @@ -204,11 +204,11 @@ void CV_BackgroundSubstraction_Thread::step(){ auto p = make_shared(); p->id=k++; //add the observed points to the mesh: - depthData2pointCloud(p->mesh.V, cv_p.depthRect, fxypxy(0), fxypxy(1), fxypxy(2)-cv_p.rect(0), fxypxy(3)-cv_p.rect(1)); + depthData2pointCloud(p->mesh.V, cv_p.depthRect, fxycxy(0), fxycxy(1), fxycxy(2)-cv_p.rect(0), fxycxy(3)-cv_p.rect(1)); p->mesh.V.reshape(p->mesh.V.N/3, 3); //add the background points to the mesh: (assuming object to rest on background) arr V2; - depthData2pointCloud(V2, cv_p.backgroundRect, fxypxy(0), fxypxy(1), fxypxy(2)-cv_p.rect(0), fxypxy(3)-cv_p.rect(1)); + depthData2pointCloud(V2, cv_p.backgroundRect, fxycxy(0), fxycxy(1), fxycxy(2)-cv_p.rect(0), fxycxy(3)-cv_p.rect(1)); V2.reshape(V2.N/3, 3); p->mesh.V.append(V2); //make convex diff --git a/src/FlatVision/retired/cv_depth_backgroundSubstraction.h b/src/FlatVision/retired/cv_depth_backgroundSubstraction.h index 4f5ccce..02c97c2 100644 --- a/src/FlatVision/retired/cv_depth_backgroundSubstraction.h +++ b/src/FlatVision/retired/cv_depth_backgroundSubstraction.h @@ -42,20 +42,20 @@ struct CV_BackgroundSubstraction_Thread : Thread, CV_BackgroundSubstraction { Var depth; Var mask; Var cameraPose; - Var cameraFxypxy; + Var cameraFxycxy; CV_BackgroundSubstraction_Thread(Var& _percepts, Var& _color, Var& _depth, Var& _mask, Var& _cameraPose, - Var& _cameraFxypxy, int _verbose=1) + Var& _cameraFxycxy, int _verbose=1) : Thread("BackgroundSubstraction", -1.), percepts(this, _percepts), color(this, _color), depth(this, _depth, true), mask(this, _mask), cameraPose(this, _cameraPose), - cameraFxypxy(this, _cameraFxypxy){ + cameraFxycxy(this, _cameraFxycxy){ CV_BackgroundSubstraction::verbose = _verbose; threadOpen(); } diff --git a/src/MarkerVision/cvTools.cpp b/src/MarkerVision/cvTools.cpp index b90f4f6..051b5d1 100644 --- a/src/MarkerVision/cvTools.cpp +++ b/src/MarkerVision/cvTools.cpp @@ -2,24 +2,29 @@ #include -void makeHomogeneousImageCoordinate(arr& u, uint imgHeight){ -// u(1) = double(imgHeight-1)-u(1); -// u(2) *= -1.; +void makeHomogeneousImageCoordinate(arr& u){ u(0) *= u(2); u(1) *= u(2); u.append(1.); } void decomposeInvProjectionMatrix(arr& K, arr& R, arr& t, const arr& P){ - arr KR = P.sub(0,2,0,2); - t = ~P.col(3); - KR = inverse(KR); - lapack_RQ(K, R, KR); + t = P.col(3); t.resizeCopy(3); + arr KRt = inverse(P.sub(0,2,0,2)); + lapack_RQ(K, R, KRt); + //cout <<"K*R" <(y,x); + histograms(0,c.val[0]) ++; + histograms(1,c.val[1]) ++; + histograms(2,c.val[2]) ++; + } } } } diff --git a/src/MarkerVision/cvTools.h b/src/MarkerVision/cvTools.h index 780b77e..be2e166 100644 --- a/src/MarkerVision/cvTools.h +++ b/src/MarkerVision/cvTools.h @@ -1,10 +1,10 @@ #include -void makeHomogeneousImageCoordinate(arr& u, uint imgHeight); +void makeHomogeneousImageCoordinate(arr& u); void decomposeInvProjectionMatrix(arr& K, arr& R, 
arr& t, const arr& P); -arr getHsvBlobImageCoords(byteA& _rgb, floatA& _depth, const arr& hsvFilter, int verbose=0); +arr getHsvBlobImageCoords(byteA& _rgb, floatA& _depth, const arr& hsvFilter, int verbose=0, arr& histograms=NoArr); struct CameraCalibrationHSVGui { static const char* window_detection_name; diff --git a/src/RealSense/MultiRealSenseThread.cpp b/src/RealSense/MultiRealSenseThread.cpp index 3903608..b20f25e 100644 --- a/src/RealSense/MultiRealSenseThread.cpp +++ b/src/RealSense/MultiRealSenseThread.cpp @@ -125,8 +125,8 @@ RealSenseCamera::RealSenseCamera(std::string cameraName, bool captureColor, bool if(vsp){ rs2_intrinsics intrinsics = vsp.get_intrinsics(); LOG(1) <<" is video: w=" < #include -arr getHsvBlobPosition(cv::Mat& rgb, cv::Mat& depth, const arr& hsvFilter, const arr& Fxypxy){ +arr getHsvBlobPosition(cv::Mat& rgb, cv::Mat& depth, const arr& hsvFilter, const arr& fxycxy){ //blur cv::blur(rgb, rgb, cv::Size(3,3)); @@ -67,7 +67,7 @@ arr getHsvBlobPosition(cv::Mat& rgb, cv::Mat& depth, const arr& hsvFilter, const blobPosition= {objX, objY, objDepth}; // camera coordinates - depthData2point(blobPosition, Fxypxy); //transforms the point to camera xyz coordinates + depthData2point(blobPosition, fxycxy); //transforms the point to camera xyz coordinates // world coordinates //cameraFrame->X.applyOnPoint(objCoords); //transforms into world coordinates @@ -103,7 +103,7 @@ void tracking2(){ RealSenseThread RS("cam"); // grab the camera intrinsics - arr Fxypxy = RS.fxypxy; + arr fxycxy = RS.fxycxy; // set hsv filter parameters arr hsvFilter = rai::getParameter("hsvFilter").reshape(2,3); @@ -119,7 +119,7 @@ void tracking2(){ if(rgb.rows != depth.rows) continue; // blur - arr pos = getHsvBlobPosition(rgb, depth, hsvFilter, Fxypxy); + arr pos = getHsvBlobPosition(rgb, depth, hsvFilter, fxycxy); cout <<"blob position: " < #include +#include + //=========================================================================== +void setupConfig(rai::Configuration& C){ + C.addFile(rai::raiPath("../rai-robotModels/scenarios/pandaSingle.g")); + + arr dots = rai::getParameter("dots"); + dots.reshape(-1,3); + + cout <<"-- adding " <setRelativePosition(dots[i]) + .setShape(rai::ST_cylinder, {.001, .02}) + .setColor({.2, .3, 1}); + } +} void collectData(){ //-- setup a configuration rai::Configuration C; - C.addFile("station.g"); + setupConfig(C); + + arr dots = rai::getParameter("dots"); + uint views = rai::getParameter("views"); + dots.reshape(-1,3); + + OpenGL imgGl; -// C["bellybutton"]->name = "dot0"; rai::Frame* cam = C["cameraWrist"]; rai::Frame* mount = C["l_panda_joint7"]; - BotOp bot(C, rai::getParameter("real")); + BotOp bot(C, rai::getParameter("real", false)); #if 0 bot.gripperClose(rai::_left); @@ -40,13 +61,12 @@ void collectData(){ rai::Graph data; - uint nDots=2; - for(uint d=0;dname<name); + dat.add("dot", dot->ID); + dat.add("q", bot.get_q()); dat.add("mountPose", mount->getPose()); dat.add("camPose", cam->getPose()); - dat.add("camFxypxy", bot.getCameraFxypxy(cam->name)); + dat.add("camFxycxy", bot.getCameraFxycxy(cam->name)); dat.add("dotPosition", dot->getPosition()); dat.add("img", img); dat.add("depth", depth); + imgGl.watchImage(img, false); } } - data.write(FILE("dat.g"), "\n", 0, -1, false, true); + data.write(FILE("dat.g"), ",", "{}", 0); bot.home(C); } @@ -114,8 +137,10 @@ void computeCalibration(){ // set hsv filter parameters arr hsvFilter = rai::getParameter("hsvFilter"); + int checks = rai::getParameter("checks", 1); - arr U(0,4), X(0,3); + arr U(0,4), X(0,4); + 
arr histogram; OpenGL disp; for(rai::Node *n:data){ @@ -128,39 +153,44 @@ void computeCalibration(){ rai::Transformation mountPose(dat.get("mountPose")); rai::Transformation camPose(dat.get("camPose")); rai::Vector dotWorld(dat.get("dotPosition")); - arr Fxypxy = dat.get("camFxypxy"); + arr fxycxy = dat.get("camFxycxy"); if(img.d0 != depth.d0) continue; - // image coordinates - arr u = getHsvBlobImageCoords(img, depth, hsvFilter); + // blob image coordinates + arr u = getHsvBlobImageCoords(img, depth, hsvFilter, 0, histogram); if(!u.N){ rai::wait(); continue; } - // homogeneous + // blob homogeneous coordinates arr uHom = u; - makeHomogeneousImageCoordinate(uHom, img.d0); + makeHomogeneousImageCoordinate(uHom); - // camera coordinates assuming given intrinsics - arr xCam=u; - depthData2point(xCam, Fxypxy); //transforms the point to camera xyz coordinates + // blob camera coordinates, assuming given intrinsics + arr xCam = u; + depthData2point(xCam, fxycxy); //transforms the point to camera xyz coordinates - // camera coordinates from ground truth world coordinates - arr x = (dotWorld / camPose).getArr(); + // dot coordinates in cam/mount frame + arr x = (dotWorld / mountPose).getArr(); //camPose or mountPose - both work... - cout <<"blob: " <0, 1.); //collect data U.append(uHom); + x.append(1.); //works equally with or without appending 1... X.append(x); } + X.reshape(U.d0, -1); //-- multiple iterations - arr Pinv, K, R, t; + arr Pinv; for(uint k=0;k<5;k++){ Pinv = ~X * U * inverse_SymPosDef(~U*U); - decomposeInvProjectionMatrix(K, R, t, Pinv); for(uint i=0;i("hsvFilter"); + int checks = rai::getParameter("checks", 1); + + rai::Configuration C; + setupConfig(C); + + rai::Graph data("dat.g"); + + //make camera mount stable free joint + rai::Frame* cam = C["cameraWrist"]; + cam->setJoint(rai::JT_free); + cam->joint->isStable = true; + arr qCam = cam->joint->getDofState(); + C.report(); + + + //add a 'viewPoint' marker to the camera + rai::Frame* viewPoint = C.addFrame("viewPoint", cam->name); + viewPoint->setShape(rai::ST_marker, {.1}); + + //setup KOMO, one frame for each datapoint + KOMO komo(C, data.N, 1, 0, false); + komo.setupPathConfig(); + + //add objectives for each data point + arr histogram; + for(uint t=0;tgraph(); + + //set the configuration + arr q = dat.get("q"); + if(!t){ + komo.setConfiguration_qOrg(t, (q, qCam)); + }else{ + komo.setConfiguration_qOrg(t, q); + } + + //set the viewPoint + byteA img(dat.get("img")); + floatA depth(dat.get("depth")); + arr fxycxy = dat.get("camFxycxy"); + arr u = getHsvBlobImageCoords(img, depth, hsvFilter, 0, histogram); + arr xCam = u; + depthData2point(xCam, fxycxy); //transforms the point to camera xyz coordinates + komo.timeSlices(t, viewPoint->ID) ->setRelativePosition(xCam); + + cout <key; + n->key.resize(n->key.N-1, true); + + //add calibration objective + komo.addObjective({double(t+1)}, FS_positionDiff, {"viewPoint", n->key}, OT_eq, {1e0}); + } + + komo.pathConfig.selectJoints({komo.timeSlices(0, cam->ID)->joint}); + + komo.view(true, "before optim"); +// komo.pathConfig.animate(); + + cout <setShape(rai::ST_marker, {.1}); target->setColor({1.,.5,0.}); + C.view(true); + rai::Frame* cam = C["cameraWrist"]; arr hsvFilter = rai::getParameter("hsvFilter"); arr Pinv = rai::getParameter("Pinv"); int checks = rai::getParameter("checks", 1); - BotOp bot(C, rai::getParameter("real")); - - //pre motion - bot.gripperMove(rai::_left); - { - Move_IK move(bot, C, checks); - move().addObjective({}, FS_positionRel, {"dot0", cam->name}, OT_eq, {1e0}, 
{0.,0.,-.3}); - if(!move.go()) return; - } - - //fine motion - bot.gripperCloseGrasp(rai::_left, 0); - for(uint t=0;t<5;t++) bot.sync(C); - if(!sense_HsvBlob(bot, C, cam->name, "target", hsvFilter, Pinv, checks)) return; - { - Move_IK move(bot, C, checks); - move().addObjective({}, FS_vectorZ, {"l_gripper"}, OT_eq, {1e0}, {0.,0.,1.}); - move().addObjective({}, FS_positionRel, {"target", "l_gripper"}, OT_eq, {1e0}, {0.,0.,-.01}); - if(!move.go()) return; - } + BotOp bot(C, rai::getParameter("real", false)); + + for(uint i=0;;i++){ + rai::Frame *dot = C.getFrame(STRING("dot"<name, cam->name}, OT_eq, {1e0}, {0.,0.,.25}); + if(!move.go()) return; + } - //pre motion - if(!bot.wait(C)) return; - bot.gripperMove(rai::_left); - { - Move_IK move(bot, C, checks); - move().addObjective({}, FS_positionRel, {"dot1", cam->name}, OT_eq, {1e0}, {0.,0.,-.3}); - if(!move.go()) return; - } + //fine motion + bot.gripperMove(rai::_left, 0); + for(uint t=0;t<5;t++) bot.sync(C); + if(!sense_HsvBlob(bot, C, cam->name, "target", hsvFilter, Pinv, checks)) return; + { + Move_IK move(bot, C, checks); + move().addObjective({}, FS_vectorZ, {"l_gripper"}, OT_eq, {1e0}, {0.,0.,1.}); + move().addObjective({}, FS_positionDiff, {"l_gripper", "target"}, OT_eq, {1e0}, {0.,0., .01}); + if(!move.go()) return; + } - //fine motion - bot.gripperCloseGrasp(rai::_left, 0); - for(uint t=0;t<5;t++) bot.sync(C); - if(!sense_HsvBlob(bot, C, cam->name, "target", hsvFilter, Pinv, checks)) return; - { - Move_IK move(bot, C, checks); - move().addObjective({}, FS_vectorZ, {"l_gripper"}, OT_eq, {1e0}, {0.,0.,1.}); - move().addObjective({}, FS_positionRel, {"target", "l_gripper"}, OT_eq, {1e0}, {0.,0.,-.01}); - if(!move.go()) return; + bot.wait(C, true, false); + if(bot.keypressed=='q') break; } - if(!bot.wait(C)) return; bot.gripperMove(rai::_left); bot.home(C); } @@ -253,7 +359,7 @@ void checkTip(){ rai::Configuration C; C.addFile("station.g"); - BotOp bot(C, rai::getParameter("real")); + BotOp bot(C, rai::getParameter("real", false)); rai::Frame* tip = C["l_gripper"]; @@ -280,10 +386,11 @@ int main(int argc, char * argv[]){ // collectData(); // computeCalibration(); + komoCalibrate(); // selectHSV(); - demoCalibration(); +// demoCalibration(); // checkTip(); diff --git a/test/14-calibPoints/rai.cfg b/test/14-calibPoints/rai.cfg index 8b8701b..e3f30d9 100644 --- a/test/14-calibPoints/rai.cfg +++ b/test/14-calibPoints/rai.cfg @@ -1,4 +1,4 @@ -real +#real checks: 0 #bot/useOptitrack: true @@ -11,6 +11,9 @@ botsim/verbose: 0 #RealSense/exposure: 100 #RealSense/white: 2500 +dots: [ 0.176193, 0.104552, 0.0525, -0.300173, 0.108897, 0.0525] +views: 10 + #real day hsvFilter: [98, 80, 94, 133, 255, 255] @@ -19,6 +22,7 @@ hsvFilter: [98, 80, 94, hsvFilter: [77, 105, 66, 135, 255, 235] + #real Pinv: [] diff --git a/test/14-calibPoints/station.g b/test/14-calibPoints/station.g index b1f72d4..31f7e22 100644 --- a/test/14-calibPoints/station.g +++ b/test/14-calibPoints/station.g @@ -1,4 +1,4 @@ -Include: '../../rai-robotModels/scenarios/pandaSingle.g' +Include: <../../rai-robotModels/scenarios/pandaSingle.g> dot0 (table) { X:[0.176193, 0.104552, 0.6525], shape: cylinder, size: [.001 .02], color: [.2 .3 1] } dot1 (table) { X:[-0.300173, 0.108897, 0.6525], shape: cylinder, size: [.001 .02], color: [.2 .3 1] }
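
Note on the fxypxy -> fxycxy renaming that runs through this patch: the four camera intrinsics are packed as (fx, fy, cx, cy) -- focal lengths plus principal point (the patch's own `_cam_fxycxy(2) -= cL; _cam_fxycxy(3) -= cT;` shifts exactly the cx, cy entries when cropping) -- so the new name matches the content. The calibration code leans on two small mappings: depthData2point() lifts a pixel (u,v) with depth d into a camera-frame point, and makeHomogeneousImageCoordinate() builds the homogeneous row (u*d, v*d, d, 1) that computeCalibration() stacks into U. Below is a minimal stand-alone sketch of both, using plain std::array instead of rai's arr; the axis convention (no y/z flips) is an assumption here -- rai's camera frame may negate axes, which is what the commented-out flip lines removed from makeHomogeneousImageCoordinate() hint at.

    #include <array>
    #include <cstdio>

    // intrinsics packed as {fx, fy, cx, cy} -- the layout the rename refers to
    using Fxycxy = std::array<double,4>;

    // pixel (u,v) with depth d -> camera-frame point; mirrors what
    // depthData2point() computes, ASSUMING no axis flips
    std::array<double,3> pixelToCamera(double u, double v, double d, const Fxycxy& c){
      return { d*(u - c[2])/c[0],
               d*(v - c[3])/c[1],
               d };
    }

    // homogeneous image coordinate (u*d, v*d, d, 1), as built by
    // makeHomogeneousImageCoordinate(arr& u) now that the imgHeight flip is gone
    std::array<double,4> homogeneousImageCoord(double u, double v, double d){
      return { u*d, v*d, d, 1. };
    }

    int main(){
      Fxycxy c{322.3, 322.3, 320., 180.};  // hypothetical wrist-cam values
      auto x = pixelToCamera(400., 200., .5, c);
      std::printf("camera point: %.4f %.4f %.4f\n", x[0], x[1], x[2]);
      return 0;
    }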
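
The linear fit in computeCalibration() is ordinary least squares over those rows: with U (n x 4) holding the homogeneous pixel coordinates and X (n x 4) the matching dot positions in the camera/mount frame (with an appended 1), the line `Pinv = ~X * U * inverse_SymPosDef(~U*U)` is the normal-equations solution of min_P sum_i |x_i - P u_i|^2, i.e. P = (X^T U)(U^T U)^-1. decomposeInvProjectionMatrix() then inverts the 3x3 block of this inverse projection back to K*R and RQ-decomposes it to recover intrinsics K and rotation R. A sketch of the same fit on synthetic data, written with Eigen rather than rai's arr (the matrix values and sample grid are made up):

    #include <Eigen/Dense>
    #include <iostream>

    int main(){
      using namespace Eigen;

      // hypothetical ground-truth inverse projection (4x4, last row [0 0 0 1])
      Matrix4d Ptrue = Matrix4d::Identity();
      Ptrue(0,0) = 0.0031; Ptrue(1,1) = 0.0031;   // ~1/fx, ~1/fy
      Ptrue(0,2) = -0.99;  Ptrue(1,2) = -0.56;    // ~-cx/fx, ~-cy/fy

      // stack n samples: rows of U are homogeneous pixel coords (u*d, v*d, d, 1),
      // rows of X the matching 3D points with an appended 1, as in computeCalibration()
      const int n = 50;
      MatrixXd U(n,4), X(n,4);
      for(int i=0; i<n; i++){
        double u = 100. + 37.*(i%10), v = 80. + 23.*(i/10), d = .4 + .01*i;
        Vector4d uh(u*d, v*d, d, 1.);
        U.row(i) = uh.transpose();
        X.row(i) = (Ptrue*uh).transpose();  // perfect data; the real loop rejects outliers over several iterations
      }

      // normal equations P = (X^T U)(U^T U)^{-1}, matching
      // Pinv = ~X * U * inverse_SymPosDef(~U*U)
      MatrixXd Pinv = X.transpose()*U * (U.transpose()*U).inverse();
      std::cout << "recovered Pinv:\n" << Pinv << "\n";
      return 0;
    }

The komoCalibrate() variant added in test/14-calibPoints goes one step further than this closed-form fit: it keeps the camera mount as a stable free joint and jointly optimizes its pose so that the per-datapoint 'viewPoint' (the blob lifted by depthData2point) coincides with the known dot frames, which folds the extrinsic calibration into the same KOMO problem.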