#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && defined(VISP_HAVE_OPENCV)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpOccipitalStructure.h>
#include <visp3/vision/vpKeyPoint.h>

int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;
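
  // Command line parsing: each option overrides one of the defaults above.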
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    }
    else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    }
    else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--learn") {
      learn = true;
    }
    else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    }
    else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    }
    else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
                << argv[0]
                << " [--model_color <object.cao>] [--model_depth <object.cao>]"
                   " [--config_color <object.xml>] [--config_depth <object.xml>]"
                   " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
                   " [--proj_error_threshold <threshold between 0 and 90> (default: "
                << proj_error_threshold
                << ")]"
                   " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
                   " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
                   " [--display_proj_error]" << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn" << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init" << std::endl;

      return EXIT_SUCCESS;
    }
  }
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options   : " << std::endl;
  std::cout << "  Learn        : " << learn << std::endl;
  std::cout << "  Auto init    : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;
  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature between edge, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || "
                 "init_file.empty()"
              << std::endl;
    return EXIT_FAILURE;
  }

  vpOccipitalStructure sc;
  ST::CaptureSessionSettings settings;
  settings.source = ST::CaptureSessionSourceId::StructureCore;
  settings.structureCore.visibleEnabled = true;
  settings.applyExpensiveCorrection = true; // Apply a correction and clean filter to the depth before streaming.

  try {
    sc.open(settings);
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
    std::cout << "Check if the Structure Core camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }
  vpCameraParameters cam_color =
      sc.getCameraParameters(vpOccipitalStructure::visible, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
      sc.getCameraParameters(vpOccipitalStructure::depth, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  // Buffers for the color, grayscale, displayable depth and raw depth streams.
  unsigned int width = sc.getWidth(vpOccipitalStructure::depth), height = sc.getHeight(vpOccipitalStructure::depth);
  vpImage<vpRGBa> I_color(sc.getHeight(vpOccipitalStructure::visible), sc.getWidth(vpOccipitalStructure::visible));
  vpImage<unsigned char> I_gray(I_color.getHeight(), I_color.getWidth());
  vpImage<unsigned char> I_depth(height, width);
  vpImage<float> I_depth_raw(height, width);
  unsigned int _posx = 100, _posy = 50;

#ifdef VISP_HAVE_X11
  vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d1, d2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV d1, d2;
#endif
  if (use_edges || use_klt)
    d1.init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");

  // Acquire frames until the user clicks in one of the views to start the tracker initialization.
  while (true) {
    sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, NULL, NULL);
    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }
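
  // Register one tracker type per camera: edges and/or KLT on the color image, dense depth on the point cloud.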
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

  // Transformation from the color (visible) frame to the depth frame.
  vpHomogeneousMatrix depth_M_color = sc.getTransform(vpOccipitalStructure::visible, vpOccipitalStructure::depth);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);
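
  // With color and depth enabled the tracker handles two cameras named "Camera1" (color) and "Camera2" (depth);
  // with a single input the default name "Camera" is used.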
  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);
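
  // Keypoint pipeline used for learning and auto-initialization: SIFT/SIFT when available, FAST/ORB as a fallback.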
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
#endif
  }
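
  // With --auto_init a previously saved learning database is required; otherwise the user clicks
  // the reference points listed in the .init file.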
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  }
  else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);
  }
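
  // Tracking loop: runs until the user quits; with --auto_init the initial pose is recovered from the keypoint database.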
  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

  try {
    bool quit = false;
    bool learn_position = false;
    int learn_id = 1;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;
      // Acquire images, the point cloud for the depth tracker, and update the tracker input data.
      sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }
      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      }
      else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      }
      else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeed" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          }
          else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          }
          else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        }
        else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn display features off just after auto init, to not display wrong moving edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
        else if (use_edges || use_klt) {
          tracker.track(I_gray);
        }
        else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }
      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        // Projection error computed from the moving-edge features
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }
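
      // Display the tracking results only when tracking succeeded on this frame.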
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        }
        else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        }
        else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }
        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";
      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          }
          else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          }
          else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }
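
      // On a learn request, detect keypoints in the current image, keep those lying on a visible model face,
      // back-project them to 3D using the current pose, and append them to the reference database.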
      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the object
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates of the keypoints lying on the object faces
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
  }
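
  // Report timing statistics accumulated over the session.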
  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_OCCIPITAL_STRUCTURE)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install libStructure 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif