Visual Servoing Platform version 3.6.0
mbtGenericTrackingDepth.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Example of tracking with vpMbGenericTracker on the castel sequence.
 *
*****************************************************************************/
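
// Illustrative invocation only (the binary name, dataset location and option values
// below are assumptions, not taken from this file): run without mouse interaction,
// with an edge tracker on the color image and a dense depth tracker on the depth map,
// stopping after frame 20:
//   VISP_INPUT_IMAGE_PATH=/path/to/ViSP-images ./mbtGenericTrackingDepth -c -t 1 -T 8 -e 20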

#include <cstdlib>
#include <iostream>
#include <visp3/core/vpConfig.h>

#if (defined(VISP_HAVE_MODULE_MBT) && defined(VISP_HAVE_DISPLAY)) && \
  (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))

#include <visp3/core/vpDebug.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpMath.h>
#include <visp3/gui/vpDisplayD3D.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbGenericTracker.h>

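// getopt-style option string parsed by vpParseArgv::parse(): a letter followed by ':'
// expects a value (e.g. "x:" for -x <config file>), a bare letter is a boolean switch.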
#define GETOPTARGS "x:X:m:M:i:n:dchfolwvpt:T:e:"

#define USE_XML 1
#define USE_SMALL_DATASET 1 // small depth dataset in ViSP-images

namespace
{
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Example of tracking with vpMbGenericTracker.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-x <config file>] [-X <config file depth>]\n\
  [-m <model name>] [-M <model name depth>] [-n <initialisation file base name>]\n\
  [-f] [-c] [-d] [-h] [-o] [-w] [-l] [-v] [-p]\n\
  [-t <tracker type>] [-T <tracker type>] [-e <last frame index>]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS: \n\
  -i <input image path> \n\
     Set image input path.\n\
     These images come from visp-images-x.y.z.tar.gz available \n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behavior as using\n\
     this option.\n\
\n\
  -x <config file> \n\
     Set the config file (the xml file) to use.\n\
     The config file is used to specify the parameters of the tracker.\n\
\n\
  -X <config file> \n\
     Set the config file (the xml file) to use for the depth sensor.\n\
     The config file is used to specify the parameters of the tracker.\n\
\n\
  -m <model name> \n\
     Specify the name of the file of the model.\n\
     The model can either be a vrml model (.wrl) or a .cao file.\n\
\n\
  -M <model name> \n\
     Specify the name of the file of the model for the depth sensor.\n\
     The model can either be a vrml model (.wrl) or a .cao file.\n\
\n\
  -n <initialisation file base name> \n\
     Base name of the initialisation file. The file will be 'base_name'.init .\n\
     This base name is also used for the optional picture specifying where to \n\
     click (a .ppm picture).\n\
\n\
  -f \n\
     Turn off the display of the moving edges and KLT points. \n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -o\n\
     Use Ogre3D for visibility tests.\n\
\n\
  -w\n\
     When Ogre3D is enabled [-o], show the Ogre3D configuration dialog that allows selecting the renderer.\n\
\n\
  -l\n\
     Use the scanline algorithm for visibility tests.\n\
\n\
  -v\n\
     Compute the covariance matrix.\n\
\n\
  -p\n\
     Compute the gradient projection error.\n\
\n\
  -t <tracker type>\n\
     Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for the color sensor.\n\
\n\
  -T <tracker type>\n\
     Set tracker type (<4 (Depth normal)>, <8 (Depth dense)>, <12 (both)>) for the depth sensor.\n\
\n\
  -e <last frame index>\n\
     Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
  -h \n\
     Print the help.\n\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

bool getOptions(int argc, const char **argv, std::string &ipath, std::string &configFile, std::string &configFile_depth,
                std::string &modelFile, std::string &modelFile_depth, std::string &initFile, bool &displayFeatures,
                bool &click_allowed, bool &display, bool &useOgre, bool &showOgreConfigDialog, bool &useScanline,
                bool &computeCovariance, bool &projectionError, int &trackerType, int &tracker_type_depth,
                int &lastFrame)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'i':
      ipath = optarg_;
      break;
    case 'x':
      configFile = optarg_;
      break;
    case 'X':
      configFile_depth = optarg_;
      break;
    case 'm':
      modelFile = optarg_;
      break;
    case 'M':
      modelFile_depth = optarg_;
      break;
    case 'n':
      initFile = optarg_;
      break;
    case 'f':
      displayFeatures = false;
      break;
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'o':
      useOgre = true;
      break;
    case 'l':
      useScanline = true;
      break;
    case 'w':
      showOgreConfigDialog = true;
      break;
    case 'v':
      computeCovariance = true;
      break;
    case 'p':
      projectionError = true;
      break;
    case 't':
      trackerType = atoi(optarg_);
      break;
    case 'T':
      tracker_type_depth = atoi(optarg_);
      break;
    case 'e':
      lastFrame = atoi(optarg_);
      break;
    case 'h':
      usage(argv[0], NULL);
      return false;
      break;

    default:
      usage(argv[0], optarg_);
      return false;
      break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

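// Minimal stand-in for the librealsense rs_intrinsics structure: principal point
// (ppx, ppy), focal lengths (fx, fy) in pixels, and the five distortion coefficients
// of the depth camera.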
struct rs_intrinsics
{
  float ppx;
  float ppy;
  float fx;
  float fy;
  float coeffs[5];
};

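// Deproject a pixel and its depth value into a 3D point expressed in the depth camera
// frame: the pixel is first normalized with the intrinsics, then the radial
// (coeffs[0], coeffs[1], coeffs[4]) and tangential (coeffs[2], coeffs[3]) distortion
// terms are applied, and the result is scaled by the depth. This mirrors the
// librealsense deprojection routine, presumably for the inverse Brown-Conrady model
// of the SR300 whose calibration is hard-coded below.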
void rs_deproject_pixel_to_point(float point[3], const rs_intrinsics &intrin, const float pixel[2], float depth)
{
  float x = (pixel[0] - intrin.ppx) / intrin.fx;
  float y = (pixel[1] - intrin.ppy) / intrin.fy;

  float r2 = x * x + y * y;
  float f = 1 + intrin.coeffs[0] * r2 + intrin.coeffs[1] * r2 * r2 + intrin.coeffs[4] * r2 * r2 * r2;
  float ux = x * f + 2 * intrin.coeffs[2] * x * y + intrin.coeffs[3] * (r2 + 2 * x * x);
  float uy = y * f + 2 * intrin.coeffs[3] * x * y + intrin.coeffs[2] * (r2 + 2 * y * y);

  x = ux;
  y = uy;

  point[0] = depth * x;
  point[1] = depth * y;
  point[2] = depth;
}

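// Read one frame of the sequence: the grayscale image image_%04d.png (or .pgm for older
// datasets) and the raw depth map depth_image_%04d.bin. The .bin file stores two 32-bit
// little-endian unsigned integers (height, width) followed by height x width uint16_t
// depth values in row-major order; each depth value is converted into a 3D point to
// build the point cloud consumed by the depth tracker.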
bool read_data(unsigned int cpt, const std::string &input_directory, vpImage<unsigned char> &I,
               vpImage<uint16_t> &I_depth_raw, std::vector<vpColVector> &pointcloud, unsigned int &pointcloud_width,
               unsigned int &pointcloud_height)
{
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
  // Read image
  std::stringstream ss;
  ss << input_directory << "/image_";
  ss << std::setfill('0') << std::setw(4);
  ss << cpt;
  ss << ".";
  ss << ext;
  std::string filename_image = ss.str();
  if (!vpIoTools::checkFilename(filename_image)) {
    std::cerr << "Cannot read: " << filename_image << std::endl;
    return false;
  }
  vpImageIo::read(I, filename_image);

  // Read raw depth
  ss.str("");
  ss << input_directory << "/depth_image_";
  ss << std::setfill('0') << std::setw(4);
  ss << cpt;
  ss << ".bin";
  std::string filename_depth = ss.str();

  std::ifstream file_depth(filename_depth.c_str(), std::ios::in | std::ios::binary);
  if (!file_depth.is_open()) {
    return false;
  }

  unsigned int height = 0, width = 0;
  vpIoTools::readBinaryValueLE(file_depth, height);
  vpIoTools::readBinaryValueLE(file_depth, width);

  I_depth_raw.resize(height, width);

  uint16_t depth_value = 0;
  for (unsigned int i = 0; i < height; i++) {
    for (unsigned int j = 0; j < width; j++) {
      vpIoTools::readBinaryValueLE(file_depth, depth_value);
      I_depth_raw[i][j] = depth_value;
    }
  }

  // Transform pointcloud
  pointcloud_width = width;
  pointcloud_height = height;
  pointcloud.resize((size_t)width * height);

  // Only for Creative SR300
  const float depth_scale = 0.000124986647f;
  rs_intrinsics depth_intrinsic;
  depth_intrinsic.ppx = 311.484558f;
  depth_intrinsic.ppy = 246.283234f;
  depth_intrinsic.fx = 476.053619f;
  depth_intrinsic.fy = 476.053497f;
  depth_intrinsic.coeffs[0] = 0.165056542f;
  depth_intrinsic.coeffs[1] = -0.0508309528f;
  depth_intrinsic.coeffs[2] = 0.00435937941f;
  depth_intrinsic.coeffs[3] = 0.00541406544f;
  depth_intrinsic.coeffs[4] = 0.250085592f;

  for (unsigned int i = 0; i < height; i++) {
    for (unsigned int j = 0; j < width; j++) {
      float scaled_depth = I_depth_raw[i][j] * depth_scale;
      float point[3];
      float pixel[2] = { (float)j, (float)i };
      rs_deproject_pixel_to_point(point, depth_intrinsic, pixel, scaled_depth);

      vpColVector data_3D(3);
      data_3D[0] = point[0];
      data_3D[1] = point[1];
      data_3D[2] = point[2];

      pointcloud[(size_t)(i * width + j)] = data_3D;
    }
  }

  return true;
}

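// Configure the tracker either from the two XML files (color and depth camera) or,
// when USE_XML is 0, programmatically with equivalent settings. The parameter names
// are only declared when USE_XML is enabled so that the branch that does not use them
// compiles without unused-parameter warnings.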
void loadConfiguration(vpMbTracker *const tracker,
                       const std::string &
#if USE_XML
                       configFile
#endif
                       ,
                       const std::string &
#if USE_XML
                       configFile_depth
#endif
)
{
#if USE_XML
  // From the xml file
  dynamic_cast<vpMbGenericTracker *>(tracker)->loadConfigFile(configFile, configFile_depth);
#else
  // Edge
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
  me.setThreshold(10);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(4);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setMovingEdge(me);

  // Klt
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  vpKltOpencv klt;
  klt.setMaxFeatures(10000);
  klt.setWindowSize(5);
  klt.setQuality(0.01);
  klt.setMinDistance(5);
  klt.setHarrisFreeParameter(0.02);
  klt.setBlockSize(3);
  klt.setPyramidLevels(3);

  dynamic_cast<vpMbGenericTracker *>(tracker)->setKltOpencv(klt);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setKltMaskBorder(5);
#endif

  // Depth
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalFeatureEstimationMethod(
      vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationMethod(2);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacMaxIter(200);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalSamplingStep(2, 2);

  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthDenseSamplingStep(4, 4);

  vpCameraParameters cam1, cam2;
  cam1.initPersProjWithoutDistortion(615.1674804688, 615.1675415039, 312.1889953613, 243.4373779297);
  cam2.initPersProjWithoutDistortion(476.0536193848, 476.0534973145, 311.4845581055, 246.2832336426);

  dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraParameters(cam1, cam2);

  tracker->setAngleAppear(vpMath::rad(70));
  tracker->setAngleDisappear(vpMath::rad(80));

  // Specify the clipping to use
  tracker->setNearClippingDistance(0.01);
  tracker->setFarClippingDistance(2.0);
  // tracker->setClipping(tracker->getClipping() | vpMbtPolygon::LEFT_CLIPPING
  // | vpMbtPolygon::RIGHT_CLIPPING | vpMbtPolygon::UP_CLIPPING |
  // vpMbtPolygon::DOWN_CLIPPING); // Equivalent to FOV_CLIPPING
#endif
}
} // namespace

int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    std::string opt_ipath;
    std::string ipath;
    std::string opt_configFile;
    std::string opt_configFile_depth;
    std::string opt_modelFile;
    std::string opt_modelFile_depth;
    std::string opt_initFile;
    std::string initFile;
    bool displayFeatures = true;
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool useOgre = false;
    bool showOgreConfigDialog = false;
    bool useScanline = false;
    bool computeCovariance = false;
    bool projectionError = false;
    int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
    int trackerType_depth = vpMbGenericTracker::DEPTH_DENSE_TRACKER;
#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
    // To avoid Debian test timeout
    int opt_lastFrame = 5;
#else
    int opt_lastFrame = -1;
#endif

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    // Set the default input path
    if (!env_ipath.empty())
      ipath = env_ipath;

    // Read the command line options
    if (!getOptions(argc, argv, opt_ipath, opt_configFile, opt_configFile_depth, opt_modelFile, opt_modelFile_depth,
                    opt_initFile, displayFeatures, opt_click_allowed, opt_display, useOgre, showOgreConfigDialog,
                    useScanline, computeCovariance, projectionError, trackerType_image, trackerType_depth,
                    opt_lastFrame)) {
      return EXIT_FAILURE;
    }

#if !(defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO))
    if (trackerType_image == 2 || trackerType_image == 3) { // Use vpMbGenericTracker::KLT_TRACKER
      std::cout << "KLT features cannot be used: ViSP is not built with the "
                   "KLT module or the OpenCV imgproc and video modules are not available."
                << std::endl;
      return EXIT_SUCCESS;
    }
#endif

    // Test if an input path is set
    if (opt_ipath.empty() && env_ipath.empty()) {
      usage(argv[0], NULL);
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
                << "  environment variable to specify the location of the " << std::endl
                << "  image path where test images are located." << std::endl
                << std::endl;

      return EXIT_FAILURE;
    }

    // Get the option values
    ipath = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/castel");

    std::string dir_path = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth");
    if (!vpIoTools::checkDirectory(dir_path)) {
      std::cerr << "ViSP-images does not contain the folder: " << dir_path << "!" << std::endl;
      return EXIT_SUCCESS;
    }

    std::string configFile, configFile_depth;
    if (!opt_configFile.empty())
      configFile = opt_configFile;
    else
      configFile =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.xml");

    if (!opt_configFile_depth.empty())
      configFile_depth = opt_configFile_depth;
    else
      configFile_depth =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau_depth.xml");

    std::string modelFile, modelFile_depth;
    if (!opt_modelFile.empty())
      modelFile = opt_modelFile;
    else {
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
      modelFile =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau_gantry.wrl");
#else
      modelFile = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.cao");
#endif
    }

    if (!opt_modelFile_depth.empty())
      modelFile_depth = opt_modelFile_depth;
    else
      modelFile_depth =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.cao");

    std::string vrml_ext = ".wrl";
    bool use_vrml =
        (modelFile.compare(modelFile.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0) ||
        (modelFile_depth.compare(modelFile_depth.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0);

    if (use_vrml) {
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
      std::cout << "use_vrml: " << use_vrml << std::endl;
#else
      std::cerr << "Error: vrml model files are only supported if ViSP is "
                   "built with the Coin3D 3rd party library"
                << std::endl;
      return EXIT_FAILURE;
#endif
    }

    if (!opt_initFile.empty())
      initFile = opt_initFile;
    else
      initFile = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.init");

    vpImage<unsigned char> I, I_depth;
    vpImage<uint16_t> I_depth_raw;
    std::vector<vpColVector> pointcloud;
    unsigned int pointcloud_width, pointcloud_height;
    if (!read_data(0, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height)) {
      std::cerr << "Cannot open sequence: " << ipath << std::endl;
      return EXIT_FAILURE;
    }

    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

    // Initialise a display
#if defined(VISP_HAVE_X11)
    vpDisplayX display1, display2;
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI display1, display2;
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV display1, display2;
#elif defined(VISP_HAVE_D3D9)
    vpDisplayD3D display1, display2;
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display1, display2;
#else
    opt_display = false;
#endif
    if (opt_display) {
#if defined(VISP_HAVE_DISPLAY)
      display1.setDownScalingFactor(vpDisplay::SCALE_AUTO);
      display2.setDownScalingFactor(vpDisplay::SCALE_AUTO);
      display1.init(I, 100, 100, "Test tracking (Left)");
      display2.init(I_depth, (int)(I.getWidth() / vpDisplay::getDownScalingFactor(I)) + 110, 100,
                    "Test tracking (Right)");
#endif
      vpDisplay::display(I);
      vpDisplay::display(I_depth);
      vpDisplay::flush(I);
      vpDisplay::flush(I_depth);
    }

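    // The generic tracker handles two sensors here: index 0 ("Camera1") processes the
    // grayscale image with the edge (and optionally KLT) tracker, index 1 ("Camera2")
    // processes the depth data with the dense and/or normal depth tracker.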
    std::vector<int> trackerTypes(2);
    trackerTypes[0] = trackerType_image;
    trackerTypes[1] = trackerType_depth;
    // Object pointer to check that inheritance is ok
    vpMbTracker *tracker = new vpMbGenericTracker(trackerTypes);
    vpHomogeneousMatrix c1Mo, c2Mo;
    vpCameraParameters cam1, cam2;

    loadConfiguration(tracker, configFile, configFile_depth);

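    // depth_M_color is the extrinsic transformation from the color camera frame to the
    // depth camera frame; registering it as the "Camera2" transformation lets the
    // tracker express the pose estimated on the color image in the depth camera frame.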
    vpHomogeneousMatrix depth_M_color;
    std::string depth_M_color_filename =
        vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/depth_M_color.txt");
    {
      std::ifstream depth_M_color_file(depth_M_color_filename.c_str());
      depth_M_color.load(depth_M_color_file);
      std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformationMatrices;
      mapOfCameraTransformationMatrices["Camera2"] = depth_M_color;
      dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraTransformationMatrix(mapOfCameraTransformationMatrices);
    }

    // Display the moving edges and the KLT points
    tracker->setDisplayFeatures(displayFeatures);

    // Tells if the tracker has to use Ogre3D for visibility tests
    tracker->setOgreVisibilityTest(useOgre);
    if (useOgre)
      tracker->setOgreShowConfigDialog(showOgreConfigDialog);

    // Tells if the tracker has to use the scanline visibility tests
    tracker->setScanLineVisibilityTest(useScanline);

    // Tells if the tracker has to compute the covariance matrix
    tracker->setCovarianceComputation(computeCovariance);

    // Tells if the tracker has to compute the projection error
    tracker->setProjectionErrorComputation(projectionError);

    // Retrieve the camera parameters from the tracker
    dynamic_cast<vpMbGenericTracker *>(tracker)->getCameraParameters(cam1, cam2);

    // Loop to position the object
    if (opt_display && opt_click_allowed) {
      while (!vpDisplay::getClick(I, false)) {
        vpDisplay::display(I);
        vpDisplay::displayText(I, 15, 10, "click after positioning the object", vpColor::red);
        vpDisplay::flush(I);
      }
    }

    // Load the 3D model (either a vrml file or a .cao file)
    dynamic_cast<vpMbGenericTracker *>(tracker)->loadModel(modelFile, modelFile_depth);

    if (opt_display && opt_click_allowed) {
      std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
      mapOfImages["Camera1"] = &I;
      mapOfImages["Camera2"] = &I_depth;
      std::map<std::string, std::string> mapOfInitFiles;
      mapOfInitFiles["Camera1"] = initFile;

      // Initialise the tracker by clicking on the image
      dynamic_cast<vpMbGenericTracker *>(tracker)->initClick(mapOfImages, mapOfInitFiles, true);
      dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);
      // Display the 3D model at the given pose
      dynamic_cast<vpMbGenericTracker *>(tracker)->display(I, I_depth, c1Mo, c2Mo, cam1, cam2, vpColor::red);
    }
    else {
      vpHomogeneousMatrix c1Moi(0.06846423368, 0.09062570884, 0.3401096693, -2.671882598, 0.1174275908, -0.6011935263);
      vpHomogeneousMatrix c2Moi(0.04431452054, 0.09294637757, 0.3357760654, -2.677922443, 0.121297639, -0.6028463357);
      dynamic_cast<vpMbGenericTracker *>(tracker)->initFromPose(I, I_depth, c1Moi, c2Moi);
    }

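    // The multi-sensor track() call takes maps keyed by the camera names: the grayscale
    // image for "Camera1" and, for "Camera2", the point cloud given as a vector of 3D
    // points together with its original width and height.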
    // Track the model
    {
      std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
      mapOfImages["Camera1"] = &I;
      std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
      mapOfPointclouds["Camera2"] = &pointcloud;
      std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
      mapOfWidths["Camera2"] = pointcloud_width;
      mapOfHeights["Camera2"] = pointcloud_height;

      dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
    }
    dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);

    if (opt_display) {
      vpDisplay::flush(I);
      vpDisplay::flush(I_depth);
    }

    bool quit = false, click = false;
    unsigned int frame_index = 0;
    std::vector<double> time_vec;
    while (read_data(frame_index, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height) && !quit &&
           (opt_lastFrame > 0 ? (int)frame_index <= opt_lastFrame : true)) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      if (opt_display) {
        vpDisplay::display(I);
        vpDisplay::display(I_depth);

        std::stringstream ss;
        ss << "Num frame: " << frame_index;
        vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
      }

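      // resetTracker() drops the model, the configuration and the current pose, so the
      // configuration, model, camera parameters and pose are re-applied below; this
      // block exercises that code path on purpose.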
      // Test reset the tracker
      if (frame_index == 10) {
        std::cout << "----------Test reset tracker----------" << std::endl;
        if (opt_display) {
          vpDisplay::display(I);
          vpDisplay::display(I_depth);
        }

        tracker->resetTracker();

        loadConfiguration(tracker, configFile, configFile_depth);
        dynamic_cast<vpMbGenericTracker *>(tracker)->loadModel(modelFile, modelFile_depth);
        dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraParameters(cam1, cam2);
        tracker->setOgreVisibilityTest(useOgre);
        tracker->setScanLineVisibilityTest(useScanline);
        tracker->setCovarianceComputation(computeCovariance);
        tracker->setProjectionErrorComputation(projectionError);
        dynamic_cast<vpMbGenericTracker *>(tracker)->initFromPose(I, I_depth, c1Mo, c2Mo);
      }

      // Test to set an initial pose
#if USE_SMALL_DATASET
      if (frame_index == 20) {
        c1Mo.buildFrom(0.07734634051, 0.08993639906, 0.342344402, -2.708409543, 0.0669276477, -0.3798958303);
        c2Mo.buildFrom(0.05319520317, 0.09223511976, 0.3380095812, -2.71438192, 0.07141055397, -0.3810081638);
#else
      if (frame_index == 50) {
        c1Mo.buildFrom(0.09280663035, 0.09277655672, 0.330415149, -2.724431817, 0.0293932671, 0.02027966377);
        c2Mo.buildFrom(0.06865933578, 0.09494713501, 0.3260555142, -2.730027451, 0.03498390135, 0.01989831338);
#endif
        std::cout << "Test set pose" << std::endl;
        dynamic_cast<vpMbGenericTracker *>(tracker)->setPose(I, I_depth, c1Mo, c2Mo);
      }

#if USE_SMALL_DATASET
      // Track the object: stop tracking from frame 15 to 20
      if (frame_index < 15 || frame_index >= 20) {
#else
      // Track the object: stop tracking from frame 30 to 50
      if (frame_index < 30 || frame_index >= 50) {
#endif
        std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
        mapOfImages["Camera1"] = &I;
        std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
        mapOfPointclouds["Camera2"] = &pointcloud;
        std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
        mapOfWidths["Camera2"] = pointcloud_width;
        mapOfHeights["Camera2"] = pointcloud_height;

        double t = vpTime::measureTimeMs();
        dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        t = vpTime::measureTimeMs() - t;
        time_vec.push_back(t);

        dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);

        if (opt_display) {
          // Display the 3D model
          dynamic_cast<vpMbGenericTracker *>(tracker)->display(I, I_depth, c1Mo, c2Mo, cam1, cam2, vpColor::darkRed);
          // Display the object frame
          vpDisplay::displayFrame(I, c1Mo, cam1, 0.05);
          vpDisplay::displayFrame(I_depth, c2Mo, cam2, 0.05);
          // Computation time
          std::stringstream ss;
          ss << "Computation time: " << t << " ms";
          vpDisplay::displayText(I, 60, 20, ss.str(), vpColor::red);
          // Number of features
          ss.str("");
          ss << "nb features: " << tracker->getError().getRows();
          vpDisplay::displayText(I_depth, 80, 20, ss.str(), vpColor::red);
          {
            std::stringstream ss;
            ss << "Features: edges " << dynamic_cast<vpMbGenericTracker *>(tracker)->getNbFeaturesEdge() << ", klt "
               << dynamic_cast<vpMbGenericTracker *>(tracker)->getNbFeaturesKlt() << ", depth "
               << dynamic_cast<vpMbGenericTracker *>(tracker)->getNbFeaturesDepthDense();
            vpDisplay::displayText(I, I.getHeight() - 30, 20, ss.str(), vpColor::red);
          }
        }
      }

      if (opt_click_allowed && opt_display) {
        vpDisplay::displayText(I, 10, 10, "Click to quit", vpColor::red);
        vpMouseButton::vpMouseButtonType button;
        if (vpDisplay::getClick(I, button, click)) {
          switch (button) {
          case vpMouseButton::button1:
            quit = !click;
            break;

          case vpMouseButton::button3:
            click = !click;
            break;

          default:
            break;
          }
        }
      }

      if (computeCovariance) {
        std::cout << "Covariance matrix: \n" << tracker->getCovarianceMatrix() << std::endl << std::endl;
      }

      if (projectionError) {
        std::cout << "Projection error: " << tracker->getProjectionError() << std::endl << std::endl;
      }

      if (opt_display) {
        vpDisplay::flush(I);
        vpDisplay::flush(I_depth);
      }

      frame_index++;
    }

    std::cout << "\nFinal poses, c1Mo:\n" << c1Mo << "\nc2Mo:\n" << c2Mo << std::endl;
    std::cout << "\nComputation time, Mean: " << vpMath::getMean(time_vec)
              << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec)
              << " ms" << std::endl;

    if (opt_click_allowed && !quit) {
      vpDisplay::getClick(I);
    }

    delete tracker;
    tracker = NULL;

    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}

#elif !(defined(VISP_HAVE_MODULE_MBT) && defined(VISP_HAVE_DISPLAY))
int main()
{
  std::cout << "Cannot run this example: visp_mbt, visp_gui modules are required." << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
}
#endif