37#include <visp3/core/vpIoTools.h>
38#include <visp3/vision/vpKeyPoint.h>
40#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
// Helper: collapse a k-NN match list to a single cv::DMatch.
// NOTE(review): this chunk is missing source lines (original numbering is
// non-contiguous) — the body below is truncated; presumably it returns
// knnMatches[0] when non-empty. TODO confirm against the full file.
47inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
49 if (knnMatches.size() > 0) {
// Helper: convert a (cv::KeyPoint, cv::Point3f) RANSAC match pair to a
// vpImagePoint. Body not visible in this chunk — kept verbatim.
56inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
// vpKeyPoint constructor (detector/descriptor given as enum types).
// Member-initializer list sets the matching pipeline defaults: detection
// score 0.15, detection threshold 100.0, matching factor 2.0, ratio 0.85,
// 200 RANSAC iterations, 100 min inliers, 20% consensus, reprojection
// error 6.0, RANSAC threshold 0.01, JPG image format.
// NOTE(review): lines are missing between the init list and the member
// push_back calls (original numbering jumps 85->87, 88->92) — the chunk
// omits the matching #else/#endif and the ctor body opening; kept verbatim.
74 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
75 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
76 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
77 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
78 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
79 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
80 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
81 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
82 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
83 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check is only available/enabled for OpenCV 2.4.x.
84#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
85 m_useBruteForceCrossCheck(true),
87 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
88 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Map enum types to their detector/descriptor name strings.
92 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
93 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
// vpKeyPoint constructor (detector/extractor given as name strings).
// Same defaults as the enum-based constructor; only the name registration
// at the end differs. NOTE(review): chunk is missing lines (original
// numbering jumps 120->122, 123->127) — kept verbatim.
109 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
110 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
111 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
112 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
113 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
114 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
115 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
116 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
117 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
118 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check is only available/enabled for OpenCV 2.4.x.
119#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
120 m_useBruteForceCrossCheck(true),
122 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
123 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Register the caller-supplied detector and extractor names directly.
127 m_detectorNames.push_back(detectorName);
128 m_extractorNames.push_back(extractorName);
// vpKeyPoint constructor (vectors of detector/extractor names).
// Unlike the two constructors above, the name lists are initialized
// directly in the member-initializer list (m_detectorNames(detectorNames),
// m_extractorNames(extractorNames)). Same numeric defaults otherwise.
// NOTE(review): ctor body is not visible in this chunk — kept verbatim.
144 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
145 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
146 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
147 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
148 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
149 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
150 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
151 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
152 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
153 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
154 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check is only available/enabled for OpenCV 2.4.x.
155#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
156 m_useBruteForceCrossCheck(true),
158 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
159 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Simulate an affine view of img (ASIFT-style): rotate by `phi` degrees-ish
// (units not visible here — TODO confirm radians vs degrees), then tilt
// (anisotropic shrink along x by 1/tilt), producing the warped image, a
// validity mask, and the inverse affine transform Ai for mapping keypoints
// back to the original image.
// NOTE(review): chunk is missing lines (e.g. the computation of c/s and of
// h/w is not visible) — code kept verbatim.
173void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
// Mask starts fully valid (255 everywhere).
178 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
180 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation step, skipped when phi is (numerically) zero.
183 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
188 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
// Rotate the four image corners to find the bounding box of the
// rotated image, then shift A so the result starts at (0, 0).
190 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
191 cv::Mat tcorners = corners * A.t();
192 cv::Mat tcorners_x, tcorners_y;
193 tcorners.col(0).copyTo(tcorners_x);
194 tcorners.col(1).copyTo(tcorners_y);
195 std::vector<cv::Mat> channels;
196 channels.push_back(tcorners_x);
197 channels.push_back(tcorners_y);
198 cv::merge(channels, tcorners);
200 cv::Rect rect = cv::boundingRect(tcorners);
201 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
203 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt step: anti-alias along x with a Gaussian blur (sigma 0.8*sqrt(t^2-1))
// before subsampling by 1/tilt, then fold the scale into A.
206 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
207 double s = 0.8 * sqrt(tilt * tilt - 1);
208 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
209 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
210 A.row(0) = A.row(0) / tilt;
// If any warp was applied, warp the mask the same way (nearest neighbour
// keeps it binary) and expose the inverse transform to the caller.
213 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
214 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
217 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
219 cv::invertAffineTransform(A, Ai);
// buildReference(image[, rectangle]) body fragment: resets the training
// state, detects + extracts features on the reference image (optionally
// through the affine-simulation path), registers the image id, feeds the
// matcher, and returns the number of reference keypoints.
// NOTE(review): chunk is missing lines (function signature and several
// statements are not visible) — code kept verbatim.
279 m_trainPoints.clear();
280 m_mapOfImageId.clear();
281 m_mapOfImages.clear();
282 m_currentImageId = 1;
// ASIFT-style path: detect/extract over multiple simulated affine views,
// then flatten keypoints and descriptors into the single train set.
284 if (m_useAffineDetection) {
285 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
286 std::vector<cv::Mat> listOfTrainDescriptors;
292 m_trainKeyPoints.clear();
293 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
294 it != listOfTrainKeyPoints.end(); ++it) {
295 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// First descriptor matrix is copied, the rest are appended row-wise.
299 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
303 it->copyTo(m_trainDescriptors);
305 m_trainDescriptors.push_back(*it);
// Plain path: single detect + extract on the reference image.
309 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
310 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Associate every train keypoint (via its class_id) with this image id.
315 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
316 m_mapOfImageId[it->class_id] = m_currentImageId;
320 m_mapOfImages[m_currentImageId] = I;
// Give the matcher the train descriptors so later match() calls work.
329 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
331 return static_cast<unsigned int>(m_trainKeyPoints.size());
// buildReference (grayscale, caller-supplied keypoints + their 3D points):
// extracts descriptors, then — because extraction may drop keypoints —
// re-aligns points3f with the surviving keypoints using a keypoint hash,
// and delegates to the descriptor-taking overload.
// NOTE(review): chunk is missing lines (full signature, loop increments) —
// code kept verbatim.
359 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
361 cv::Mat trainDescriptors;
// Keep a pristine copy: extract() can remove keypoints it cannot describe.
363 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
365 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
367 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Hash each original keypoint to its index so surviving keypoints can
// look up their matching 3D point.
371 std::map<size_t, size_t> mapOfKeypointHashes;
373 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
375 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
378 std::vector<cv::Point3f> trainPoints_tmp;
379 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
380 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
381 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
386 points3f = trainPoints_tmp;
389 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Color-image twin of the overload above: same extract-then-realign logic,
// operating on I_color. Delegates to the descriptor-taking overload.
// NOTE(review): chunk is missing lines — code kept verbatim.
404 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
406 cv::Mat trainDescriptors;
// Keep a pristine copy: extract() can remove keypoints it cannot describe.
408 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
410 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
412 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Hash each original keypoint to its index so surviving keypoints can
// look up their matching 3D point.
416 std::map<size_t, size_t> mapOfKeypointHashes;
418 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
420 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
423 std::vector<cv::Point3f> trainPoints_tmp;
424 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
425 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
426 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
431 points3f = trainPoints_tmp;
434 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Core buildReference overload: takes precomputed keypoints, descriptors
// and 3D points. When `append` is false (the reset branch below — the
// condition line itself is missing from this chunk) all training state is
// cleared first; keypoints optionally get a forced class_id, are mapped to
// the current image id, and descriptors are handed to the matcher.
// NOTE(review): chunk is missing lines — code kept verbatim.
451 const std::vector<cv::KeyPoint> &trainKeyPoints,
452 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
453 bool append,
int class_id)
// Reset of accumulated training state (non-append path, presumably —
// the guarding condition is not visible here).
456 m_trainPoints.clear();
457 m_mapOfImageId.clear();
458 m_mapOfImages.clear();
459 m_currentImageId = 0;
460 m_trainKeyPoints.clear();
465 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
// Optionally stamp every keypoint with the caller's class_id (-1 = keep).
467 if (class_id != -1) {
468 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
469 it->class_id = class_id;
// Record which image each keypoint class belongs to.
475 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
477 m_mapOfImageId[it->class_id] = m_currentImageId;
481 m_mapOfImages[m_currentImageId] = I;
484 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
// First batch copies descriptors; later (append) batches push rows.
486 trainDescriptors.copyTo(m_trainDescriptors);
488 m_trainDescriptors.push_back(trainDescriptors);
490 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
498 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
502 return static_cast<unsigned int>(m_trainKeyPoints.size());
// Thin wrapper: forwards to the core buildReference overload using the
// internally stored grayscale image m_I (presumably converted from a color
// input earlier — the conversion lines are not visible in this chunk).
518 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
519 bool append,
int class_id)
522 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// compute3D (cv::KeyPoint variant) fragment: builds a plane from three ROI
// points, back-projects the keypoint onto it in the camera frame, then
// transforms the 3D point to the object frame via cMo.inverse().
// NOTE(review): most of this function is missing from the chunk —
// code kept verbatim.
544 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
551 vpPlane Po(pts[0], pts[1], pts[2]);
552 double xc = 0.0, yc = 0.0;
// Camera-frame point -> object-frame point.
563 point_obj = cMo.
inverse() * point_cam;
564 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
// compute3D (vpImagePoint variant) fragment: same plane/back-projection
// scheme as the cv::KeyPoint variant above, output as a vpPoint.
// NOTE(review): most of this function is missing from the chunk —
// code kept verbatim.
586 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
593 vpPlane Po(pts[0], pts[1], pts[2]);
594 double xc = 0.0, yc = 0.0;
// Camera-frame point -> object-frame point.
605 point_obj = cMo.
inverse() * point_cam;
// Keep only the candidate keypoints lying inside one of the model polygons
// and compute their 3D coordinates; optionally filters the descriptor
// matrix to the matching rows. A candidate matched by one polygon is
// erased from the to-check list so it is tested at most once.
// NOTE(review): chunk is missing lines (e.g. the candidates.clear() and
// the 3D-point computation per keypoint) — code kept verbatim.
626 std::vector<cv::KeyPoint> &candidates,
627 const std::vector<vpPolygon> &polygons,
628 const std::vector<std::vector<vpPoint> > &roisPt,
629 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
631 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
// Pair each candidate with its original index so descriptor rows can be
// recovered after filtering.
638 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
639 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
640 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
644 std::vector<vpPolygon> polygons_tmp = polygons;
645 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
646 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
648 while (it2 != pairOfCandidatesToCheck.end()) {
// cv::Point (x, y) -> vpImagePoint (i=row=y, j=col=x).
649 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
650 if (it1->isInside(imPt)) {
651 candidates.push_back(it2->first);
653 points.push_back(pt);
655 if (descriptors != NULL) {
656 desc.push_back(descriptors->row((
int)it2->second));
// Remove the accepted candidate so no other polygon re-tests it.
660 it2 = pairOfCandidatesToCheck.erase(it2);
// Write back the filtered descriptor rows.
667 if (descriptors != NULL) {
668 desc.copyTo(*descriptors);
// vpImagePoint twin of the overload above: filter candidates to those
// inside a model polygon, output vpPoint 3D coordinates, optionally filter
// descriptor rows. NOTE(review): chunk is missing lines (including the
// final desc.copyTo back into *descriptors) — code kept verbatim.
689 std::vector<vpImagePoint> &candidates,
690 const std::vector<vpPolygon> &polygons,
691 const std::vector<std::vector<vpPoint> > &roisPt,
692 std::vector<vpPoint> &points, cv::Mat *descriptors)
694 std::vector<vpImagePoint> candidatesToCheck = candidates;
// Pair each candidate with its original index for descriptor recovery.
700 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
701 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
702 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
706 std::vector<vpPolygon> polygons_tmp = polygons;
707 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
708 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
710 while (it2 != pairOfCandidatesToCheck.end()) {
711 if (it1->isInside(it2->first)) {
712 candidates.push_back(it2->first);
714 points.push_back(pt);
716 if (descriptors != NULL) {
717 desc.push_back(descriptors->row((
int)it2->second));
// Remove the accepted candidate so no other polygon re-tests it.
721 it2 = pairOfCandidatesToCheck.erase(it2);
// Keep candidate keypoints lying on a model cylinder ROI and compute their
// 3D object-frame coordinates: the depth Z comes from the cylinder
// equation at the normalized image coordinates (xm, ym), the camera-frame
// point is lifted as (xm*Z, ym*Z, Z) and mapped through cMo.inverse().
// NOTE(review): chunk is missing lines (the isInside-style ROI test and
// pixel->meter conversion are not visible) — code kept verbatim.
746 const std::vector<vpCylinder> &cylinders,
747 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
748 cv::Mat *descriptors)
750 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
756 size_t cpt_keypoint = 0;
757 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
758 ++it1, cpt_keypoint++) {
759 size_t cpt_cylinder = 0;
// Each cylinder may expose several ROI contours (it3).
762 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
763 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
766 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
768 candidates.push_back(*it1);
772 double xm = 0.0, ym = 0.0;
774 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Reject degenerate depths (NaN or non-positive).
776 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
778 point_cam[0] = xm * Z;
779 point_cam[1] = ym * Z;
783 point_obj = cMo.
inverse() * point_cam;
786 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
788 if (descriptors != NULL) {
789 desc.push_back(descriptors->row((
int)cpt_keypoint));
// Write back the filtered descriptor rows.
799 if (descriptors != NULL) {
800 desc.copyTo(*descriptors);
// vpImagePoint twin of the cylinder overload above: same depth-from-
// cylinder lifting, output as vpPoint. NOTE(review): chunk is missing
// lines — code kept verbatim.
821 const std::vector<vpCylinder> &cylinders,
822 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
823 cv::Mat *descriptors)
825 std::vector<vpImagePoint> candidatesToCheck = candidates;
831 size_t cpt_keypoint = 0;
832 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
833 ++it1, cpt_keypoint++) {
834 size_t cpt_cylinder = 0;
// Each cylinder may expose several ROI contours (it3).
837 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
838 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
841 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
843 candidates.push_back(*it1);
847 double xm = 0.0, ym = 0.0;
849 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Reject degenerate depths (NaN or non-positive).
851 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
853 point_cam[0] = xm * Z;
854 point_cam[1] = ym * Z;
858 point_obj = cMo.
inverse() * point_cam;
861 points.push_back(pt);
863 if (descriptors != NULL) {
864 desc.push_back(descriptors->row((
int)cpt_keypoint));
// Write back the filtered descriptor rows.
874 if (descriptors != NULL) {
875 desc.copyTo(*descriptors);
// Pose estimation via OpenCV's cv::solvePnPRansac: requires at least four
// 2D/3D correspondences, uses zero distortion (points are assumed already
// undistorted — TODO confirm), and converts the resulting rvec/tvec into
// ViSP rotation (theta-u) / translation vectors.
// NOTE(review): chunk is missing lines (camera matrix contents, the
// OpenCV < 3 call variant, the confidence parameter) — code kept verbatim.
898 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
900 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
907 cv::Mat cameraMatrix =
// Zero distortion coefficients: solvePnPRansac sees ideal pinhole points.
917 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
920#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
// OpenCV >= 3 signature: reprojection error + confidence + inlier output.
922 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
923 (
float)m_ransacReprojectionError,
926 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// OpenCV 2.x path: min inlier count, possibly derived from the consensus
// percentage of the filtered query keypoints.
946 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
947 if (m_useConsensusPercentage) {
948 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
951 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
952 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
954 }
catch (cv::Exception &e) {
955 std::cerr << e.what() << std::endl;
// Convert OpenCV rvec (axis-angle) / tvec into ViSP types to build cMo.
959 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
960 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Convenience overload: callers who don't need the inlier indices get a
// throwaway vector and we delegate to the full overload.
989 std::vector<vpPoint> &inliers,
double &elapsedTime,
992 std::vector<unsigned int> inlierIndex;
993 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// Pose estimation via ViSP's vpPose RANSAC: needs >= 4 points, derives the
// required inlier count either from m_nbRansacMinInlierCount or from the
// consensus percentage of the filtered query keypoints, and returns
// whether RANSAC succeeded. Optionally computes the pose covariance.
// NOTE(review): chunk is missing lines (the vpPose setup, the actual
// ransac call, the catch clause opening) — code kept verbatim.
1010 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
1015 if (objectVpPoints.size() < 4) {
1025 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1029 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
1030 if (m_useConsensusPercentage) {
1031 nbInlierToReachConsensus =
1032 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1042 bool isRansacPoseEstimationOk =
false;
1049 if (m_computeCovariance) {
// Exception path: report and fall through with the failure flag.
1053 std::cerr <<
"e=" << e.
what() << std::endl;
1071 return isRansacPoseEstimationOk;
// Mean reprojection error (pixels) of the matched keypoints under the
// estimated pose: each 3D point is projected (projection lines not visible
// in this chunk) to (u, v) and compared to the detected keypoint location.
// Returns the average Euclidean distance; the empty-input early return at
// original line 1091 guards the final division.
1088double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1091 if (matchKeyPoints.size() == 0) {
1097 std::vector<double> errors(matchKeyPoints.size());
1100 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1101 it != matchKeyPoints.end(); ++it, cpt++) {
1106 double u = 0.0, v = 0.0;
// Per-match pixel distance between projection (u, v) and detection.
1108 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
1111 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
// Size the side-by-side matching canvas (grayscale): lay out the training
// images plus the current image on an (almost) square grid of
// ceil(sqrt(nbImg)) cells, each cell as large as the biggest image.
// NOTE(review): chunk is missing lines (the canvas allocation itself) —
// code kept verbatim.
1163 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1165 if (m_mapOfImages.empty()) {
1166 std::cerr <<
"There is no training image loaded !" << std::endl;
1176 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1179 unsigned int nbWidth = nbImgSqrt;
1181 unsigned int nbHeight = nbImgSqrt;
// Grow the grid by one row/col when the square root underestimates.
1184 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max over the current image and all training images.
1188 unsigned int maxW = ICurrent.
getWidth();
1189 unsigned int maxH = ICurrent.
getHeight();
1190 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1192 if (maxW < it->second.getWidth()) {
1193 maxW = it->second.getWidth();
1196 if (maxH < it->second.getHeight()) {
1197 maxH = it->second.getHeight();
// Color twin of createImageMatching above: identical grid-layout sizing
// logic (training images are still stored as grayscale in m_mapOfImages).
// NOTE(review): chunk is missing lines — code kept verbatim.
1218 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1220 if (m_mapOfImages.empty()) {
1221 std::cerr <<
"There is no training image loaded !" << std::endl;
1231 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1234 unsigned int nbWidth = nbImgSqrt;
1236 unsigned int nbHeight = nbImgSqrt;
// Grow the grid by one row/col when the square root underestimates.
1239 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max over the current image and all training images.
1243 unsigned int maxW = ICurrent.
getWidth();
1244 unsigned int maxH = ICurrent.
getHeight();
1245 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1247 if (maxW < it->second.getWidth()) {
1248 maxW = it->second.getWidth();
1251 if (maxH < it->second.getHeight()) {
1252 maxH = it->second.getHeight();
// detect() overload family: the thin wrappers forward to the mask-based
// cv::Mat overload; the rectangle overloads rasterize the ROI into a
// binary mask first; the final overload runs every configured detector
// and concatenates the keypoints.
// NOTE(review): chunk is missing lines between the wrappers (signatures,
// image conversions) — code kept verbatim.
1270 detect(I, keyPoints, elapsedTime, rectangle);
1283 detect(I_color, keyPoints, elapsedTime, rectangle);
1297 detect(matImg, keyPoints, elapsedTime, mask);
// Rectangle ROI -> mask: filled 255 rectangle, or all-255 when no ROI.
1313 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1316 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1318 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1320 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1323 detect(matImg, keyPoints, elapsedTime, mask);
// Same rectangle-to-mask logic for the color input variant.
1339 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1342 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1344 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1346 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1349 detect(matImg, keyPoints, elapsedTime, mask);
// Core overload: run every registered detector and append its keypoints.
1362 const cv::Mat &mask)
1367 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1368 it != m_detectors.end(); ++it) {
1369 std::vector<cv::KeyPoint> kp;
1371 it->second->detect(matImg, kp, mask);
1372 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
// display() overload fragments: convert query/train keypoints to
// vpImagePoint lists and iterate the filtered matches to draw them
// (the drawing statements themselves are not visible in this chunk).
// Code kept verbatim.
1387 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1389 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1392 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1407 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1409 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1412 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
// Query-only variants (current image only, no reference side).
1427 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1430 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1444 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1447 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
// display(IRef, ICurrent, ...) overloads (three visually identical
// variants for gray/color image combinations): draw each filtered match
// as a line from the train keypoint in the reference image to the query
// keypoint, whose column is shifted right by the reference image width
// (side-by-side layout). A random color per match is generated with
// rand() when no fixed color is requested (srand is re-seeded with
// time(NULL) on every call — NOTE(review): per-call re-seeding is
// questionable but kept as-is, this chunk is missing lines).
1464 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1467 srand((
unsigned int)time(NULL));
1470 std::vector<vpImagePoint> queryImageKeyPoints;
1472 std::vector<vpImagePoint> trainImageKeyPoints;
1476 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1478 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
// Right-hand endpoint lives in the current image, shifted by IRef width.
1481 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1482 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1483 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// Second variant — same logic, different image types.
1502 unsigned int lineThickness,
const vpColor &color)
1505 srand((
unsigned int)time(NULL));
1508 std::vector<vpImagePoint> queryImageKeyPoints;
1510 std::vector<vpImagePoint> trainImageKeyPoints;
1514 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1516 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1519 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1520 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1521 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// Third variant — same logic, different image types.
1540 unsigned int lineThickness,
const vpColor &color)
1543 srand((
unsigned int)time(NULL));
1546 std::vector<vpImagePoint> queryImageKeyPoints;
1548 std::vector<vpImagePoint> trainImageKeyPoints;
1552 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1554 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1557 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1558 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1559 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching over the multi-train-image mosaic (grayscale): the
// training images occupy the grid cells around the center cell, which
// holds the current image. Each train keypoint is offset into its image's
// cell (images at/after the median index are shifted by one to leave the
// center free), RANSAC inliers/outliers are overlaid, and each filtered
// match is drawn from the current image cell to the train image cell.
// NOTE(review): chunk is missing lines (the drawing calls, loop
// increments) — code kept verbatim.
1578 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1579 unsigned int lineThickness)
1581 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1583 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid layout: ceil(sqrt(#train images + current)) cells per side.
1589 int nbImg = (int)(m_mapOfImages.size() + 1);
1597 int nbWidth = nbImgSqrt;
1598 int nbHeight = nbImgSqrt;
1600 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map image id -> grid index, and find the max cell dimensions.
1604 std::map<int, int> mapOfImageIdIndex;
1607 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1609 mapOfImageIdIndex[it->first] = cpt;
1611 if (maxW < it->second.getWidth()) {
1612 maxW = it->second.getWidth();
1615 if (maxH < it->second.getHeight()) {
1616 maxH = it->second.getHeight();
// The center (median) cell is reserved for the current image; train
// images at/after it are shifted one cell to skip it.
1622 int medianI = nbHeight / 2;
1623 int medianJ = nbWidth / 2;
1624 int medianIndex = medianI * nbWidth + medianJ;
1625 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1627 int current_class_id_index = 0;
1628 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1629 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1633 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1636 int indexI = current_class_id_index / nbWidth;
1637 int indexJ = current_class_id_index - (indexI * nbWidth);
1638 topLeftCorner.
set_ij((
int)maxH * indexI, (
int)maxW * indexJ);
// Current-image overlays: query keypoints, inliers, outliers — all offset
// into the center cell.
1645 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1646 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1651 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1656 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Match lines: center cell (query) -> train image cell.
1662 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1663 int current_class_id = 0;
1664 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1665 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1669 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1672 int indexI = current_class_id / nbWidth;
1673 int indexJ = current_class_id - (indexI * nbWidth);
1675 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1676 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1677 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1678 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// Color twin of the mosaic displayMatching above: identical grid layout,
// keypoint/inlier/outlier overlays and match-line logic, drawing on a
// color canvas. NOTE(review): chunk is missing lines — code kept verbatim.
1699 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1700 unsigned int lineThickness)
1702 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1704 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid layout: ceil(sqrt(#train images + current)) cells per side.
1710 int nbImg = (int)(m_mapOfImages.size() + 1);
1718 int nbWidth = nbImgSqrt;
1719 int nbHeight = nbImgSqrt;
1721 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map image id -> grid index, and find the max cell dimensions.
1725 std::map<int, int> mapOfImageIdIndex;
1728 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1730 mapOfImageIdIndex[it->first] = cpt;
1732 if (maxW < it->second.getWidth()) {
1733 maxW = it->second.getWidth();
1736 if (maxH < it->second.getHeight()) {
1737 maxH = it->second.getHeight();
// Center (median) cell reserved for the current image.
1743 int medianI = nbHeight / 2;
1744 int medianJ = nbWidth / 2;
1745 int medianIndex = medianI * nbWidth + medianJ;
1746 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1748 int current_class_id_index = 0;
1749 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1750 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1754 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1757 int indexI = current_class_id_index / nbWidth;
1758 int indexJ = current_class_id_index - (indexI * nbWidth);
1759 topLeftCorner.
set_ij((
int)maxH * indexI, (
int)maxW * indexJ);
// Current-image overlays offset into the center cell.
1766 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1767 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1772 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1777 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Match lines: center cell (query) -> train image cell.
1783 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1784 int current_class_id = 0;
1785 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1786 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1790 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1793 int indexI = current_class_id / nbWidth;
1794 int indexJ = current_class_id - (indexI * nbWidth);
1796 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1797 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1798 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1799 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// extract() overload family: thin wrappers convert their image type and
// forward to the core cv::Mat overload, which runs every configured
// extractor. Because compute() can drop keypoints, the optional 3D train
// points (and, for secondary extractors, prior descriptor rows) are
// re-aligned with the surviving keypoints via the keypoint-hash map.
// Multiple extractors' descriptors are hconcat'ed column-wise.
// NOTE(review): chunk is missing lines (wrapper signatures, timing code,
// the first/secondary-extractor branch condition) — code kept verbatim.
1819 std::vector<cv::Point3f> *trainPoints)
1822 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1836 std::vector<cv::Point3f> *trainPoints)
1839 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1853 std::vector<cv::Point3f> *trainPoints)
1856 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1871 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1875 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1890 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1894 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// Core overload: iterate all registered descriptor extractors.
1909 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1914 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1915 itd != m_extractors.end(); ++itd) {
// First extractor + 3D points: realign trainPoints after compute().
1919 if (trainPoints != NULL && !trainPoints->empty()) {
1922 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1925 itd->second->compute(matImg, keyPoints, descriptors);
1927 if (keyPoints.size() != keyPoints_tmp.size()) {
1931 std::map<size_t, size_t> mapOfKeypointHashes;
1933 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1935 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1938 std::vector<cv::Point3f> trainPoints_tmp;
1939 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1940 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1941 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1946 *trainPoints = trainPoints_tmp;
1950 itd->second->compute(matImg, keyPoints, descriptors);
// Secondary extractors: compute into desc, realign both trainPoints and
// the previously accumulated descriptor rows, then hconcat.
1955 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1959 itd->second->compute(matImg, keyPoints, desc);
1961 if (keyPoints.size() != keyPoints_tmp.size()) {
1965 std::map<size_t, size_t> mapOfKeypointHashes;
1967 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1969 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1972 std::vector<cv::Point3f> trainPoints_tmp;
1973 cv::Mat descriptors_tmp;
1974 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1975 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1976 if (trainPoints != NULL && !trainPoints->empty()) {
1977 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1980 if (!descriptors.empty()) {
1981 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1986 if (trainPoints != NULL) {
1988 *trainPoints = trainPoints_tmp;
1991 descriptors_tmp.copyTo(descriptors);
// Concatenate this extractor's descriptors to the accumulated matrix.
1995 if (descriptors.empty()) {
1996 desc.copyTo(descriptors);
1998 cv::hconcat(descriptors, desc, descriptors);
// Sanity check: one descriptor row per keypoint.
2003 if (keyPoints.size() != (
size_t)descriptors.rows) {
2004 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Filter the raw matches (m_matches or m_knnMatches) into
// m_filteredMatches / m_objectFilteredPoints / m_queryFilteredKeyPoints.
// NOTE(review): elided view — the branch selecting knn vs. standard filtering
// and several statements (mean accumulation, threshold selection, braces) are
// missing from this chunk; comments describe only the visible logic.
2012void vpKeyPoint::filterMatches()
2014 std::vector<cv::KeyPoint> queryKpts;
2015 std::vector<cv::Point3f> trainPts;
2016 std::vector<cv::DMatch> m;
// --- knn-match filtering path (ratio test + distance statistics) ---
2022 double min_dist = DBL_MAX;
2024 std::vector<double> distance_vec(m_knnMatches.size());
2027 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2028 double dist = m_knnMatches[i][0].distance;
2030 distance_vec[i] = dist;
2032 if (dist < min_dist) {
// Mean of best-match distances (accumulation lines elided from this view).
2039 mean /= m_queryDescriptors.rows;
2042 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2043 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2044 double threshold = min_dist + stdev;
2046 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2047 if (m_knnMatches[i].size() >= 2) {
// Lowe-style ratio between the two best candidate matches.
2050 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2055 double dist = m_knnMatches[i][0].distance;
// Re-index queryIdx to the position in the filtered vectors being built.
2058 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2060 if (!m_trainPoints.empty()) {
2061 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
2063 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- standard-match filtering path (distance statistics only) ---
2071 double min_dist = DBL_MAX;
2073 std::vector<double> distance_vec(m_matches.size());
2074 for (
size_t i = 0; i < m_matches.size(); i++) {
2075 double dist = m_matches[i].distance;
2077 distance_vec[i] = dist;
2079 if (dist < min_dist) {
2086 mean /= m_queryDescriptors.rows;
2088 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2089 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep matches whose distance falls under the computed threshold
// (the threshold assignment itself is elided from this view).
2099 for (
size_t i = 0; i < m_matches.size(); i++) {
2100 if (m_matches[i].distance <= threshold) {
2101 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2103 if (!m_trainPoints.empty()) {
2104 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
2106 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optionally discard train keypoints matched by more than one query keypoint.
2111 if (m_useSingleMatchFilter) {
2114 std::vector<cv::DMatch> mTmp;
2115 std::vector<cv::Point3f> trainPtsTmp;
2116 std::vector<cv::KeyPoint> queryKptsTmp;
// Count how many times each train index is used.
2118 std::map<int, int> mapOfTrainIdx;
2120 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2121 mapOfTrainIdx[it->trainIdx]++;
// Keep only one-to-one matches, re-indexing queryIdx again.
2125 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2126 if (mapOfTrainIdx[it->trainIdx] == 1) {
2127 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
2129 if (!m_trainPoints.empty()) {
2130 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
2132 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
2136 m_filteredMatches = mTmp;
2137 m_objectFilteredPoints = trainPtsTmp;
2138 m_queryFilteredKeyPoints = queryKptsTmp;
2140 m_filteredMatches = m;
2141 m_objectFilteredPoints = trainPts;
2142 m_queryFilteredKeyPoints = queryKpts;
// Fragment of an accessor copying the RANSAC-filtered 3D object points out
// to the caller (enclosing signature elided from this view).
2155 objectPoints = m_objectFilteredPoints;
// Fragment of an accessor returning either the filtered or the raw query
// keypoints (the selecting condition is elided from this view).
2181 keyPoints = m_queryFilteredKeyPoints;
2183 keyPoints = m_queryKeyPoints;
// Initialize the keypoint machinery: on OpenCV 2.4.x with the nonfree module,
// register SIFT/SURF first, then build the configured detectors and extractors.
2238void vpKeyPoint::init()
2241#if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
// cv::initModule_nonfree() must succeed for SIFT/SURF on OpenCV 2.4.x.
2243 if (!cv::initModule_nonfree()) {
2244 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
2254 initDetectors(m_detectorNames);
2255 initExtractors(m_extractorNames);
// Create and register one OpenCV feature detector in m_detectors, keyed by name.
// Pre-3.0 OpenCV uses the string factory; 3.0+ dispatches explicitly on the
// detector name, supporting an optional "Pyramid" prefix (wrapping the detector
// in PyramidAdaptedFeatureDetector) and the xfeatures2d contrib module.
// NOTE(review): elided view — many braces, #else/#endif and throw statements
// are missing from this chunk.
2264void vpKeyPoint::initDetector(
const std::string &detectorName)
2266#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
// OpenCV < 3.0: name-based factory.
2267 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2269 if (m_detectors[detectorName] == NULL) {
2270 std::stringstream ss_msg;
2271 ss_msg <<
"Fail to initialize the detector: " << detectorName
2272 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3.0: strip an optional "Pyramid" prefix from the requested name.
2276 std::string detectorNameTmp = detectorName;
2277 std::string pyramid =
"Pyramid";
2278 std::size_t pos = detectorName.find(pyramid);
2279 bool usePyramid =
false;
2280 if (pos != std::string::npos) {
2281 detectorNameTmp = detectorName.substr(pos + pyramid.size());
2285 if (detectorNameTmp ==
"SIFT") {
2286#if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
2287 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
2289 m_detectors[detectorNameTmp] = siftDetector;
2291 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2294#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2295 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2297 cv::Ptr<cv::FeatureDetector> siftDetector;
// m_maxFeatures > 0 caps the number of detected SIFT features.
2298 if (m_maxFeatures > 0) {
2299#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2300 siftDetector = cv::SIFT::create(m_maxFeatures);
2302 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2305#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2306 siftDetector = cv::SIFT::create();
2308 siftDetector = cv::xfeatures2d::SIFT::create();
2312 m_detectors[detectorNameTmp] = siftDetector;
2314 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
2315 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2318 std::stringstream ss_msg;
2319 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2320 <<
" was not build with xFeatures2d module.";
2324 }
else if (detectorNameTmp ==
"SURF") {
2325#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2326 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2328 m_detectors[detectorNameTmp] = surfDetector;
2330 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
2331 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2334 std::stringstream ss_msg;
2335 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2336 <<
" was not build with xFeatures2d module.";
2339 }
else if (detectorNameTmp ==
"FAST") {
2340 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2342 m_detectors[detectorNameTmp] = fastDetector;
2344 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2346 }
else if (detectorNameTmp ==
"MSER") {
2347 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2349 m_detectors[detectorNameTmp] = fastDetector;
2351 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2353 }
else if (detectorNameTmp ==
"ORB") {
2354 cv::Ptr<cv::FeatureDetector> orbDetector;
2355 if (m_maxFeatures > 0) {
2356 orbDetector = cv::ORB::create(m_maxFeatures);
2358 orbDetector = cv::ORB::create();
2361 m_detectors[detectorNameTmp] = orbDetector;
2363 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
2364 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2366 }
else if (detectorNameTmp ==
"BRISK") {
2367 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2369 m_detectors[detectorNameTmp] = briskDetector;
2371 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
2372 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2374 }
else if (detectorNameTmp ==
"KAZE") {
2375 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2377 m_detectors[detectorNameTmp] = kazeDetector;
2379 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2380 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2382 }
else if (detectorNameTmp ==
"AKAZE") {
2383 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2385 m_detectors[detectorNameTmp] = akazeDetector;
2387 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2388 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2390 }
else if (detectorNameTmp ==
"GFTT") {
2391 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2393 m_detectors[detectorNameTmp] = gfttDetector;
2395 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2397 }
else if (detectorNameTmp ==
"SimpleBlob") {
2398 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2400 m_detectors[detectorNameTmp] = simpleBlobDetector;
2402 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2404 }
else if (detectorNameTmp ==
"STAR") {
2405#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2406 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2408 m_detectors[detectorNameTmp] = starDetector;
2410 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2413 std::stringstream ss_msg;
2414 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2415 <<
" was not build with xFeatures2d module.";
2418 }
else if (detectorNameTmp ==
"AGAST") {
2419 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2421 m_detectors[detectorNameTmp] = agastDetector;
2423 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2425 }
else if (detectorNameTmp ==
"MSD") {
// MSD requires OpenCV >= 3.1 with the xfeatures2d module.
2426#if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2427#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2428 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2430 m_detectors[detectorNameTmp] = msdDetector;
2432 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2433 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2436 std::stringstream ss_msg;
2437 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2438 <<
" was not build with xFeatures2d module.";
2442 std::stringstream ss_msg;
2443 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
2444 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
2447 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Final check: the created detector must be non-empty under the key actually
// used (stripped name when not pyramidal, full name otherwise).
2450 bool detectorInitialized =
false;
2453 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2456 detectorInitialized = !m_detectors[detectorName].empty();
2459 if (!detectorInitialized) {
2460 std::stringstream ss_msg;
2461 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2462 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize every detector listed in detectorNames (delegates to initDetector;
// the loop body line is elided from this view).
2474void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2476 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Create and register one descriptor extractor in m_extractors, keyed by name.
// Pre-3.0 OpenCV uses the string factory; 3.0+ dispatches on the name, relying
// on xfeatures2d for contrib descriptors (SURF, FREAK, BRIEF, DAISY, LATCH,
// LUCID, VGG, BoostDesc).  NOTE(review): elided view — #else/#endif, throws and
// braces are missing from this chunk.
2486void vpKeyPoint::initExtractor(
const std::string &extractorName)
2488#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
// OpenCV < 3.0: name-based factory.
2489 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2491 if (extractorName ==
"SIFT") {
2492#if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
2493 m_extractors[extractorName] = cv::SIFT::create();
2495#if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2496 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2498#if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2499 m_extractors[extractorName] = cv::SIFT::create();
2501 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2504 std::stringstream ss_msg;
2505 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2506 <<
" was not build with xFeatures2d module.";
2510 }
else if (extractorName ==
"SURF") {
2511#ifdef VISP_HAVE_OPENCV_XFEATURES2D
// SURF with extended=true, i.e. 128-element descriptors.
2513 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2515 std::stringstream ss_msg;
2516 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2517 <<
" was not build with xFeatures2d module.";
2520 }
else if (extractorName ==
"ORB") {
2521 m_extractors[extractorName] = cv::ORB::create();
2522 }
else if (extractorName ==
"BRISK") {
2523 m_extractors[extractorName] = cv::BRISK::create();
2524 }
else if (extractorName ==
"FREAK") {
2525#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2526 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2528 std::stringstream ss_msg;
2529 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2530 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2533 }
else if (extractorName ==
"BRIEF") {
2534#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2535 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2537 std::stringstream ss_msg;
2538 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2539 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2542 }
else if (extractorName ==
"KAZE") {
2543 m_extractors[extractorName] = cv::KAZE::create();
2544 }
else if (extractorName ==
"AKAZE") {
2545 m_extractors[extractorName] = cv::AKAZE::create();
2546 }
else if (extractorName ==
"DAISY") {
2547#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2548 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2550 std::stringstream ss_msg;
2551 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2552 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2555 }
else if (extractorName ==
"LATCH") {
2556#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2557 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2559 std::stringstream ss_msg;
2560 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2561 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2564 }
else if (extractorName ==
"LUCID") {
// The LUCID creation line itself is elided from this view.
2565#ifdef VISP_HAVE_OPENCV_XFEATURES2D
2570 std::stringstream ss_msg;
2571 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2572 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2575 }
else if (extractorName ==
"VGG") {
// VGG requires OpenCV >= 3.2 with xfeatures2d.
2576#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2577#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2578 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2580 std::stringstream ss_msg;
2581 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2582 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2586 std::stringstream ss_msg;
2587 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2588 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2591 }
else if (extractorName ==
"BoostDesc") {
// BoostDesc requires OpenCV >= 3.2 with xfeatures2d.
2592#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2593#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2594 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2596 std::stringstream ss_msg;
2597 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2598 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2602 std::stringstream ss_msg;
2603 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2604 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2608 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Verify the extractor was actually created.
2612 if (!m_extractors[extractorName]) {
2613 std::stringstream ss_msg;
2614 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2615 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x: request extended (128-element) SURF descriptors via set().
2619#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2620 if (extractorName ==
"SURF") {
2622 m_extractors[extractorName]->set(
"extended", 1);
// Initialize every extractor listed in extractorNames, then check that all
// registered extractors produce the same descriptor type (mixing types would
// break matching).  NOTE(review): the loop body calling initExtractor and the
// mismatch handling are elided from this view.
2633void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2635 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2639 int descriptorType = CV_32F;
2640 bool firstIteration =
true;
2641 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2642 it != m_extractors.end(); ++it) {
2643 if (firstIteration) {
// Remember the first extractor's descriptor type as the reference.
2644 firstIteration =
false;
2645 descriptorType = it->second->descriptorType();
2647 if (descriptorType != it->second->descriptorType()) {
// Populate m_mapOfDetectorNames / m_mapOfDescriptorNames (enum -> OpenCV name),
// guarded by OpenCV version and contrib-module availability macros.
// NOTE(review): elided view — most map-assignment lines are missing from this
// chunk; only the visible entries are shown.
2654void vpKeyPoint::initFeatureNames()
2657#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2664#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2665 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2667#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2668 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2671#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2674#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2679#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2680 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// Descriptor-name map follows the same version/module gating.
2684#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2687#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2688 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2689 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2691#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2692 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2695#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2698#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2701#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2702 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2703 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
2706#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2707 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2708 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// Fragment of the matcher initialization (the enclosing signature, presumably
// initMatcher(matcherName), is elided from this view).  Determines the common
// descriptor type of all extractors, then configures the matcher accordingly.
2720 int descriptorType = CV_32F;
2721 bool firstIteration =
true;
2722 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2723 it != m_extractors.end(); ++it) {
2724 if (firstIteration) {
2725 firstIteration =
false;
2726 descriptorType = it->second->descriptorType();
2728 if (descriptorType != it->second->descriptorType()) {
2734 if (matcherName ==
"FlannBased") {
2735 if (m_extractors.empty()) {
2736 std::cout <<
"Warning: No extractor initialized, by default use "
2737 "floating values (CV_32F) "
2738 "for descriptor type !"
// Binary descriptors (CV_8U) need an LSH index; float descriptors use KD-tree.
2742 if (descriptorType == CV_8U) {
2743#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2744 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2746 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2749#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2750 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2752 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
// Non-Flann matchers come from the OpenCV string factory.
2756 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x only: propagate the cross-check flag on BruteForce matching
// (only meaningful when knn matching is disabled).
2759#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2760 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2761 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2766 std::stringstream ss_msg;
2767 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2768 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Fragments of two insertImageMatching overloads (signatures elided): paste a
// reference image and the current image side by side into IMatching.
2785 IMatching.
insert(IRef, topLeftCorner);
2787 IMatching.
insert(ICurrent, topLeftCorner);
2802 IMatching.
insert(IRef, topLeftCorner);
2804 IMatching.
insert(ICurrent, topLeftCorner);
// Fragment of insertImageMatching building a square mosaic of all training
// images plus the current image (signature elided from this view).
// Layout: ceil-square grid; the current image takes the median cell.
2818 int nbImg = (int)(m_mapOfImages.size() + 1);
2820 if (m_mapOfImages.empty()) {
2821 std::cerr <<
"There is no training image loaded !" << std::endl;
2831 int nbWidth = nbImgSqrt;
2832 int nbHeight = nbImgSqrt;
// Grow the grid by one row/column if the square root under-counts.
2834 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Find the largest training-image width/height to size uniform cells.
2839 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2841 if (maxW < it->second.getWidth()) {
2842 maxW = it->second.getWidth();
2845 if (maxH < it->second.getHeight()) {
2846 maxH = it->second.getHeight();
// Reserve the central (median) cell for the current image.
2852 int medianI = nbHeight / 2;
2853 int medianJ = nbWidth / 2;
2854 int medianIndex = medianI * nbWidth + medianJ;
2857 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2859 int local_cpt = cpt;
// Skip over the median cell when placing training images.
2860 if (cpt >= medianIndex) {
2865 int indexI = local_cpt / nbWidth;
2866 int indexJ = local_cpt - (indexI * nbWidth);
2867 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2869 IMatching.
insert(it->second, topLeftCorner);
2872 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2873 IMatching.
insert(ICurrent, topLeftCorner);
// Fragment of the second mosaic-building insertImageMatching overload
// (signature elided); same grid layout as the grayscale variant, but this one
// inserts IRef into each training-image cell.
2888 int nbImg = (int)(m_mapOfImages.size() + 1);
2890 if (m_mapOfImages.empty()) {
2891 std::cerr <<
"There is no training image loaded !" << std::endl;
2903 int nbWidth = nbImgSqrt;
2904 int nbHeight = nbImgSqrt;
2906 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Largest training-image dimensions define the uniform cell size.
2911 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2913 if (maxW < it->second.getWidth()) {
2914 maxW = it->second.getWidth();
2917 if (maxH < it->second.getHeight()) {
2918 maxH = it->second.getHeight();
// Median cell reserved for the current image.
2924 int medianI = nbHeight / 2;
2925 int medianJ = nbWidth / 2;
2926 int medianIndex = medianI * nbWidth + medianJ;
2929 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2931 int local_cpt = cpt;
2932 if (cpt >= medianIndex) {
2937 int indexI = local_cpt / nbWidth;
2938 int indexJ = local_cpt - (indexI * nbWidth);
2939 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2943 IMatching.
insert(IRef, topLeftCorner);
2946 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2947 IMatching.
insert(ICurrent, topLeftCorner);
// Fragment of loadConfigFile (signature elided): drop the current
// detector/extractor configuration before parsing the XML settings file.
2962 m_detectorNames.clear();
2963 m_extractorNames.clear();
2964 m_detectors.clear();
2965 m_extractors.clear();
2967 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
// Delegate the actual parsing to the XML parser object (presumably a
// vpXmlConfigParserKeyPoint — its declaration is elided from this view).
2970 xmlp.
parse(configFile);
// Fragment of loadLearningData (signature elided from this view): restores
// training keypoints, optional 3D points, training images and descriptors from
// either a binary file or an XML (pugixml) file, optionally appending to the
// data already loaded.  NOTE(review): an elided view — the append/binary
// branches, file reads and many braces are missing; comments describe only the
// visible logic.
// When appending, class/image ids from the file are offset past the largest
// ids already present, so the two data sets do not collide.
3034 int startClassId = 0;
3035 int startImageId = 0;
// Non-append mode: wipe all previously learned data.
3037 m_trainKeyPoints.clear();
3038 m_trainPoints.clear();
3039 m_mapOfImageId.clear();
3040 m_mapOfImages.clear();
// Append mode: compute the current maximum class id...
3043 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3044 if (startClassId < it->first) {
3045 startClassId = it->first;
// ...and the current maximum image id.
3050 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3052 if (startImageId < it->first) {
3053 startImageId = it->first;
3060 if (!parent.empty()) {
// --- Binary learning-file branch ---
3065 std::ifstream file(filename.c_str(), std::ifstream::binary);
3066 if (!file.is_open()) {
3074#if !defined(VISP_HAVE_MODULE_IO)
// Without visp_io, embedded training images cannot be re-read from disk.
3076 std::cout <<
"Warning: The learning file contains image data that will "
"not be loaded as visp_io module "
"is not available !"
3083 for (
int i = 0; i < nbImgs; i++) {
// Read the length-prefixed image path, character by character.
3091 char *path =
new char[length + 1];
3093 for (
int cpt = 0; cpt < length; cpt++) {
3095 file.read((
char *)(&c),
sizeof(c));
3098 path[length] =
'\0';
3101#ifdef VISP_HAVE_MODULE_IO
// Store the reloaded image under its (possibly offset) image id.
3109 m_mapOfImages[
id + startImageId] = I;
3117 int have3DInfoInt = 0;
3119 bool have3DInfo = have3DInfoInt != 0;
// Descriptor matrix header: default type 5 corresponds to OpenCV CV_32F.
3130 int descriptorType = 5;
3133 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3134 for (
int i = 0; i < nRows; i++) {
// Per-keypoint record: 2D position, scale/orientation data and ids.
3136 float u, v, size, angle, response;
3137 int octave, class_id, image_id;
3146 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3147 m_trainKeyPoints.push_back(keyPoint);
3149 if (image_id != -1) {
3150#ifdef VISP_HAVE_MODULE_IO
3152 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3162 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
// Read each descriptor element with the width matching descriptorType.
3165 for (
int j = 0; j < nCols; j++) {
3167 switch (descriptorType) {
3169 unsigned char value;
3170 file.read((
char *)(&value),
sizeof(value));
3171 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
3176 file.read((
char *)(&value),
sizeof(value));
3177 trainDescriptorsTmp.at<
char>(i, j) = value;
3181 unsigned short int value;
3183 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
3189 trainDescriptorsTmp.at<
short int>(i, j) = value;
3195 trainDescriptorsTmp.at<
int>(i, j) = value;
3201 trainDescriptorsTmp.at<
float>(i, j) = value;
3207 trainDescriptorsTmp.at<
double>(i, j) = value;
3213 trainDescriptorsTmp.at<
float>(i, j) = value;
// Merge with any existing descriptors (row-wise) when appending.
3219 if (!append || m_trainDescriptors.empty()) {
3220 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3222 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// --- XML learning-file branch (pugixml) ---
3227 pugi::xml_document doc;
3230 if (!doc.load_file(filename.c_str())) {
3234 pugi::xml_node root_element = doc.document_element();
3236 int descriptorType = CV_32F;
3237 int nRows = 0, nCols = 0;
3240 cv::Mat trainDescriptorsTmp;
3242 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
3243 first_level_node = first_level_node.next_sibling()) {
3245 std::string name(first_level_node.name());
3246 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
3248 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
3249 image_info_node = image_info_node.next_sibling()) {
3250 name = std::string(image_info_node.name());
3252 if (name ==
"trainImg") {
3254 int id = image_info_node.attribute(
"image_id").as_int();
3257#ifdef VISP_HAVE_MODULE_IO
3258 std::string path(image_info_node.text().as_string());
3267 m_mapOfImages[
id + startImageId] = I;
3271 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
// Global descriptor-matrix shape and element type.
3272 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3273 descriptors_info_node = descriptors_info_node.next_sibling()) {
3274 if (descriptors_info_node.type() == pugi::node_element) {
3275 name = std::string(descriptors_info_node.name());
3277 if (name ==
"nrows") {
3278 nRows = descriptors_info_node.text().as_int();
3279 }
else if (name ==
"ncols") {
3280 nCols = descriptors_info_node.text().as_int();
3281 }
else if (name ==
"type") {
3282 descriptorType = descriptors_info_node.text().as_int();
3287 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3288 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
// One node per keypoint: 2D data, ids, optional 3D point and descriptor row.
3289 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3290 int octave = 0, class_id = 0, image_id = 0;
3291 double oX = 0.0, oY = 0.0, oZ = 0.0;
3293 std::stringstream ss;
3295 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
3296 point_node = point_node.next_sibling()) {
3297 if (point_node.type() == pugi::node_element) {
3298 name = std::string(point_node.name());
3302 u = point_node.text().as_double();
3303 }
else if (name ==
"v") {
3304 v = point_node.text().as_double();
3305 }
else if (name ==
"size") {
3306 size = point_node.text().as_double();
3307 }
else if (name ==
"angle") {
3308 angle = point_node.text().as_double();
3309 }
else if (name ==
"response") {
3310 response = point_node.text().as_double();
3311 }
else if (name ==
"octave") {
3312 octave = point_node.text().as_int();
3313 }
else if (name ==
"class_id") {
3314 class_id = point_node.text().as_int();
// The keypoint is finalized once class_id is read (field order matters).
3315 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
3316 (class_id + startClassId));
3317 m_trainKeyPoints.push_back(keyPoint);
3318 }
else if (name ==
"image_id") {
3319 image_id = point_node.text().as_int();
3320 if (image_id != -1) {
3321#ifdef VISP_HAVE_MODULE_IO
3323 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3326 }
else if (name ==
"oX") {
3327 oX = point_node.text().as_double();
3328 }
else if (name ==
"oY") {
3329 oY = point_node.text().as_double();
3330 }
else if (name ==
"oZ") {
3331 oZ = point_node.text().as_double();
// 3D point is finalized once oZ is read.
3332 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
3333 }
else if (name ==
"desc") {
// Parse the descriptor elements as text, converted per descriptorType.
3336 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3337 descriptor_value_node = descriptor_value_node.next_sibling()) {
3339 if (descriptor_value_node.type() == pugi::node_element) {
3341 std::string parseStr(descriptor_value_node.text().as_string());
3346 switch (descriptorType) {
3351 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
3358 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
3362 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
3366 ss >> trainDescriptorsTmp.at<
short int>(i, j);
3370 ss >> trainDescriptorsTmp.at<
int>(i, j);
3374 ss >> trainDescriptorsTmp.at<
float>(i, j);
3378 ss >> trainDescriptorsTmp.at<
double>(i, j);
3382 ss >> trainDescriptorsTmp.at<
float>(i, j);
3386 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
3399 if (!append || m_trainDescriptors.empty()) {
3400 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3402 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// Register the (merged) training descriptors with the matcher and update the
// running image-id counter.
3412 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3418 m_currentImageId = (int)m_mapOfImages.size();
// Fragment of vpKeyPoint::match (signature start elided): performs knn (k=2)
// or plain descriptor matching.  When m_useMatchTrainToQuery is set, matching
// is done train->query on a cloned matcher and each DMatch's query/train
// indices are swapped back so results stay query->train.
3430 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3435 m_knnMatches.clear();
3437 if (m_useMatchTrainToQuery) {
3438 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// clone(true) gives an empty matcher so the stored train set is not reused.
3441 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3442 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3444 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3445 it1 != knnMatchesTmp.end(); ++it1) {
3446 std::vector<cv::DMatch> tmp;
3447 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
// Swap indices: trainIdx becomes queryIdx and vice versa.
3448 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3450 m_knnMatches.push_back(tmp);
// Reduce each knn list to its single best match via knnToDMatch.
3453 matches.resize(m_knnMatches.size());
3454 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3457 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3458 matches.resize(m_knnMatches.size());
3459 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Plain (non-knn) matching path, with the same optional train->query swap.
3464 if (m_useMatchTrainToQuery) {
3465 std::vector<cv::DMatch> matchesTmp;
3467 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3468 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3470 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3471 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3475 m_matcher->match(queryDescriptors, matches);
3541 if (m_trainDescriptors.empty()) {
3542 std::cerr <<
"Reference is empty." << std::endl;
3544 std::cerr <<
"Reference is not computed." << std::endl;
3546 std::cerr <<
"Matching is not possible." << std::endl;
3551 if (m_useAffineDetection) {
3552 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3553 std::vector<cv::Mat> listOfQueryDescriptors;
3559 m_queryKeyPoints.clear();
3560 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3561 it != listOfQueryKeyPoints.end(); ++it) {
3562 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3566 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3570 it->copyTo(m_queryDescriptors);
3572 m_queryDescriptors.push_back(*it);
3576 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3577 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3580 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3593 m_queryKeyPoints = queryKeyPoints;
3594 m_queryDescriptors = queryDescriptors;
3596 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3599 m_queryFilteredKeyPoints.clear();
3600 m_objectFilteredPoints.clear();
3601 m_filteredMatches.clear();
3605 if (m_useMatchTrainToQuery) {
3607 m_queryFilteredKeyPoints.clear();
3608 m_filteredMatches.clear();
3609 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3610 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3611 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3614 m_queryFilteredKeyPoints = m_queryKeyPoints;
3615 m_filteredMatches = m_matches;
3618 if (!m_trainPoints.empty()) {
3619 m_objectFilteredPoints.clear();
3623 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3625 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3634 return static_cast<unsigned int>(m_filteredMatches.size());
3666 double error, elapsedTime;
3667 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3685 double error, elapsedTime;
3686 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3709 if (m_trainDescriptors.empty()) {
3710 std::cerr <<
"Reference is empty." << std::endl;
3712 std::cerr <<
"Reference is not computed." << std::endl;
3714 std::cerr <<
"Matching is not possible." << std::endl;
3719 if (m_useAffineDetection) {
3720 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3721 std::vector<cv::Mat> listOfQueryDescriptors;
3727 m_queryKeyPoints.clear();
3728 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3729 it != listOfQueryKeyPoints.end(); ++it) {
3730 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3734 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3738 it->copyTo(m_queryDescriptors);
3740 m_queryDescriptors.push_back(*it);
3744 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3745 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3748 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3750 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3753 m_queryFilteredKeyPoints.clear();
3754 m_objectFilteredPoints.clear();
3755 m_filteredMatches.clear();
3759 if (m_useMatchTrainToQuery) {
3761 m_queryFilteredKeyPoints.clear();
3762 m_filteredMatches.clear();
3763 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3764 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3765 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3768 m_queryFilteredKeyPoints = m_queryKeyPoints;
3769 m_filteredMatches = m_matches;
3772 if (!m_trainPoints.empty()) {
3773 m_objectFilteredPoints.clear();
3777 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3779 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3791 m_ransacInliers.clear();
3792 m_ransacOutliers.clear();
3794 if (m_useRansacVVS) {
3795 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3799 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3800 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3804 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3806 double x = 0.0, y = 0.0;
3811 objectVpPoints[cpt] = pt;
3814 std::vector<vpPoint> inliers;
3815 std::vector<unsigned int> inlierIndex;
3817 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3819 std::map<unsigned int, bool> mapOfInlierIndex;
3820 m_matchRansacKeyPointsToPoints.clear();
3822 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3823 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3824 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3825 mapOfInlierIndex[*it] =
true;
3828 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3829 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3830 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3834 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3836 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3837 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3838 m_ransacInliers.begin(), matchRansacToVpImage);
3840 elapsedTime += m_poseTime;
3844 std::vector<cv::Point2f> imageFilteredPoints;
3845 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3846 std::vector<int> inlierIndex;
3847 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3849 std::map<int, bool> mapOfInlierIndex;
3850 m_matchRansacKeyPointsToPoints.clear();
3852 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3853 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3854 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3855 mapOfInlierIndex[*it] =
true;
3858 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3859 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3860 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3864 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3866 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3867 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3868 m_ransacInliers.begin(), matchRansacToVpImage);
3870 elapsedTime += m_poseTime;
3896 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3920 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3921 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3922 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
3924 if (imPts1 != NULL && imPts2 != NULL) {
3931 double meanDescriptorDistanceTmp = 0.0;
3932 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3933 meanDescriptorDistanceTmp += (double)it->distance;
3936 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3937 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3939 if (meanDescriptorDistance != NULL) {
3940 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3942 if (detection_score != NULL) {
3943 *detection_score = score;
3946 if (m_filteredMatches.size() >= 4) {
3948 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3950 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3952 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3953 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3954 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3957 std::vector<vpImagePoint> inliers;
3958 if (isPlanarObject) {
3959#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3960 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3962 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3965 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3967 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3968 realPoint.at<
double>(0, 0) = points1[i].x;
3969 realPoint.at<
double>(1, 0) = points1[i].y;
3970 realPoint.at<
double>(2, 0) = 1.f;
3972 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3973 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3974 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3975 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3977 if (reprojectionError < 6.0) {
3978 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3979 if (imPts1 != NULL) {
3980 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3983 if (imPts2 != NULL) {
3984 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3988 }
else if (m_filteredMatches.size() >= 8) {
3989 cv::Mat fundamentalInliers;
3990 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3992 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3993 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3994 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3996 if (imPts1 != NULL) {
3997 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
4000 if (imPts2 != NULL) {
4001 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
4007 if (!inliers.empty()) {
4014 double meanU = 0.0, meanV = 0.0;
4015 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4016 meanU += it->get_u();
4017 meanV += it->get_v();
4020 meanU /= (double)inliers.size();
4021 meanV /= (double)inliers.size();
4023 centerOfGravity.
set_u(meanU);
4024 centerOfGravity.
set_v(meanV);
4032 return meanDescriptorDistanceTmp < m_detectionThreshold;
4034 return score > m_detectionScore;
4062 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4067 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4069 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4073 modelImagePoints[cpt] = imPt;
4082 double meanU = 0.0, meanV = 0.0;
4083 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4084 meanU += it->get_u();
4085 meanV += it->get_v();
4088 meanU /= (double)m_ransacInliers.size();
4089 meanV /= (double)m_ransacInliers.size();
4091 centerOfGravity.
set_u(meanU);
4092 centerOfGravity.
set_v(meanV);
4113 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4114 std::vector<cv::Mat> &listOfDescriptors,
4120 listOfKeypoints.clear();
4121 listOfDescriptors.clear();
4123 for (
int tl = 1; tl < 6; tl++) {
4124 double t = pow(2, 0.5 * tl);
4125 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4126 std::vector<cv::KeyPoint> keypoints;
4127 cv::Mat descriptors;
4129 cv::Mat timg, mask, Ai;
4132 affineSkew(t, phi, timg, mask, Ai);
4135 if(listOfAffineI != NULL) {
4137 bitwise_and(mask, timg, img_disp);
4140 listOfAffineI->push_back(tI);
4144 cv::bitwise_and(mask, timg, img_disp);
4145 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4146 cv::imshow(
"Skew", img_disp );
4150 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4151 it != m_detectors.end(); ++it) {
4152 std::vector<cv::KeyPoint> kp;
4153 it->second->detect(timg, kp, mask);
4154 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4158 extract(timg, keypoints, descriptors, elapsedTime);
4160 for(
unsigned int i = 0; i < keypoints.size(); i++) {
4161 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4162 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4163 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4164 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4167 listOfKeypoints.push_back(keypoints);
4168 listOfDescriptors.push_back(descriptors);
4177 std::vector<std::pair<double, int> > listOfAffineParams;
4178 for (
int tl = 1; tl < 6; tl++) {
4179 double t = pow(2, 0.5 * tl);
4180 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4181 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4185 listOfKeypoints.resize(listOfAffineParams.size());
4186 listOfDescriptors.resize(listOfAffineParams.size());
4188 if (listOfAffineI != NULL) {
4189 listOfAffineI->resize(listOfAffineParams.size());
4192#ifdef VISP_HAVE_OPENMP
4193#pragma omp parallel for
4195 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4196 std::vector<cv::KeyPoint> keypoints;
4197 cv::Mat descriptors;
4199 cv::Mat timg, mask, Ai;
4202 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
4204 if (listOfAffineI != NULL) {
4206 bitwise_and(mask, timg, img_disp);
4209 (*listOfAffineI)[(size_t)cpt] = tI;
4214 cv::bitwise_and(mask, timg, img_disp);
4215 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4216 cv::imshow(
"Skew", img_disp );
4220 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4221 it != m_detectors.end(); ++it) {
4222 std::vector<cv::KeyPoint> kp;
4223 it->second->detect(timg, kp, mask);
4224 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4228 extract(timg, keypoints, descriptors, elapsedTime);
4230 for (
size_t i = 0; i < keypoints.size(); i++) {
4231 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4232 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4233 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4234 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4237 listOfKeypoints[(size_t)cpt] = keypoints;
4238 listOfDescriptors[(size_t)cpt] = descriptors;
4254 m_computeCovariance =
false;
4256 m_currentImageId = 0;
4258 m_detectionScore = 0.15;
4259 m_detectionThreshold = 100.0;
4260 m_detectionTime = 0.0;
4261 m_detectorNames.clear();
4262 m_detectors.clear();
4263 m_extractionTime = 0.0;
4264 m_extractorNames.clear();
4265 m_extractors.clear();
4266 m_filteredMatches.clear();
4269 m_knnMatches.clear();
4270 m_mapOfImageId.clear();
4271 m_mapOfImages.clear();
4272 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4273 m_matcherName =
"BruteForce-Hamming";
4275 m_matchingFactorThreshold = 2.0;
4276 m_matchingRatioThreshold = 0.85;
4277 m_matchingTime = 0.0;
4278 m_matchRansacKeyPointsToPoints.clear();
4279 m_nbRansacIterations = 200;
4280 m_nbRansacMinInlierCount = 100;
4281 m_objectFilteredPoints.clear();
4283 m_queryDescriptors = cv::Mat();
4284 m_queryFilteredKeyPoints.clear();
4285 m_queryKeyPoints.clear();
4286 m_ransacConsensusPercentage = 20.0;
4288 m_ransacInliers.clear();
4289 m_ransacOutliers.clear();
4290 m_ransacParallel =
true;
4291 m_ransacParallelNbThreads = 0;
4292 m_ransacReprojectionError = 6.0;
4293 m_ransacThreshold = 0.01;
4294 m_trainDescriptors = cv::Mat();
4295 m_trainKeyPoints.clear();
4296 m_trainPoints.clear();
4297 m_trainVpPoints.clear();
4298 m_useAffineDetection =
false;
4299#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4300 m_useBruteForceCrossCheck =
true;
4302 m_useConsensusPercentage =
false;
4304 m_useMatchTrainToQuery =
false;
4305 m_useRansacVVS =
true;
4306 m_useSingleMatchFilter =
true;
4308 m_detectorNames.push_back(
"ORB");
4309 m_extractorNames.push_back(
"ORB");
4325 if (!parent.empty()) {
4329 std::map<int, std::string> mapOfImgPath;
4330 if (saveTrainingImages) {
4331#ifdef VISP_HAVE_MODULE_IO
4333 unsigned int cpt = 0;
4335 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4341 std::stringstream ss;
4342 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
4344 switch (m_imageFormat) {
4366 std::string imgFilename = ss.str();
4367 mapOfImgPath[it->first] = imgFilename;
4368 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
4371 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
4372 "are not saved because "
4373 "visp_io module is not available !"
4378 bool have3DInfo = m_trainPoints.size() > 0;
4379 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4385 std::ofstream file(filename.c_str(), std::ofstream::binary);
4386 if (!file.is_open()) {
4391 int nbImgs = (int)mapOfImgPath.size();
4394#ifdef VISP_HAVE_MODULE_IO
4395 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4401 std::string path = it->second;
4402 int length = (int)path.length();
4405 for (
int cpt = 0; cpt < length; cpt++) {
4406 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
4412 int have3DInfoInt = have3DInfo ? 1 : 0;
4415 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4416 int descriptorType = m_trainDescriptors.type();
4427 for (
int i = 0; i < nRows; i++) {
4428 unsigned int i_ = (
unsigned int)i;
4430 float u = m_trainKeyPoints[i_].pt.x;
4434 float v = m_trainKeyPoints[i_].pt.y;
4438 float size = m_trainKeyPoints[i_].size;
4442 float angle = m_trainKeyPoints[i_].angle;
4446 float response = m_trainKeyPoints[i_].response;
4450 int octave = m_trainKeyPoints[i_].octave;
4454 int class_id = m_trainKeyPoints[i_].class_id;
4458#ifdef VISP_HAVE_MODULE_IO
4459 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4460 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4469 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4480 for (
int j = 0; j < nCols; j++) {
4482 switch (descriptorType) {
4484 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
4485 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
4489 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
4521 pugi::xml_document doc;
4522 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4523 node.append_attribute(
"version") =
"1.0";
4524 node.append_attribute(
"encoding") =
"UTF-8";
4530 pugi::xml_node root_node = doc.append_child(
"LearningData");
4533 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
4535#ifdef VISP_HAVE_MODULE_IO
4536 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4537 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
4538 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4539 std::stringstream ss;
4541 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
4546 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
4548 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4549 int descriptorType = m_trainDescriptors.type();
4552 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
4555 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
4558 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
4560 for (
int i = 0; i < nRows; i++) {
4561 unsigned int i_ = (
unsigned int)i;
4562 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
4564 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4565 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4566 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4567 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4568 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4569 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4570 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4572#ifdef VISP_HAVE_MODULE_IO
4573 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4574 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
4575 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4577 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
4581 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4582 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4583 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4586 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
4588 for (
int j = 0; j < nCols; j++) {
4589 switch (descriptorType) {
4595 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4596 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4604 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4605 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4609 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4610 m_trainDescriptors.at<
unsigned short int>(i, j);
4614 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
4618 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
4622 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
4626 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
4636 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4640#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4641#ifndef DOXYGEN_SHOULD_SKIP_THIS
4643struct KeypointResponseGreaterThanThreshold {
4644 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) {}
4645 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
4649struct KeypointResponseGreater {
4650 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
4654void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4658 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
4659 if (n_points == 0) {
4665 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4667 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4670 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4671 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4674 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4678struct RoiPredicate {
4679 RoiPredicate(
const cv::Rect &_r) : r(_r) {}
4681 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
4686void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4689 if (borderSize > 0) {
4690 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4693 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4694 RoiPredicate(cv::Rect(
4695 cv::Point(borderSize, borderSize),
4696 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4701struct SizePredicate {
4702 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4704 bool operator()(
const cv::KeyPoint &keyPt)
const
4706 float size = keyPt.size;
4707 return (size < minSize) || (size > maxSize);
4710 float minSize, maxSize;
4713void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4715 CV_Assert(minSize >= 0);
4716 CV_Assert(maxSize >= 0);
4717 CV_Assert(minSize <= maxSize);
4719 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4725 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) {}
4726 bool operator()(
const cv::KeyPoint &key_pt)
const
4728 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4735void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4740 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4743struct KeyPoint_LessThan {
4744 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4745 bool operator()(
size_t i,
size_t j)
const
4747 const cv::KeyPoint &kp1 = (*kp)[ i];
4748 const cv::KeyPoint &kp2 = (*kp)[ j];
4750 std::numeric_limits<float>::epsilon())) {
4752 return kp1.pt.x < kp2.pt.x;
4756 std::numeric_limits<float>::epsilon())) {
4758 return kp1.pt.y < kp2.pt.y;
4762 std::numeric_limits<float>::epsilon())) {
4764 return kp1.size > kp2.size;
4768 std::numeric_limits<float>::epsilon())) {
4770 return kp1.angle < kp2.angle;
4774 std::numeric_limits<float>::epsilon())) {
4776 return kp1.response > kp2.response;
4779 if (kp1.octave != kp2.octave) {
4780 return kp1.octave > kp2.octave;
4783 if (kp1.class_id != kp2.class_id) {
4784 return kp1.class_id > kp2.class_id;
4789 const std::vector<cv::KeyPoint> *kp;
4792void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4794 size_t i, j, n = keypoints.size();
4795 std::vector<size_t> kpidx(n);
4796 std::vector<uchar> mask(n, (uchar)1);
4798 for (i = 0; i < n; i++) {
4801 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4802 for (i = 1, j = 0; i < n; i++) {
4803 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4804 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4807 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4808 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4809 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4810 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4817 for (i = j = 0; i < n; i++) {
4820 keypoints[j] = keypoints[i];
4825 keypoints.resize(j);
4831vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4833 : detector(_detector), maxLevel(_maxLevel)
// Returns true when no usable wrapped detector is available.
// NOTE(review): extraction-garbled span — the enclosing braces were lost by the
// scrape, and as written the C-style cast applies to the result of
// detector->empty() (operator-> binds tighter), which cannot compile; verify
// the exact expression against the upstream file before trusting this text.
4837bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
 4839 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4842void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4843 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4845 detectImpl(image.getMat(), keypoints, mask.getMat());
// Run the wrapped detector on each level of an image pyramid and map the
// detected keypoints back to the full-resolution coordinate frame.
// NOTE(review): extraction-garbled span — the per-level down-scaling of src
// between pyramid iterations and the closing braces were elided by the scrape;
// keep this text only as a reading aid and compare with the upstream file.
4848void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
 4849 const cv::Mat &mask)
const
 4851 cv::Mat src = image;
 4852 cv::Mat src_mask = mask;
 4854 cv::Mat dilated_mask;
 4855 if (!mask.empty()) {
 // Dilate the mask so keypoints near the mask border survive down-scaling,
 // then normalize it to a strict 0/255 binary image.
 4856 cv::dilate(mask, dilated_mask, cv::Mat());
 4857 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
 4858 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
 4859 dilated_mask = mask255;
 // One iteration per pyramid level; multiplier is the scale factor back to
 // the original image (doubles each level).
 4862 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
 4864 std::vector<cv::KeyPoint> new_pts;
 4865 detector->detect(src, new_pts, src_mask);
 4866 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
 4867 for (; it != end; ++it) {
 // Rescale position and size from the current level to full resolution.
 4868 it->pt.x *= multiplier;
 4869 it->pt.y *= multiplier;
 4870 it->size *= multiplier;
 4873 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
 // Resample the dilated mask to the (down-scaled) source size for the next level.
 4882 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
 // Final pass: drop keypoints whose full-resolution position falls on a
 // zero pixel of the original (un-dilated) mask.
 4887 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4892#elif !defined(VISP_BUILD_SHARED_LIBS)
// Placeholder symbol emitted when vpKeyPoint is compiled out, so that static
// builds do not produce an empty translation unit.
// Fix: dropped the stray ';' that followed the function body.
void dummy_vpKeyPoint() { }
std::vector< vpImagePoint > currentImagePointsList
std::vector< unsigned int > matchedReferencePoints
std::vector< vpImagePoint > referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
Definition of the vpImage class member functions.
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
void initMatcher(const std::string &matcherName)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint &centerOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double threshold=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
std::vector< unsigned int > getRansacInlierIndex() const
void setCovarianceComputation(const bool &flag)
std::vector< vpPoint > getRansacInliers() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void setUseParallelRansac(bool use)
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()