Visual Servoing Platform version 3.6.0
tutorial-mb-generic-tracker-live.cpp
#include <iomanip>  // std::setprecision, used when displaying the pose
#include <sstream>  // std::stringstream, used for on-screen text

#include <visp3/core/vpConfig.h>
#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vpFlyCaptureGrabber.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/sensor/vpV4l2Grabber.h>
#endif
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>
#include <visp3/mbt/vpMbGenericTracker.h>

#if defined(HAVE_OPENCV_VIDEOIO)
#include <opencv2/videoio.hpp>
#endif

// #undef VISP_HAVE_V4L2
// #undef VISP_HAVE_DC1394
// #undef VISP_HAVE_CMU1394
// #undef VISP_HAVE_FLYCAPTURE
// #undef VISP_HAVE_REALSENSE2
// #undef VISP_HAVE_OPENCV
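// Uncommenting one of the #undef lines above disables the corresponding
// third-party grabber, so the next available one in the #if/#elif chain
// inside main() is exercised instead.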

int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && \
    (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
     defined(HAVE_OPENCV_HIGHGUI) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))

  try {
    std::string opt_modelname = "model/teabox/teabox.cao";
    int opt_tracker = 2;
    int opt_device = 0; // For OpenCV and V4l2 grabber to set the camera device
    double opt_proj_error_threshold = 30.;
    bool opt_use_ogre = false;
    bool opt_use_scanline = false;
    bool opt_display_projection_error = false;
    bool opt_learn = false;
    bool opt_auto_init = false;
    std::string opt_learning_data = "learning/data-learned.bin";
    std::string opt_intrinsic_file = "";
    std::string opt_camera_name = "";

    for (int i = 0; i < argc; i++) {
      if (std::string(argv[i]) == "--model" && i + 1 < argc) {
        opt_modelname = std::string(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--tracker" && i + 1 < argc) {
        opt_tracker = atoi(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--camera_device" && i + 1 < argc) {
        opt_device = atoi(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--max_proj_error" && i + 1 < argc) {
        opt_proj_error_threshold = atof(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--use_ogre") {
        opt_use_ogre = true;
      }
      else if (std::string(argv[i]) == "--use_scanline") {
        opt_use_scanline = true;
      }
      else if (std::string(argv[i]) == "--learn") {
        opt_learn = true;
      }
      else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
        opt_learning_data = argv[i + 1];
      }
      else if (std::string(argv[i]) == "--auto_init") {
        opt_auto_init = true;
      }
      else if (std::string(argv[i]) == "--display_proj_error") {
        opt_display_projection_error = true;
      }
      else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
        opt_intrinsic_file = std::string(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
        opt_camera_name = std::string(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout
          << "\nUsage: " << argv[0] << " [--camera_device <camera device> (default: 0)]"
          << " [--intrinsic <intrinsic file> (default: empty)]"
          << " [--camera_name <camera name> (default: empty)]"
          << " [--model <model name> (default: teabox)]"
          << " [--tracker <0=edge|1=keypoint|2=hybrid> (default: 2)]"
          << " [--use_ogre] [--use_scanline]"
          << " [--max_proj_error <allowed projection error> (default: 30)]"
          << " [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
          << " [--display_proj_error]"
          << " [--help] [-h]\n"
          << std::endl;
        return EXIT_SUCCESS;
      }
    }
    std::string parentname = vpIoTools::getParent(opt_modelname);
    std::string objectname = vpIoTools::getNameWE(opt_modelname);

    if (!parentname.empty())
      objectname = parentname + "/" + objectname;

    std::cout << "Tracker requested config files: " << objectname << ".[init, cao]" << std::endl;
    std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;

    std::cout << "Tracked features: " << std::endl;
    std::cout << "  Use edges   : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
    std::cout << "  Use klt     : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
    std::cout << "Tracker options: " << std::endl;
    std::cout << "  Use ogre    : " << opt_use_ogre << std::endl;
    std::cout << "  Use scanline: " << opt_use_scanline << std::endl;
    std::cout << "  Proj. error : " << opt_proj_error_threshold << std::endl;
    std::cout << "  Display proj. error: " << opt_display_projection_error << std::endl;
    std::cout << "Config files: " << std::endl;
    std::cout << "  Config file : \"" << objectname + ".xml" << "\"" << std::endl;
    std::cout << "  Model file  : \"" << objectname + ".cao" << "\"" << std::endl;
    std::cout << "  Init file   : \"" << objectname + ".init" << "\"" << std::endl;
    std::cout << "Learning options: " << std::endl;
    std::cout << "  Learn        : " << opt_learn << std::endl;
    std::cout << "  Auto init    : " << opt_auto_init << std::endl;
    std::cout << "  Learning data: " << opt_learning_data << std::endl;

#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
    vpImage<vpRGBa> I; // Since ViSP 3.2.0 we support model-based tracking on color images
#else
    vpImage<unsigned char> I; // Tracking on gray level images
#endif

    vpCameraParameters cam; // Default intrinsics, overridden below when an XML file is supplied
    cam.initPersProjWithoutDistortion(839, 839, 325, 243);
    vpXmlParserCamera parser;
    if (!opt_intrinsic_file.empty() && !opt_camera_name.empty())
      parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
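    // A possible way to replace the fallback intrinsics above with calibrated
    // ones (file and camera names are illustrative; any XML written by the
    // ViSP camera-calibration tools with a matching camera entry works):
    //   ./tutorial-mb-generic-tracker-live --intrinsic camera.xml --camera_name Camera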

#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
    g.setDevice(device.str());
    g.setScale(1);
    g.open(I);
#elif defined(VISP_HAVE_DC1394)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use DC1394 grabber" << std::endl;
    vp1394TwoGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_CMU1394)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use CMU1394 grabber" << std::endl;
    vp1394CMUGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use FlyCapture grabber" << std::endl;
    vpFlyCaptureGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use Realsense 2 grabber" << std::endl;
    vpRealSense2 g;
    rs2::config config;
    config.disable_stream(RS2_STREAM_DEPTH);
    config.disable_stream(RS2_STREAM_INFRARED);
    config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
    g.open(config);
    g.acquire(I);

    std::cout << "Read camera parameters from Realsense device" << std::endl;
    cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(HAVE_OPENCV_VIDEOIO)
    std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
    cv::VideoCapture g(opt_device); // Open the default camera
    if (!g.isOpened()) {            // Check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return EXIT_FAILURE;
    }
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif
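    // Whichever branch was compiled in, at this point exactly one grabber `g`
    // exists and `I` holds a first image; the OpenCV path additionally keeps
    // the cv::Mat `frame` around for the conversions done in the loops below.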

    vpDisplay *display = NULL;
#if defined(VISP_HAVE_X11)
    display = new vpDisplayX;
#elif defined(VISP_HAVE_GDI)
    display = new vpDisplayGDI;
#elif defined(HAVE_OPENCV_HIGHGUI)
    display = new vpDisplayOpenCV;
#endif
    display->init(I, 100, 100, "Model-based tracker");

    while (true) {
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
    defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif

      vpDisplay::display(I);
      vpDisplay::displayText(I, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I);

      if (vpDisplay::getClick(I, false)) {
        break;
      }
    }

    vpHomogeneousMatrix cMo; // Pose of the object frame in the camera frame

    vpMbGenericTracker tracker;
    if (opt_tracker == 0)
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    else if (opt_tracker == 1)
      tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
    else
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
#else
    else {
#if !defined(VISP_HAVE_MODULE_KLT)
      std::cout << "klt and hybrid model-based trackers are not available since the visp_klt module is not available. "
                   "In the CMake GUI turn the visp_klt module ON, then configure and build ViSP again."
                << std::endl;
#else
      std::cout << "Hybrid tracking is impossible since OpenCV is not enabled. "
                << "Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
#endif
      return EXIT_SUCCESS;
    }
#endif
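    // Tracker types are bit flags, which is why the hybrid case above can be
    // requested as EDGE_TRACKER | KLT_TRACKER: the tracker then fuses
    // moving-edge and KLT keypoint features in a single optimization.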

    bool usexml = false;
    if (vpIoTools::checkFilename(objectname + ".xml")) {
      tracker.loadConfigFile(objectname + ".xml");
      usexml = true;
    }

    if (!usexml) {
      if (opt_tracker == 0 || opt_tracker == 2) {
        vpMe me;
        me.setMaskSize(5);
        me.setMaskNumber(180);
        me.setRange(8);
        me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
        me.setThreshold(20);
        me.setMu1(0.5);
        me.setMu2(0.5);
        me.setSampleStep(4);
        tracker.setMovingEdge(me);
      }

#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
      if (opt_tracker == 1 || opt_tracker == 2) {
        vpKltOpencv klt_settings;
        klt_settings.setMaxFeatures(300);
        klt_settings.setWindowSize(5);
        klt_settings.setQuality(0.015);
        klt_settings.setMinDistance(8);
        klt_settings.setHarrisFreeParameter(0.01);
        klt_settings.setBlockSize(3);
        klt_settings.setPyramidLevels(3);
        tracker.setKltOpencv(klt_settings);
        tracker.setKltMaskBorder(5);
      }
#endif
    }
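    // The hard-coded moving-edge and KLT settings above are only a fallback;
    // when <object>.xml exists it is loaded first and fully replaces them.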

    tracker.setCameraParameters(cam);

    tracker.loadModel(objectname + ".cao");
    tracker.setDisplayFeatures(true);
    tracker.setOgreVisibilityTest(opt_use_ogre);
    tracker.setScanLineVisibilityTest(opt_use_scanline);
    tracker.setProjectionErrorComputation(true);
    tracker.setProjectionErrorDisplay(opt_display_projection_error);

#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    std::string detectorName = "SIFT";
    std::string extractorName = "SIFT";
    std::string matcherName = "BruteForce";
#else
    std::string detectorName = "FAST";
    std::string extractorName = "ORB";
    std::string matcherName = "BruteForce-Hamming";
#endif
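    // SIFT is preferred when the OpenCV build provides it (nonfree/xfeatures2d,
    // or the versions where SIFT moved into the core modules); otherwise the
    // tutorial falls back to FAST detection with ORB descriptors, matched with
    // a Hamming-distance brute-force matcher.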
    vpKeyPoint keypoint;
    if (opt_learn || opt_auto_init) {
      keypoint.setDetector(detectorName);
      keypoint.setExtractor(extractorName);
      keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
      keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
      cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
      if (orb_detector) {
        orb_detector->setNLevels(1);
      }
#endif
#endif
    }

    if (opt_auto_init) {
      if (!vpIoTools::checkFilename(opt_learning_data)) {
        std::cout << "Cannot enable auto detection. Learning file \"" << opt_learning_data << "\" doesn't exist"
                  << std::endl;
        return EXIT_FAILURE;
      }
      keypoint.loadLearningData(opt_learning_data, true);
    }
    else {
      tracker.initClick(I, objectname + ".init", true);
    }

    bool learn_position = false;
    bool run_auto_init = false;
    if (opt_auto_init) {
      run_auto_init = true;
    }

    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    unsigned int learn_cpt = 0;
    bool quit = false;
    bool tracking_failed = false;

    while (!quit) {
      double t_begin = vpTime::measureTimeMs();
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
    defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif
      vpDisplay::display(I);

      // Run auto initialization from learned data
      if (run_auto_init) {
        tracking_failed = false;
        if (keypoint.matchPoint(I, cam, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          tracker.initFromPose(I, cMo);
        }
        else {
          vpDisplay::flush(I);
          continue;
        }
      }
      else if (tracking_failed) {
        // Manual init
        tracking_failed = false;
        tracker.initClick(I, objectname + ".init", true);
      }
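      // Recovery strategy: after a failure the pose is re-estimated from the
      // learned keypoints when --auto_init is active, otherwise the user is
      // asked to click the initialization points again.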

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn off feature display just after auto init, so that wrong
          // moving edges are not shown if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        tracker.track(I);
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (opt_auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      if (!tracking_failed) {
        double proj_error = 0;
        if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
          // Check tracking errors
          proj_error = tracker.getProjectionError();
        }
        else {
          tracker.getPose(cMo);
          tracker.getCameraParameters(cam);
          proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
        }
        if (proj_error > opt_proj_error_threshold) {
          std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
          if (opt_auto_init) {
            run_auto_init = true;
          }
          tracking_failed = true;
        }
      }

      if (!tracking_failed) {
        tracker.setDisplayFeatures(true);
        tracker.getPose(cMo);
        tracker.getCameraParameters(cam);
        tracker.display(I, cMo, cam, vpColor::green, 2, false);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        { // Display estimated pose in [m] and [deg]
          vpPoseVector pose(cMo);
          std::stringstream ss;
          ss << "Translation: " << std::setprecision(5) << pose[0] << " " << pose[1] << " " << pose[2] << " [m]";
          vpDisplay::displayText(I, 80, 20, ss.str(), vpColor::green);
          ss.str(""); // erase ss
          ss << "Rotation tu: " << std::setprecision(4) << vpMath::deg(pose[3]) << " " << vpMath::deg(pose[4]) << " "
             << vpMath::deg(pose[5]) << " [deg]";
          vpDisplay::displayText(I, 100, 20, ss.str(), vpColor::green);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
          vpDisplay::displayText(I, 120, 20, ss.str(), vpColor::red);
        }
      }

      if (learn_position) {
        learn_cpt++;
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I, trainKeyPoints);

        // Keep only keypoints that lie on the object faces
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
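      // Each left click in --learn mode appends one view: the detected
      // keypoints, their 3D coordinates on the CAD model and the training
      // image are kept in memory until saveLearningData() writes them out.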

      std::stringstream ss;
      ss << "Loop time: " << vpTime::measureTimeMs() - t_begin << " ms";
      vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
      if (opt_learn)
        vpDisplay::displayText(I, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
      else if (opt_auto_init)
        vpDisplay::displayText(I, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
      else
        vpDisplay::displayText(I, 35, 20, "Right click: quit", vpColor::red);

      vpMouseButton::vpMouseButtonType button = vpMouseButton::button1;
      if (vpDisplay::getClick(I, button, false)) {
        if (button == vpMouseButton::button3) {
          quit = true;
        }
        else if (button == vpMouseButton::button1 && opt_learn) {
          learn_position = true;
        }
        else if (button == vpMouseButton::button1 && opt_auto_init && !opt_learn) {
          run_auto_init = true;
        }
      }

      vpDisplay::flush(I);
    }
    if (opt_learn && learn_cpt) {
      std::cout << "Save learning from " << learn_cpt << " images in file: " << opt_learning_data << std::endl;
      keypoint.saveLearningData(opt_learning_data, true, true);
    }

    delete display;
  }
  catch (const vpException &e) {
    std::cout << "Caught a ViSP exception: " << e << std::endl;
  }
#elif defined(VISP_HAVE_OPENCV)
  (void)argc;
  (void)argv;
  std::cout << "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
               "Realsense2), configure and build ViSP again to use this example"
            << std::endl;
#else
  (void)argc;
  (void)argv;
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
#endif
}
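
A typical session (binary name and relative paths assumed to follow the usual ViSP tutorial layout) is to run once with --learn and left-click on a few well-tracked views to build learning/data-learned.bin, then run again with --auto_init so the pose is recovered from the learned keypoints instead of manual clicks:

  ./tutorial-mb-generic-tracker-live --model model/teabox/teabox.cao --learn
  ./tutorial-mb-generic-tracker-live --model model/teabox/teabox.cao --auto_init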