ObjectLearningByPushing.cpp
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 
#include "ObjectLearningByPushing.h"

#include "HypothesisGeneration.h"
#include "HypothesisVisualization.h"
#include "HypothesisValidationRGBD.h"
30 #include "FeatureCalculation.h"
31 #include "GaussBackground.h"
32 #include "OLPTools.h"
33 #include "ObjectRecognition.h"
34 #include "SaliencyCalculation.h"
#include "PointCloudRegistration.h"
#include "SegmentedPointCloudFusion.h"
37 
38 
39 
40 // IVT
41 #include <Image/ByteImage.h>
42 #include <Image/ImageProcessor.h>
43 #include <Calibration/StereoCalibration.h>
44 #include <Calibration/Calibration.h>
45 #include <Math/LinearAlgebra.h>
46 #include <Math/FloatMatrix.h>
47 
48 
49 // OpenMP
50 #include <omp.h>
51 
52 
53 // VisionX
56 #include <VisionX/interface/components/Calibration.h>
57 
58 
59 // PCL
60 #include <pcl/point_types.h>
61 #include <pcl/point_cloud.h>
62 #include <pcl/io/pcd_io.h>
63 
64 
65 #include <sys/time.h>
66 #include <Eigen/Geometry>
67 
68 
69 
70 using namespace visionx;
71 using namespace armarx;
72 
73 
74 
75 
void ObjectLearningByPushing::CreateInitialObjectHypotheses(const Ice::Current& c)
{
78  listener->resetHypothesesStatus();
79  currentState = eCreatingInitialHypotheses;
80 }
81 
82 
83 
void ObjectLearningByPushing::ValidateInitialObjectHypotheses(const Ice::Current& c)
{
86  listener->resetHypothesesStatus();
87  currentState = eValidatingInitialHypotheses;
88 }
89 
90 
91 
void ObjectLearningByPushing::RevalidateConfirmedObjectHypotheses(const Ice::Current& c)
{
94  listener->resetHypothesesStatus();
95  currentState = eRevalidatingConfirmedHypotheses;
96 }
97 
98 
99 
types::PointList ObjectLearningByPushing::getObjectHypothesisPoints(const Ice::Current& c)
{
102  types::PointList hypothesisPoints;
103  std::vector<CHypothesisPoint*>* pPoints;
104  SelectPreferredHypothesis(pPoints);
105 
106  if (pPoints)
107  {
108  for (size_t i = 0; i < pPoints->size(); i++)
109  {
110  Eigen::Vector3f p(pPoints->at(i)->vPosition.x, pPoints->at(i)->vPosition.y, pPoints->at(i)->vPosition.z);
111  armarx::Vector3Ptr point = new armarx::Vector3(p);
112  hypothesisPoints.push_back(point);
113  }
114  }
115 
116  return hypothesisPoints;
117 }
118 
119 
120 
121 
122 types::PointList ObjectLearningByPushing::getScenePoints(const Ice::Current& c)
123 {
124  types::PointList scenePoints;
125  std::vector<CHypothesisPoint*>* points = m_pAllNewDepthMapPoints;
126  if (m_pAllNewDepthMapPoints->size() == 0)
127  {
128  points = m_pAllOldDepthMapPoints;
129  }
130 
131  for (size_t i = 0; i < points->size(); i++)
132  {
133  Eigen::Vector3f p(points->at(i)->vPosition.x, points->at(i)->vPosition.y, points->at(i)->vPosition.z);
134  armarx::Vector3Ptr point = new armarx::Vector3(p);
135  scenePoints.push_back(point);
136  }
137  return scenePoints;
138 }
139 
140 
141 
142 
143 Vector3BasePtr ObjectLearningByPushing::getUpwardsVector(const Ice::Current& c)
144 {
145  Vector3BasePtr upVector = new Vector3(upwardsVector.x, upwardsVector.y, upwardsVector.z);
146  return upVector;
147 }
148 
149 
150 
151 std::string ObjectLearningByPushing::getReferenceFrameName(const Ice::Current& c)
152 {
153  return cameraFrameName;
154 }
155 
156 
157 
158 
PoseBasePtr ObjectLearningByPushing::getLastObjectTransformation(const Ice::Current& c)
{
161  std::vector<CHypothesisPoint*>* pPoints;
162  CObjectHypothesis* pHypothesis = SelectPreferredHypothesis(pPoints);
163  PoseBasePtr result = NULL;
164 
165  if (pHypothesis)
166  {
        Eigen::Matrix3f rot = tools::convertIVTtoEigen(pHypothesis->mLastRotation);
        Eigen::Vector3f trans = tools::convertIVTtoEigen(pHypothesis->vLastTranslation);
169  result = new Pose(rot, trans);
170  }
171 
172  return result;
173 }
174 
175 
176 
177 void ObjectLearningByPushing::recognizeObject(const std::string& objectName, const Ice::Current& c)
178 {
179  UpdateDataFromRobot();
180  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);
181 
182  m_pFeatureCalculation->GetAllFeaturePoints(colorImageLeft, greyImageLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE, *m_pAllNewSIFTPoints,
183  *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
184 
185  RecognizeHypotheses(colorImageLeft, objectName);
186 }
187 
188 
189 
190 
void ObjectLearningByPushing::onInitPointCloudAndImageProcessor()
{
193  // todo: remove this again when done with the old learned objects
194  //convertFileOLPtoPCL("/homes/staff/schieben/datalog/test/BunteKanne.dat");
195 
196  ARMARX_VERBOSE << "entering onInitPointCloudAndImageProcessor()";
197 
198  imageProviderName = getProperty<std::string>("ImageProviderAdapterName").getValue();
199  usingImageProvider(imageProviderName);
200 
201  robotStateProxyName = getProperty<std::string>("RobotStateProxyName").getValue();
202  usingProxy(robotStateProxyName);
203 
204 
205  cameraFrameName = getProperty<std::string>("CameraFrameName").getValue();
206 
207  connected = false;
208  currentState = eNoHypotheses;
209 
210 
211  offeringTopic("ObjectLearningByPushing");
212  pointcloudProviderName = getProperty<std::string>("PointCloudProviderAdapterName").getValue();
213  usingPointCloudProvider(pointcloudProviderName);
214 
215 
216  offeringTopic(getProperty<std::string>("DebugDrawerTopicName").getValue());
217 }
218 
219 
220 
void ObjectLearningByPushing::onConnectPointCloudAndImageProcessor()
{
223  ARMARX_VERBOSE << "entering onConnectPointCloudAndImageProcessor()";
224 
225  // connect to point cloud provider
226  ARMARX_INFO << "Connecting to " << pointcloudProviderName;
227  // pointcloudProviderProxy = getProxy<CapturingPointCloudAndImageAndCalibrationProviderInterfacePrx>(pointcloudProviderName);
228  // calibration = tools::convert(pointcloudProviderProxy->getMonocularCalibration()); // == left stereo image
229  pointcloudProviderProxy = getProxy<RGBDPointCloudProviderInterfacePrx>(pointcloudProviderName);
230  calibration = tools::convert(pointcloudProviderProxy->getStereoCalibration().calibrationLeft);
231 
232  if (cameraFrameName != pointcloudProviderProxy->getReferenceFrame())
233  {
234  ARMARX_WARNING << "Specified camera frame name: '" << cameraFrameName << "' does not match frame given by provider '" << pointcloudProviderProxy->getReferenceFrame() << "'!";
235  }
236 
    // FIXME: workaround for depth camera sim frame misalignment
238  if (pointcloudProviderProxy->getReferenceFrame() == "DepthCameraSim")
239  {
        ARMARX_WARNING << "Simulation detected. Applying fix for DepthCameraSim misalignment.";
241 
242  // see http://ivt.sourceforge.net/doxygen/class_c_calibration.html#details
243  Mat3d R = tools::convert(pointcloudProviderProxy->getStereoCalibration().calibrationLeft.cameraParam.rotation);
244  // Eigen::Matrix3f worldToCamera = (Eigen::Matrix3f() << R.r1, R.r2, R.r3, R.r4, R.r5, R.r6, R.r7, R.r8, R.r9).finished()
245  // Eigen::Matrix3f cameraToImg = Eigen::AngleAxisf(-M_PI / 2.0f, Eigen::Vector3f::UnitZ()).toRotationMatrix();
246  Eigen::Matrix3f worldToCamera;
247  worldToCamera << R.r1, R.r2, R.r3, R.r4, R.r5, R.r6, R.r7, R.r8, R.r9;
248  Eigen::Matrix3f cameraToImg;
249  cameraToImg << 0, -1, 0, 1, 0, 0, 0, 0, 1; // rotate -90° around z
250 
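        // Compose the image-plane rotation with the provider's extrinsic rotation and write the
        // result back into the IVT calibration, so all later projections use the corrected camera orientation.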
251  worldToCamera = cameraToImg * worldToCamera;
252  R.r1 = worldToCamera(0, 0);
253  R.r2 = worldToCamera(0, 1);
254  R.r3 = worldToCamera(0, 2);
255  R.r4 = worldToCamera(1, 0);
256  R.r5 = worldToCamera(1, 1);
257  R.r6 = worldToCamera(1, 2);
258  R.r7 = worldToCamera(2, 0);
259  R.r8 = worldToCamera(2, 1);
260  R.r9 = worldToCamera(2, 2);
261 
262  calibration->SetRotation(R);
263  }
264 
265  pointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>());
266 
267  // connect to image provider
268  ARMARX_INFO << "Connecting to " << imageProviderName;
269  visionx::ImageProviderInfo imageProviderInfo = getImageProvider(imageProviderName);
270  imageProviderProxy = getProxy<ImageProviderInterfacePrx>(imageProviderName);
271 
272  colorImageLeft = tools::createByteImage(imageProviderInfo);
273  colorImageRight = tools::createByteImage(imageProviderInfo);
274 
275  colorImageLeftOld = new CByteImage(colorImageLeft);
276  greyImageLeft = new CByteImage(colorImageLeft->width, colorImageLeft->height, CByteImage::eGrayScale);
277  greyImageRight = new CByteImage(greyImageLeft);
278  resultImageLeft = new CByteImage(colorImageLeft);
279  resultImageRight = new CByteImage(colorImageLeft);
280  tempResultImage = new CByteImage(colorImageLeft);
281 
282  cameraImages = new CByteImage*[2];
283  cameraImages[0] = colorImageLeft;
284  cameraImages[1] = colorImageRight;
285 
286  resultImages = new CByteImage*[2];
287  resultImages[0] = resultImageLeft;
288  resultImages[1] = resultImageRight;
289 
290  m_pFeatureCalculation = new CFeatureCalculation();
291 
292  // CHypothesisGeneration creates the initial object hypotheses
293  m_pHypothesisGeneration = new CHypothesisGeneration(calibration);
294 
295  // visualizes the hypotheses
296  m_pHypothesisVisualization = new CHypothesisVisualization(calibration);
297 
298 
299  // screenshots, if wanted
300  m_sScreenshotPath = OLP_SCREENSHOT_PATH;
301  m_bMakeIntermediateScreenshots = OLP_MAKE_INTERMEDIATE_SCREENSHOTS;
302 
303  iterationCounter = 0;
304 
305 
306  ARMARX_INFO << "Connecting to " << robotStateProxyName;
307  robotStateProxy = getProxy<RobotStateComponentInterfacePrx>(robotStateProxyName);
308 
309 
310  localRobot = RemoteRobot::createLocalCloneFromFile(robotStateProxy, VirtualRobot::RobotIO::RobotDescription::eFull);
311 
312 
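    // Pick the number of OpenMP threads for the vision pipeline: use the OMP_NUM_THREADS define if set,
    // otherwise leave one core free when OLP_USE_DBVISION is defined, and never use fewer than one thread.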
313  int nNumOMPThreads;
314 #if defined OMP_NUM_THREADS
315  nNumOMPThreads = OMP_NUM_THREADS;
316 #elif defined OLP_USE_DBVISION
317  nNumOMPThreads = omp_get_num_procs() - 1;
318 #else
319  nNumOMPThreads = omp_get_num_procs();
320 #endif
321 
322  if (nNumOMPThreads < 1)
323  {
324  nNumOMPThreads = 1;
325  }
326 
327  omp_set_num_threads(nNumOMPThreads);
328 
329  m_pObjectHypotheses = new CObjectHypothesisArray();
330  m_pConfirmedHypotheses = new CObjectHypothesisArray();
331  m_pInitialHypothesesAtLocalMaxima = new CObjectHypothesisArray();
332  m_pAllNewSIFTPoints = new CSIFTFeatureArray();
333  m_pAllOldSIFTPoints = new CSIFTFeatureArray();
334  m_pAllNewMSERs = new std::vector<CMSERDescriptor3D*>();
335  m_pAllOldMSERs = new std::vector<CMSERDescriptor3D*>();
336  m_pCorrespondingMSERs = new std::vector<CMSERDescriptor3D*>();
337  m_pAllNewDepthMapPoints = new std::vector<CHypothesisPoint*>();
338  m_pAllOldDepthMapPoints = new std::vector<CHypothesisPoint*>();
339 
340  m_pGaussBackground = new CGaussBackground(OLP_IMG_WIDTH, OLP_IMG_HEIGHT);
341 
342  Math3d::SetVec(m_tHeadToPlatformTransformation.translation, Math3d::zero_vec);
343  Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, Math3d::zero_vec);
344  Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, Math3d::unit_mat);
345  Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, Math3d::unit_mat);
346 
347  Math3d::SetVec(upwardsVector, Math3d::zero_vec);
348 
349 
350  listener = getTopic<ObjectLearningByPushingListenerPrx>("ObjectLearningByPushing");
351 
352  enableResultImages(2, imageProviderInfo.imageFormat.dimension, imageProviderInfo.destinationImageType);
353 
354  // debug
355  m_pSegmentedBackgroundImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
356  m_pDisparityImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
357 
358  connected = true;
359 
360  debugDrawer = getTopic<DebugDrawerInterfacePrx>(getProperty<std::string>("DebugDrawerTopicName").getValue());
361  debugDrawer->clearLayer(getName());
362 
363  ARMARX_IMPORTANT << "init finished";
364 }
365 
366 
367 
368 
369 
void ObjectLearningByPushing::onExitPointCloudAndImageProcessor()
{
372  ARMARX_VERBOSE << "entering onExitPointCloudAndImageProcessor()";
373 
374  delete m_pHypothesisVisualization;
375  delete m_pHypothesisGeneration;
376 
377  for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
378  {
379  delete (*m_pObjectHypotheses)[i];
380  }
381 
382  delete m_pObjectHypotheses;
383 
384  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
385  {
386  delete (*m_pConfirmedHypotheses)[i];
387  }
388 
389  delete m_pConfirmedHypotheses;
390  delete m_pInitialHypothesesAtLocalMaxima;
391 
392 
393  for (int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
394  {
395  delete (*m_pAllNewSIFTPoints)[i];
396  }
397 
398  delete m_pAllNewSIFTPoints;
399 
400  for (int i = 0; i < m_pAllOldSIFTPoints->GetSize(); i++)
401  {
402  delete (*m_pAllOldSIFTPoints)[i];
403  }
404 
405  delete m_pAllOldSIFTPoints;
406 
407  for (size_t i = 0; i < m_pAllNewMSERs->size(); i++)
408  {
409  delete m_pAllNewMSERs->at(i);
410  }
411 
412  delete m_pAllNewMSERs;
413 
414  for (size_t i = 0; i < m_pAllOldMSERs->size(); i++)
415  {
416  delete m_pAllOldMSERs->at(i);
417  }
418 
419  delete m_pAllOldMSERs;
420 
421  for (size_t i = 0; i < m_pCorrespondingMSERs->size(); i++)
422  {
423  delete m_pCorrespondingMSERs->at(i);
424  }
425 
426  delete m_pCorrespondingMSERs;
427 
428  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
429  {
430  delete m_pAllNewDepthMapPoints->at(i);
431  }
432 
433  delete m_pAllNewDepthMapPoints;
434 
435  for (size_t i = 0; i < m_pAllOldDepthMapPoints->size(); i++)
436  {
437  delete m_pAllOldDepthMapPoints->at(i);
438  }
439 
440  delete m_pAllOldDepthMapPoints;
441 
442  delete m_pGaussBackground;
443 }
444 
445 
446 
447 
448 
void ObjectLearningByPushing::process()
{
451  if (!connected)
452  {
453  ARMARX_INFO << "process() called before ready (weird race condition with the ImageProcessor::process)";
454  return;
455  }
456 
457  bool bMakeScreenshots = OLP_MAKE_RESULT_SCREENSHOTS;
458 
459  std::lock_guard<std::mutex> lock(processingLock);
460 
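    // The processing loop is a state machine over the learning-by-pushing cycle: create initial hypotheses,
    // validate them after the first push, re-validate after every further push, or just forward images while idle.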
461  switch (currentState)
462  {
463  case eCreatingInitialHypotheses:
464  {
465  // get new camera images and camera pose from robot
466  UpdateDataFromRobot();
467  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);
468 
469  // create initial object hypotheses
470  CreateInitialObjectHypothesesInternal(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
471 
472  // show hypotheses
473  VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, false, resultImageLeft, resultImageRight, bMakeScreenshots);
474  provideResultImages(resultImages);
475 
476  // return information about object hypotheses and go to new state
477  if (GetNumberOfNonconfirmedHypotheses() > 0)
478  {
479  ARMARX_INFO << "Found " << GetNumberOfNonconfirmedHypotheses() << " possible objects";
480 
481  // return object position etc
482  ReportObjectPositionInformationToObserver();
483  listener->reportInitialObjectHypothesesCreated(true);
484 
485  currentState = eHasInitialHypotheses;
486  }
487  else
488  {
489  ARMARX_INFO << "No object hypothesis found";
490 
491  listener->reportInitialObjectHypothesesCreated(false);
492 
493  currentState = eNoHypotheses;
494  }
495 
496  iterationCounter++;
497 
498  break;
499  }
500 
501  case eValidatingInitialHypotheses:
502  {
503  // get new camera images and camera pose from robot
504  ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
505  UpdateDataFromRobot();
506  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);
507 
508  // try to validate the hypotheses
509  bool validationSuccessful = ValidateInitialObjectHypothesesInternal(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
510 
511  // show initial hypotheses plus those generated in changed image areas
512  VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeftOld, colorImageRight, false, resultImageRight, tempResultImage, bMakeScreenshots);
513  // show validated hypotheses
514  VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, true, resultImageLeft, tempResultImage, bMakeScreenshots);
515  provideResultImages(resultImages);
516 
517  if (validationSuccessful)
518  {
519  ARMARX_INFO << "At least one object hypothesis was confirmed";
520 
521  // return object position etc
522  ReportObjectPositionInformationToObserver();
523  listener->reportObjectHypothesesValidated(true);
524 
525  currentState = eHasConfirmedHypotheses;
526  }
527  else
528  {
529  ARMARX_INFO << "No object hypothesis was confirmed";
530 
531  listener->reportObjectHypothesesValidated(false);
532 
533  currentState = eNoHypotheses;
534  }
535 
536  iterationCounter++;
537 
538  break;
539  }
540 
541  case eRevalidatingConfirmedHypotheses:
542  {
543  // get new camera images and camera pose from robot
544  ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
545  UpdateDataFromRobot();
546  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);
547 
548  // try to revalidate the hypotheses
549  bool validationSuccessful = RevalidateConfirmedObjectHypothesesInternal(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
550 
551  // show validated hypothesis
552  VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, true, resultImageLeft, resultImageRight, bMakeScreenshots);
553  provideResultImages(resultImages);
554 
555  ARMARX_INFO << "The confirmed object hypotheses were revalidated";
556  ARMARX_IMPORTANT << "Was revalidation successful: " << validationSuccessful;
557 
558  // return object position etc
559  ReportObjectPositionInformationToObserver();
560  listener->reportObjectHypothesesValidated(validationSuccessful);
561 
562  currentState = eHasConfirmedHypotheses;
563 
564  iterationCounter++;
565 
566  break;
567  }
568 
569  case eHasInitialHypotheses:
570  case eHasConfirmedHypotheses:
571  {
572  provideResultImages(resultImages);
573  usleep(300000);
574  break;
575  }
576 
577  case eNoHypotheses:
578  {
579  UpdateDataFromRobot();
580  ::ImageProcessor::CopyImage(cameraImages[0], resultImages[0]);
581  ::ImageProcessor::CopyImage(cameraImages[0], resultImages[1]);
582  provideResultImages(resultImages);
583  usleep(300000);
584  break;
585  }
586 
587  case eQuit:
588  {
            ARMARX_IMPORTANT << "Received quit signal, but don't know what to do!";
590  break;
591  }
592 
593  default:
594  {
595  ARMARX_WARNING << "The current mode is not handled here!";
596  break;
597  }
598  }
599 }
600 
601 
602 
603 
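// Moves the feature sets of the current frame (SIFT, MSER, depth-map points) into the 'old' containers
// and clears the 'new' ones, so the next frame is matched against the previous one.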
604 void ObjectLearningByPushing::SwapAllPointsArraysToOld()
605 {
606  CSIFTFeatureArray* pTempSIFTPoints;
607  pTempSIFTPoints = m_pAllOldSIFTPoints;
608  m_pAllOldSIFTPoints = m_pAllNewSIFTPoints;
609  m_pAllNewSIFTPoints = pTempSIFTPoints;
610 
611  for (int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
612  {
613  delete (*m_pAllNewSIFTPoints)[i];
614  }
615 
616  m_pAllNewSIFTPoints->Clear();
617 
618  std::vector<CMSERDescriptor3D*>* pTempMSERs;
619  pTempMSERs = m_pAllOldMSERs;
620  m_pAllOldMSERs = m_pAllNewMSERs;
621  m_pAllNewMSERs = pTempMSERs;
622 
623  for (size_t i = 0; i < m_pAllNewMSERs->size(); i++)
624  {
625  delete m_pAllNewMSERs->at(i);
626  }
627 
628  m_pAllNewMSERs->clear();
629 
630  std::vector<CHypothesisPoint*>* pTempDepthMapPoints;
631  pTempDepthMapPoints = m_pAllOldDepthMapPoints;
632  m_pAllOldDepthMapPoints = m_pAllNewDepthMapPoints;
633  m_pAllNewDepthMapPoints = pTempDepthMapPoints;
634 
635  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
636  {
637  delete m_pAllNewDepthMapPoints->at(i);
638  }
639 
640  m_pAllNewDepthMapPoints->clear();
641 }
642 
643 
644 
645 
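// Extracts features from the current RGB-D frame, learns the background color model, and generates the
// initial object hypotheses; if OLP_FILTER_INITIAL_HYPOTHESES_WITH_MAXIMUMNESS is defined, only hypotheses
// located at local maxima found by the saliency calculation are kept.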
646 void ObjectLearningByPushing::CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
647  CByteImage* pImageColorLeft, CByteImage* pImageColorRight, int nImageNumber)
648 {
649  // clean up
650  for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
651  {
652  delete (*m_pObjectHypotheses)[i];
653  }
654 
655  m_pObjectHypotheses->Clear();
656 
657  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
658  {
659  delete (*m_pConfirmedHypotheses)[i];
660  }
661 
662  m_pConfirmedHypotheses->Clear();
663  m_pInitialHypothesesAtLocalMaxima->Clear();
664 
665  // get all interest points with descriptors
666  //m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageColorRight, pImageGreyLeft, pImageGreyRight, 2*OLP_DEPTH_MAP_PIXEL_DISTANCE,
667  // m_pStereoCalibration, *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, m_pDisparityImage);
668  m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, 2 * OLP_DEPTH_MAP_PIXEL_DISTANCE, *m_pAllNewSIFTPoints,
669  *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
670  //m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE, *m_pAllNewSIFTPoints,
671  // *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
672 
673  if (m_bMakeIntermediateScreenshots)
674  {
675  std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
676  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
677  m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
678 
679  // // TODO: test!
680  // CByteImage* pEdges = new CByteImage(m_pDisparityImage);
681  // COLPTools::CalculateEdgeImageFromImageAndDisparity(pImageGreyLeft, m_pDisparityImage, pEdges);
682  // sScreenshotFile = m_sScreenshotPath+"edges0000.bmp";
683  // COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
684  // pEdges->SaveToFile(sScreenshotFile.c_str());
685  // delete pEdges;
686  }
687 
688 
689  // learn background
690  m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
691 
692  // create hypotheses
693  m_pHypothesisGeneration->FindObjectHypotheses(pImageColorLeft, pImageColorRight, pImageGreyLeft, pImageGreyRight,
694  *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, *m_pObjectHypotheses);
695 
696 
697 #ifdef OLP_FILTER_INITIAL_HYPOTHESES_WITH_MAXIMUMNESS
698 
699  // remove hypotheses that are not local maxima
700  std::vector<Vec3d> aMaxima;
701  CByteImage* pMaximumnessImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
702  CSaliencyCalculation::FindLocalMaxima(*m_pAllNewDepthMapPoints, m_tHeadToPlatformTransformation.rotation, m_tHeadToPlatformTransformation.translation,
703  calibration, aMaxima, pMaximumnessImage);
704 
705  if (m_bMakeIntermediateScreenshots)
706  {
707  std::string sScreenshotFile = m_sScreenshotPath + "max0000.bmp";
708  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
709  pMaximumnessImage->SaveToFile(sScreenshotFile.c_str());
710  }
711 
712  for (int n = 0; n < m_pObjectHypotheses->GetSize(); n++)
713  {
714  CObjectHypothesis* pHypothesis = (*m_pObjectHypotheses)[n];
715  float fForegroundRatio;
716  int nNumForegroundPoints;
717  COLPTools::CalculateForegroundRatioOfHypothesis(pHypothesis, pMaximumnessImage, calibration, fForegroundRatio, nNumForegroundPoints);
718 
719  //ARMARX_VERBOSE << "Hypo " << n << ": " << pHypothesis->aNewPoints.size() << " p, maxness " << 100*fForegroundRatio;
720  if (fForegroundRatio > 0.4f)
721  {
722  m_pInitialHypothesesAtLocalMaxima->AddElement(pHypothesis);
723  }
724  }
725 
726  if ((m_pInitialHypothesesAtLocalMaxima->GetSize() == 0) && (m_pObjectHypotheses->GetSize() > 0))
727  {
728  m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[0]);
729  }
730 
731  m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft, pImageColorRight, *m_pInitialHypothesesAtLocalMaxima, *m_pAllOldSIFTPoints,
732  *m_pAllOldMSERs, *m_pCorrespondingMSERs, false, NULL, NULL, m_bMakeIntermediateScreenshots);
733 
734  delete pMaximumnessImage;
735 #else
736 
737  for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
738  {
739  m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[i]);
740  }
741 
742 #endif
743 
744  // copy all points to arrays for old ones
745  SwapAllPointsArraysToOld();
746 }
747 
748 
749 
750 
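// Called after the first push: segments the foreground against the learned background, extracts fresh features,
// and lets CHypothesisValidationRGBD confirm those initial hypotheses that actually moved;
// returns true if at least one hypothesis was confirmed.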
751 bool ObjectLearningByPushing::ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
752  CByteImage* pImageColorLeft, CByteImage* pImageColorRight, int nImageNumber)
753 {
754  //timeval tStart, tEnd;
755 
756  // clean up
757  for (int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
758  {
759  delete m_pCorrespondingMSERs->at(i);
760  }
761 
762  m_pCorrespondingMSERs->clear();
763 
764  // calculate background segmentation
765  //gettimeofday(&tStart, 0);
766  m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
767  BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
768  m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
769 
770  //gettimeofday(&tEnd, 0);
771  //long tTimeDiff = (1000*tEnd.tv_sec+tEnd.tv_usec/1000) - (1000*tStart.tv_sec+tStart.tv_usec/1000);
772  //ARMARX_VERBOSE << "Time for background: " << tTimeDiff << " ms";
773  if (m_bMakeIntermediateScreenshots)
774  {
775  std::string sScreenshotFile = m_sScreenshotPath + "bg0000.bmp";
776  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
777  m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
778  }
779 
780  // find new features in the actual image
781  m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE, *m_pAllNewSIFTPoints,
782  *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
783 
784 
785  if (m_bMakeIntermediateScreenshots)
786  {
787  std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
788  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
789  m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
790 
791  // TODO: test!
792  CByteImage* pEdges = new CByteImage(m_pDisparityImage);
793  COLPTools::CalculateEdgeImageFromImageAndDisparity(pImageGreyLeft, m_pDisparityImage, pEdges);
794  sScreenshotFile = m_sScreenshotPath + "edges0000.bmp";
795  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
796  pEdges->SaveToFile(sScreenshotFile.c_str());
797  delete pEdges;
798  }
799 
800  CByteImage* pHSVImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
801  ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
802 
803 
804  CHypothesisValidationRGBD::ValidateInitialHypotheses(*m_pAllOldDepthMapPoints, *m_pAllNewDepthMapPoints, m_pSegmentedBackgroundImage, pHSVImage, calibration, upwardsVector, m_pObjectHypotheses, m_pConfirmedHypotheses);
805 
806 
807  int nNumValidatedObjects = m_pConfirmedHypotheses->GetSize();
808 
809  // copy all points to arrays for old ones
810  SwapAllPointsArraysToOld();
811 
812  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
813  {
        ARMARX_VERBOSE << "Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " has been validated. "
815  << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size() << " confirmed points, " << (*m_pConfirmedHypotheses)[i]->aNewPoints.size()
816  << " new candidates. Position: (" << (*m_pConfirmedHypotheses)[i]->vCenter.x << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.y
817  << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.z << ")";
818  }
819 
    // sort hypotheses by the number of confirmed points (descending), simple insertion sort
821  for (int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
822  {
823  for (int j = i; j > 0; j--)
824  {
825  if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >= (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
826  {
827  break;
828  }
829 
830  CObjectHypothesis* pTemp = (*m_pConfirmedHypotheses)[j];
831  (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
832  (*m_pConfirmedHypotheses)[j - 1] = pTemp;
833  }
834  }
835 
837  {
838  SaveHistogramOfConfirmedHypothesis("NewObject", nImageNumber);
839  }
840 
841  delete pHSVImage;
842 
843  return (nNumValidatedObjects > 0);
844 }
845 
846 
847 
848 
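// Called after each further push: re-validates the confirmed hypotheses on the new frame, removes hypotheses
// that stopped moving (keeping at least one), fuses strongly overlapping ones, and returns false if nothing moved.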
849 bool ObjectLearningByPushing::RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight, CByteImage* pImageColorLeft, CByteImage* pImageColorRight, int nImageNumber)
850 {
851  //timeval tStart, tEnd;
852 
853  // clean up
854  for (int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
855  {
856  delete m_pCorrespondingMSERs->at(i);
857  }
858 
859  m_pCorrespondingMSERs->clear();
860 
861  // calculate background segmentation
862  //gettimeofday(&tStart, 0);
863  m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
864  BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
865  m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
866 
867  //gettimeofday(&tEnd, 0);
868  //long tTimeDiff = (1000*tEnd.tv_sec+tEnd.tv_usec/1000) - (1000*tStart.tv_sec+tStart.tv_usec/1000);
869  //ARMARX_VERBOSE << "Time for background: " << tTimeDiff << " ms";
870  if (m_bMakeIntermediateScreenshots)
871  {
872  std::string sScreenshotFile = m_sScreenshotPath + "bg0000.bmp";
873  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
874  m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
875  }
876 
877 
878 
879  // find new features in the actual image
880  m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE, *m_pAllNewSIFTPoints,
881  *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
882 
883 
885  {
886  ARMARX_INFO << "Saving complete scene pointcloud";
887 
888  // get subset of depthmap without nan and (0,0,0) points
889  std::vector<Vec3d> all3DPoints;
890 
891  for (size_t i = 0; i < pointCloudPtr->size(); i++)
892  {
893  pcl::PointXYZRGBA point = pointCloudPtr->at(i);
894 
895  if (!isnan(point.x))
896  {
897  if (point.x * point.x + point.y * point.y + point.z * point.z > 0)
898  {
899  Vec3d point3d = {point.x, point.y, point.z};
900  all3DPoints.push_back(point3d);
901  }
902  }
903  }
904 
905  // all 3D points
906  setlocale(LC_NUMERIC, "C");
907  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
908  sFileName.append("all3Dpoints00.dat");
909  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
910  FILE* pFile = fopen(sFileName.c_str(), "wt");
911  fprintf(pFile, "%ld \n", all3DPoints.size());
912  Vec3d center = {0, 0, 0};
913 
914  for (size_t i = 0; i < all3DPoints.size(); i++)
915  {
916  fprintf(pFile, "%e %e %e \n", all3DPoints.at(i).x, all3DPoints.at(i).y, all3DPoints.at(i).z);
917  Math3d::AddToVec(center, all3DPoints.at(i));
918  }
919 
920  fclose(pFile);
921 
922  // all 3D points with zero mean
923  sFileName = OLP_OBJECT_LEARNING_DIR;
924  sFileName.append("all3Dpoints-centered00.dat");
925  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
926  pFile = fopen(sFileName.c_str(), "wt");
927  fprintf(pFile, "%ld \n", all3DPoints.size());
928  Math3d::MulVecScalar(center, 1.0 / all3DPoints.size(), center);
929 
930  for (size_t i = 0; i < all3DPoints.size(); i++)
931  {
932  fprintf(pFile, "%e %e %e \n", all3DPoints.at(i).x - center.x, all3DPoints.at(i).y - center.y, all3DPoints.at(i).z - center.z);
933  }
934 
935  fclose(pFile);
936 
937  // all used points
938  setlocale(LC_NUMERIC, "C");
939  sFileName = OLP_OBJECT_LEARNING_DIR;
940  sFileName.append("allusedpoints00.dat");
941  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
942  pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%ld \n", m_pAllNewDepthMapPoints->size()); // count of the used depth-map points written below
944  Math3d::SetVec(center, Math3d::zero_vec);
945 
946  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
947  {
948  fprintf(pFile, "%e %e %e \n", m_pAllNewDepthMapPoints->at(i)->vPosition.x, m_pAllNewDepthMapPoints->at(i)->vPosition.y,
949  m_pAllNewDepthMapPoints->at(i)->vPosition.z);
950  Math3d::AddToVec(center, m_pAllNewDepthMapPoints->at(i)->vPosition);
951  }
952 
953  fclose(pFile);
954 
955  // all used points with zero mean
956  sFileName = OLP_OBJECT_LEARNING_DIR;
957  sFileName.append("allusedpoints-centered00.dat");
958  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
959  pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%ld \n", m_pAllNewDepthMapPoints->size()); // count of the used depth-map points written below
    Math3d::MulVecScalar(center, 1.0 / m_pAllNewDepthMapPoints->size(), center);
962 
963  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
964  {
965  fprintf(pFile, "%e %e %e \n", m_pAllNewDepthMapPoints->at(i)->vPosition.x - center.x,
966  m_pAllNewDepthMapPoints->at(i)->vPosition.y - center.y, m_pAllNewDepthMapPoints->at(i)->vPosition.z - center.z);
967  }
968 
969  fclose(pFile);
970 
971  // camera image
972  sFileName = OLP_OBJECT_LEARNING_DIR;
973  sFileName.append("cameraimage00.bmp");
974  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
975  pImageColorLeft->SaveToFile(sFileName.c_str());
976 
977  // vector in direction of gravity (up)
978  sFileName = OLP_OBJECT_LEARNING_DIR;
979  sFileName.append("upwardsvector00.dat");
980  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
981  pFile = fopen(sFileName.c_str(), "wt");
982  fprintf(pFile, "%e %e %e \n", upwardsVector.x, upwardsVector.y, upwardsVector.z);
983  fclose(pFile);
984  }
985 
986 
987  if (m_bMakeIntermediateScreenshots)
988  {
989  std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
990  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
991  m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
992  }
993 
994 
995  // re-validate the confirmed hypothesis
996 
997  CByteImage* pHSVImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
998  ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
999 
1000  CHypothesisValidationRGBD::RevalidateHypotheses(*m_pAllNewDepthMapPoints, m_pSegmentedBackgroundImage, pHSVImage, calibration, upwardsVector, m_pConfirmedHypotheses);
1001 
1002 
1003  // sort by size
1004  for (int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
1005  {
1006  for (int j = i; j > 0; j--)
1007  {
1008  if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >= (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
1009  {
1010  break;
1011  }
1012 
1013  CObjectHypothesis* pTemp = (*m_pConfirmedHypotheses)[j];
1014  (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
1015  (*m_pConfirmedHypotheses)[j - 1] = pTemp;
1016  }
1017  }
1018 
1019 
    // remove hypotheses that did not move during the last nNumTimesHypothesisMustHaveMoved revalidations, but always keep at least one
1021  const int nNumTimesHypothesisMustHaveMoved = 1;
1022  bool nothingMoved = false;
1023 
1024  for (int i = m_pConfirmedHypotheses->GetSize() - 1; i >= 0 && m_pConfirmedHypotheses->GetSize() > 1; i--)
1025  {
1026  bool bHasMoved = false;
1027  const int nNumEntries = (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.size();
1028 
1029  for (int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved; j--)
1030  {
1031  bHasMoved |= (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.at(j);
1032  }
1033 
1034  if (!bHasMoved)
1035  {
1036  ARMARX_VERBOSE << "Removing hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " because it did not move the last " << nNumTimesHypothesisMustHaveMoved << " times.";
1037  delete (*m_pConfirmedHypotheses)[i];
1038 
1039  for (int k = i; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
1040  {
1041  (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
1042  }
1043 
1044  m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
1045  }
1046  }
1047 
1048  // if none of the hypotheses moved, return false in the end
1049  if (m_pConfirmedHypotheses->GetSize() == 1)
1050  {
1051  bool bHasMoved = false;
1052  const int nNumEntries = (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.size();
1053 
1054  for (int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved; j--)
1055  {
1056  bHasMoved |= (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.at(j);
1057  }
1058 
1059  if (!bHasMoved)
1060  {
1061  nothingMoved = true;
1062  }
1063  }
1064 
1065 
1066 
1067  // fuse hypotheses that overlap significantly
1068  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
1069  {
1070  for (int j = i + 1; j < m_pConfirmedHypotheses->GetSize(); j++)
1071  {
1072  float fOverlapRatio = COLPTools::GetHypothesesIntersectionRatio((*m_pConfirmedHypotheses)[i], (*m_pConfirmedHypotheses)[j], calibration);
1073  ARMARX_VERBOSE << "Overlap " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " " << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber << ": " << fOverlapRatio;
1074 
1075  if (fOverlapRatio > 0.5)
1076  {
1077  ARMARX_VERBOSE << "Removing hypothesis " << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber;
1078  delete (*m_pConfirmedHypotheses)[j];
1079 
1080  for (int k = j; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
1081  {
1082  (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
1083  }
1084 
1085  m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
1086  j--;
1087  }
1088  }
1089  }
1090 
1091 
1092  // copy all points to arrays for old ones
1093  SwapAllPointsArraysToOld();
1094 
1095  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
1096  {
1097  ARMARX_VERBOSE << "Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " has been revalidated. " << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size()
1098  << " confirmed points, " << (*m_pConfirmedHypotheses)[i]->aNewPoints.size() << " new candidates. Position: ("
1099  << (*m_pConfirmedHypotheses)[i]->vCenter.x << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.y
1100  << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.z << ")";
1101  }
1102 
1104  {
1105  SaveHistogramOfConfirmedHypothesis("NewObject", nImageNumber);
1106  }
1107 
1108  delete pHSVImage;
1109 
1110  // return false if none of the hypotheses moved (or the movement estimation failed)
1111  return !nothingMoved;
1112 }
1113 
1114 
1115 
1116 
1117 
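// Grabs the newest point cloud (and, for unorganized clouds, the camera images), synchronizes the local robot
// clone to the data timestamp, and updates the camera pose and the gravity ('upwards') direction in camera coordinates.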
1118 void ObjectLearningByPushing::UpdateDataFromRobot()
1119 {
1120  if (!waitForPointClouds(pointcloudProviderName) || !waitForImages(imageProviderName))
1121  {
1122  ARMARX_WARNING << "Timeout or error while waiting for point cloud data";
1123  }
1124  else
1125  {
1126  if (getPointClouds(pointcloudProviderName, pointCloudPtr))
1127  {
1128  // get images
1129  Ice::Long timestamp;
1130  if (pointCloudPtr->isOrganized())
1131  {
1132  size_t h = cameraImages[0]->height; // pointCloudPtr->height;
1133  size_t w = cameraImages[0]->width; // pointCloudPtr->width;
1134 
1135  for (size_t y = 0; y < h; y++)
1136  {
1137  for (size_t x = 0; x < w; x++)
1138  {
1139  const pcl::PointXYZRGBA& p = pointCloudPtr->at(x, y);
1140  cameraImages[0]->pixels[3 * (y * w + x) + 0] = p.r;
1141  cameraImages[0]->pixels[3 * (y * w + x) + 1] = p.g;
1142  cameraImages[0]->pixels[3 * (y * w + x) + 2] = p.b;
1143  }
1144  }
1145 
1146  timestamp = pointCloudPtr->header.stamp;
1147  }
1148  else
1149  {
1150  armarx::MetaInfoSizeBasePtr metaInfo;
1151  int nNumberImages = getImages(imageProviderName, cameraImages, metaInfo);
1152  ARMARX_DEBUG << "got " << nNumberImages << " images";
1153 
1154  timestamp = metaInfo->timeProvided;
1155  }
1156 
1157  RemoteRobot::synchronizeLocalCloneToTimestamp(localRobot, robotStateProxy, timestamp);
1158 
1159  ::ImageProcessor::ConvertImage(colorImageLeft, greyImageLeft);
1160  //::ImageProcessor::ConvertImage(colorImageRight, greyImageRight);
1161 
1162  Eigen::Matrix4f cameraNodePose = localRobot->getRobotNode(cameraFrameName)->getPoseInRootFrame();
1163  tHeadToPlatformTransformation.rotation.r1 = cameraNodePose(0, 0);
1164  tHeadToPlatformTransformation.rotation.r2 = cameraNodePose(0, 1);
1165  tHeadToPlatformTransformation.rotation.r3 = cameraNodePose(0, 2);
1166  tHeadToPlatformTransformation.rotation.r4 = cameraNodePose(1, 0);
1167  tHeadToPlatformTransformation.rotation.r5 = cameraNodePose(1, 1);
1168  tHeadToPlatformTransformation.rotation.r6 = cameraNodePose(1, 2);
1169  tHeadToPlatformTransformation.rotation.r7 = cameraNodePose(2, 0);
1170  tHeadToPlatformTransformation.rotation.r8 = cameraNodePose(2, 1);
1171  tHeadToPlatformTransformation.rotation.r9 = cameraNodePose(2, 2);
1172  tHeadToPlatformTransformation.translation.x = cameraNodePose(0, 3);
1173  tHeadToPlatformTransformation.translation.y = cameraNodePose(1, 3);
1174  tHeadToPlatformTransformation.translation.z = cameraNodePose(2, 3);
1175 
1176  Eigen::Vector3f upInRootCS = {0, 0, 1};
1177  Eigen::Vector3f upInCamCS = cameraNodePose.block<3, 3>(0, 0).inverse() * upInRootCS;
1178  Math3d::SetVec(upwardsVector, upInCamCS(0), upInCamCS(1), upInCamCS(2));
1179  }
1180  }
1181 }
1182 
1183 
1184 
1185 
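// Stores the best confirmed hypothesis as an RGBD descriptor and as a PCD file under the given object name;
// returns false if no confirmed hypothesis exists.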
1186 bool ObjectLearningByPushing::SaveHistogramOfConfirmedHypothesis(std::string sObjectName, int nDescriptorNumber)
1187 {
1188  if (m_pConfirmedHypotheses->GetSize() > 0)
1189  {
1190  ARMARX_INFO << "Saving object descriptor";
1191  CObjectRecognition::SaveObjectDescriptorRGBD((*m_pConfirmedHypotheses)[0], sObjectName, nDescriptorNumber);
1192  CObjectRecognition::SaveObjectDescriptorPCD((*m_pConfirmedHypotheses)[0], sObjectName, nDescriptorNumber);
1193  return true;
1194  }
1195  else
1196  {
1197  ARMARX_VERBOSE << "No confirmed hypothesis that can be saved!";
1198  return false;
1199  }
1200 }
1201 
1202 
1203 
1204 
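// Recognition helper: only the named-object branch is currently active; the remaining branches are kept as
// disabled experiments (guarded by '&& false' or 'else if (false)').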
1205 void ObjectLearningByPushing::RecognizeHypotheses(CByteImage* pImageColorLeft, const std::string objectName)
1206 {
1207  CByteImage* pHSVImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24, false);
1208  ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
1209 
1210  if (!objectName.empty())
1211  {
1212  CObjectRecognition::FindObjectWithManyDescriptorsRGBD(pHSVImage, pImageColorLeft, *m_pAllNewDepthMapPoints, calibration, upwardsVector, objectName, 5);
1213  }
1214  else if (m_pConfirmedHypotheses->GetSize() != 0 && false)
1215  {
1216  Vec3d vPosition;
1217  Mat3d mOrientation;
1218  float fdistance, fProbability;
1219  CObjectRecognition::FindObjectRGBD((*m_pConfirmedHypotheses)[0], pHSVImage, *m_pAllNewDepthMapPoints, calibration, upwardsVector, vPosition, mOrientation, fdistance, fProbability);
1220  }
1221  else if (false)
1222  {
1223  std::vector<std::string> aNames;
1224  std::vector<Vec3d> aPositions;
1225  std::vector<Mat3d> aOrientations;
1226  std::vector<float> aProbabilities;
1227  CObjectRecognition::FindAllObjectsRGBD(pHSVImage, pImageColorLeft, *m_pAllNewDepthMapPoints, calibration, upwardsVector, aNames, aPositions, aOrientations, aProbabilities);
1228  //CObjectRecognition::FindAllObjectsRFCH(pImageColorLeft, calibration, *m_pAllOldDepthMapPoints, aNames, aPositions, aOrientations, aProbabilities);
1229  }
1230  else if (false)
1231  {
1232  //CObjectHypothesis* pHypothesis;
1233  //Vec3d vPosition;
1234  //Mat3d mOrientation;
1235  //float fdistance, fProbability;
1236  //CObjectRecognition::LoadObjectDescriptorRGBD("BlueBowl", 3, pHypothesis);
1237  //CObjectRecognition::FindObject(pHypothesis, pHSVImage, pImageColorLeft, *m_pAllOldDepthMapPoints, calibration, vPosition, mOrientation, fdistance, fProbability);
1238  }
1239 
1240 
1241  delete pHSVImage;
1242 }
1243 
1244 
1245 
1246 
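// Renders either the confirmed or the not-yet-confirmed hypotheses into the result images via the visualization helper.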
1247 void ObjectLearningByPushing::VisualizeHypotheses(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
1248  CByteImage* pImageColorLeft, CByteImage* pImageColorRight,
1249  bool bShowConfirmedHypotheses, CByteImage* resultImageLeft,
1250  CByteImage* resultImageRight, bool bMakeScreenshot)
1251 {
1252  if (bShowConfirmedHypotheses)
1253  {
1254  m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft, pImageColorRight, *m_pConfirmedHypotheses,
1255  *m_pAllOldSIFTPoints, *m_pAllOldMSERs, *m_pCorrespondingMSERs, true,
1256  resultImageLeft, resultImageRight, bMakeScreenshot);
1257  }
1258  else
1259  {
1260  m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft, pImageColorRight, *m_pObjectHypotheses,
1261  *m_pAllOldSIFTPoints, *m_pAllOldMSERs, *m_pCorrespondingMSERs, false,
1262  resultImageLeft, resultImageRight, bMakeScreenshot);
1263  }
1264 }
1265 
1266 
1267 
1268 
1269 
1270 
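// Returns the center and maximum extent of the preferred hypothesis, or a zero vector (and zero extent) if none is available.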
1271 Vec3d ObjectLearningByPushing::GetObjectPosition(float& fObjectExtent, bool bPreferCentralObject)
1272 {
1273  Vec3d vRet = {0, 0, 0};
1274  fObjectExtent = 0;
1275 
1276  std::vector<CHypothesisPoint*>* pPoints;
1277  CObjectHypothesis* pHypothesis = SelectPreferredHypothesis(pPoints, bPreferCentralObject);
1278 
1279  if (pHypothesis)
1280  {
1281  vRet.x = pHypothesis->vCenter.x;
1282  vRet.y = pHypothesis->vCenter.y;
1283  vRet.z = pHypothesis->vCenter.z;
1284  fObjectExtent = pHypothesis->fMaxExtent;
1285  }
1286 
1287  return vRet;
1288 }
1289 
1290 
1291 
1292 
1293 
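// Applies a rigid transformation (rotation plus translation, e.g. to compensate head motion between frames)
// to all point sets of the first confirmed hypothesis.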
1294 void ObjectLearningByPushing::ApplyHeadMotionTransformation(Mat3d mRotation, Vec3d vTranslation)
1295 {
1296  if (m_pConfirmedHypotheses->GetSize() > 0)
1297  {
1298  for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aNewPoints.size(); i++)
1299  {
1300  Math3d::MulMatVec(mRotation, (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition, vTranslation, (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition);
1301  }
1302 
1303  for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.size(); i++)
1304  {
1305  Math3d::MulMatVec(mRotation, (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition, vTranslation, (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition);
1306  }
1307 
1308  for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.size(); i++)
1309  {
1310  Math3d::MulMatVec(mRotation, (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition, vTranslation, (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition);
1311  }
1312  }
1313 }
1314 
1315 
1316 
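// Projects the points of the preferred hypothesis into the image and returns their axis-aligned 2D bounding box;
// the box stays inverted (1000/-1000) if no confirmed hypothesis exists.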
1317 void ObjectLearningByPushing::GetHypothesisBoundingBox(int& nMinX, int& nMaxX, int& nMinY, int& nMaxY)
1318 {
1319  std::vector<CHypothesisPoint*>* pPoints;
1320  SelectPreferredHypothesis(pPoints);
1321 
1322  nMinX = 1000;
1323  nMaxX = -1000;
1324  nMinY = 1000;
1325  nMaxY = -1000;
1326  Vec2d vPoint2D;
1327 
1328  if (m_pConfirmedHypotheses->GetSize() > 0)
1329  {
1330  for (size_t i = 0; i < pPoints->size(); i++)
1331  {
1332  calibration->CameraToImageCoordinates(pPoints->at(i)->vPosition, vPoint2D, false);
1333 
1334  if (vPoint2D.x < nMinX)
1335  {
1336  nMinX = vPoint2D.x;
1337  }
1338 
1339  if (vPoint2D.x > nMaxX)
1340  {
1341  nMaxX = vPoint2D.x;
1342  }
1343 
1344  if (vPoint2D.y < nMinY)
1345  {
1346  nMinY = vPoint2D.y;
1347  }
1348 
1349  if (vPoint2D.y > nMaxY)
1350  {
1351  nMaxY = vPoint2D.y;
1352  }
1353  }
1354  }
1355 
1356  ARMARX_VERBOSE << "ObjectLearningByPushing::GetHypothesisBoundingBox: " << nMinX << " " << nMaxX << " " << nMinY << " " << nMaxY;
1357 }
1358 
1359 
1360 
1361 
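// Runs a PCA over the points of the preferred hypothesis to get its principal axes, eigenvalues and maximal extents,
// then projects the rectangle spanned by the first two axes into the image and sorts the corners into
// upper/lower and left/right order.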
1362 void ObjectLearningByPushing::GetHypothesisPrincipalAxesAndBoundingBox(Vec3d& vPrincipalAxis1, Vec3d& vPrincipalAxis2, Vec3d& vPrincipalAxis3,
1363  Vec3d& vEigenValues, Vec3d& vMaxExtentFromCenter,
1364  Vec2d& vBoundingBoxLU, Vec2d& vBoundingBoxRU, Vec2d& vBoundingBoxLL, Vec2d& vBoundingBoxRL)
1365 {
1366  std::vector<CHypothesisPoint*>* pPoints;
1367  SelectPreferredHypothesis(pPoints);
1368 
1369  const int nNumberOfSamples = pPoints->size();
1370 
1371  if (nNumberOfSamples < 3)
1372  {
1373  ARMARX_WARNING << "Hypothesis contains " << nNumberOfSamples << " points - aborting!";
1374  return;
1375  }
1376 
1377  // compute the principal axes with PCA
1378 
1379  Vec3d vAvgPoint = {0.0f, 0.0f, 0.0f};
1380 
1381  for (int i = 0; i < nNumberOfSamples; i++)
1382  {
1383  vAvgPoint.x += pPoints->at(i)->vPosition.x;
1384  vAvgPoint.y += pPoints->at(i)->vPosition.y;
1385  vAvgPoint.z += pPoints->at(i)->vPosition.z;
1386  }
1387 
1388  vAvgPoint.x /= (float)nNumberOfSamples;
1389  vAvgPoint.y /= (float)nNumberOfSamples;
1390  vAvgPoint.z /= (float)nNumberOfSamples;
1391  //ARMARX_VERBOSE << "vAvgPoint: (" << vAvgPoint.x << ", " << vAvgPoint.y << ", " << vAvgPoint.z << ")";
1392 
1393  CFloatMatrix mSamplePoints(3, nNumberOfSamples);
1394 
1395  for (int i = 0; i < nNumberOfSamples; i++)
1396  {
1397  mSamplePoints(0, i) = pPoints->at(i)->vPosition.x - vAvgPoint.x;
1398  mSamplePoints(1, i) = pPoints->at(i)->vPosition.y - vAvgPoint.y;
1399  mSamplePoints(2, i) = pPoints->at(i)->vPosition.z - vAvgPoint.z;
1400  }
1401 
1402  CFloatMatrix mEigenVectors(3, 3);
1403  CFloatMatrix mEigenValues(1, 3);
1404 
1405  LinearAlgebra::PCA(&mSamplePoints, &mEigenVectors, &mEigenValues);
1406 
1407  vPrincipalAxis1.x = mEigenVectors(0, 0);
1408  vPrincipalAxis1.y = mEigenVectors(1, 0);
1409  vPrincipalAxis1.z = mEigenVectors(2, 0);
1410 
1411  vPrincipalAxis2.x = mEigenVectors(0, 1);
1412  vPrincipalAxis2.y = mEigenVectors(1, 1);
1413  vPrincipalAxis2.z = mEigenVectors(2, 1);
1414 
1415  vPrincipalAxis3.x = mEigenVectors(0, 2);
1416  vPrincipalAxis3.y = mEigenVectors(1, 2);
1417  vPrincipalAxis3.z = mEigenVectors(2, 2);
1418 
1419  vEigenValues.x = mEigenValues(0, 0);
1420  vEigenValues.y = mEigenValues(0, 1);
1421  vEigenValues.z = mEigenValues(0, 2);
1422  //ARMARX_VERBOSE << "Eigenvalues: (" << pow(vEigenValues.x, 0.3333f) << ", " << pow(vEigenValues.y, 0.3333f) << ", " << pow(vEigenValues.z, 0.3333f) << ")";
1423 
1424  // matrix that transforms the 3D points into the coordinate system
1425  // given by the eigenvectors
1426  Mat3d mTransformation;
1427  mTransformation.r1 = mEigenVectors(0, 0);
1428  mTransformation.r2 = mEigenVectors(1, 0);
1429  mTransformation.r3 = mEigenVectors(2, 0);
1430  mTransformation.r4 = mEigenVectors(0, 1);
1431  mTransformation.r5 = mEigenVectors(1, 1);
1432  mTransformation.r6 = mEigenVectors(2, 1);
1433  mTransformation.r7 = mEigenVectors(0, 2);
1434  mTransformation.r8 = mEigenVectors(1, 2);
1435  mTransformation.r9 = mEigenVectors(2, 2);
1436 
1437  // find the maximal extent along the principal axes
1438  Vec3d vTransformedPoint;
1439  float fMaxExtentX = 0, fMaxExtentY = 0, fMaxExtentZ = 0;
1440 
1441  for (int i = 0; i < nNumberOfSamples; i++)
1442  {
1443  Vec3d vTempPoint = {mSamplePoints(0, i), mSamplePoints(1, i), mSamplePoints(2, i)}; // = original point minus average point
1444  Math3d::MulMatVec(mTransformation, vTempPoint, vTransformedPoint);
1445 
1446  if (fabsf(vTransformedPoint.x) > fMaxExtentX)
1447  {
1448  fMaxExtentX = fabsf(vTransformedPoint.x);
1449  }
1450 
1451  if (fabsf(vTransformedPoint.y) > fMaxExtentY)
1452  {
1453  fMaxExtentY = fabsf(vTransformedPoint.y);
1454  }
1455 
1456  if (fabsf(vTransformedPoint.z) > fMaxExtentZ)
1457  {
1458  fMaxExtentZ = fabsf(vTransformedPoint.z);
1459  }
1460  }
1461 
1462  vMaxExtentFromCenter.x = fMaxExtentX;
1463  vMaxExtentFromCenter.y = fMaxExtentY;
1464  vMaxExtentFromCenter.z = fMaxExtentZ;
1465  //ARMARX_VERBOSE << "vMaxExtentFromCenter: (" << vMaxExtentFromCenter.x << ", " << vMaxExtentFromCenter.y << ", " << vMaxExtentFromCenter.z << ")";
1466 
1467 
1468  // calculate the corners of the surface defined by the first two principal axes
1469  Vec3d vTranslationPrincipalAxis1, vTranslationPrincipalAxis2;
1470  Math3d::MulVecScalar(vPrincipalAxis1, 0.5f * (2.5f * pow(vEigenValues.x, 0.3333f) + vMaxExtentFromCenter.x), vTranslationPrincipalAxis1);
1471  Math3d::MulVecScalar(vPrincipalAxis2, 0.5f * (2.5f * pow(vEigenValues.y, 0.3333f) + vMaxExtentFromCenter.y), vTranslationPrincipalAxis2);
1472 
1473  Vec3d vCorner1, vCorner2, vCorner3, vCorner4;
1474 
1475  Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner1);
1476  Math3d::SubtractFromVec(vCorner1, vTranslationPrincipalAxis2);
1477 
1478  Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner2);
1479  Math3d::AddToVec(vCorner2, vTranslationPrincipalAxis2);
1480 
1481  Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner3);
1482  Math3d::SubtractFromVec(vCorner3, vTranslationPrincipalAxis2);
1483 
1484  Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner4);
1485  Math3d::AddToVec(vCorner4, vTranslationPrincipalAxis2);
1486 
1487  // project the corners into the image
1488  calibration->CameraToImageCoordinates(vCorner1, vBoundingBoxLU, false);
1489  calibration->CameraToImageCoordinates(vCorner2, vBoundingBoxRU, false);
1490  calibration->CameraToImageCoordinates(vCorner3, vBoundingBoxLL, false);
1491  calibration->CameraToImageCoordinates(vCorner4, vBoundingBoxRL, false);
1492 
1493  // sort the projected points: first assert that the two upper are above the two lower,
1494  // then check left and right for each pair
1495  Vec2d vTemp;
1496 
1497  if (vBoundingBoxLU.y > vBoundingBoxLL.y)
1498  {
1499  Math2d::SetVec(vTemp, vBoundingBoxLU);
1500  Math2d::SetVec(vBoundingBoxLU, vBoundingBoxLL);
1501  Math2d::SetVec(vBoundingBoxLL, vTemp);
1502  }
1503 
1504  if (vBoundingBoxLU.y > vBoundingBoxRL.y)
1505  {
1506  Math2d::SetVec(vTemp, vBoundingBoxLU);
1507  Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRL);
1508  Math2d::SetVec(vBoundingBoxRL, vTemp);
1509  }
1510 
1511  if (vBoundingBoxRU.y > vBoundingBoxLL.y)
1512  {
1513  Math2d::SetVec(vTemp, vBoundingBoxRU);
1514  Math2d::SetVec(vBoundingBoxRU, vBoundingBoxLL);
1515  Math2d::SetVec(vBoundingBoxLL, vTemp);
1516  }
1517 
1518  if (vBoundingBoxRU.y > vBoundingBoxRL.y)
1519  {
1520  Math2d::SetVec(vTemp, vBoundingBoxRU);
1521  Math2d::SetVec(vBoundingBoxRU, vBoundingBoxRL);
1522  Math2d::SetVec(vBoundingBoxRL, vTemp);
1523  }
1524 
1525  if (vBoundingBoxLU.x > vBoundingBoxRU.x)
1526  {
1527  Math2d::SetVec(vTemp, vBoundingBoxLU);
1528  Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRU);
1529  Math2d::SetVec(vBoundingBoxRU, vTemp);
1530  }
1531 
1532  if (vBoundingBoxLL.x > vBoundingBoxRL.x)
1533  {
1534  Math2d::SetVec(vTemp, vBoundingBoxLL);
1535  Math2d::SetVec(vBoundingBoxLL, vBoundingBoxRL);
1536  Math2d::SetVec(vBoundingBoxRL, vTemp);
1537  }
1538 
1539  //ARMARX_VERBOSE << "GetHypothesisPrincipalAxesAndBoundingBox(): BB: (" << vBoundingBoxLU.x << ", " << vBoundingBoxLU.y << ") (" << vBoundingBoxRU.x << ", "
1540  // << vBoundingBoxRU.y << ") (" << vBoundingBoxLL.x << ", " << vBoundingBoxLL.y << ") (" << vBoundingBoxRL.x << ", " << vBoundingBoxRL.y << ")";
1541 }
1542 
1543 
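// Gathers position, extent, principal axes and eigenvalues of the preferred hypothesis, publishes them to the
// listener topic, and draws a debug sphere at the global object position.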
1544 void ObjectLearningByPushing::ReportObjectPositionInformationToObserver()
1545 {
1546  Vec3d vHypothesisPosition, vPrincipalAxis1, vPrincipalAxis2, vPrincipalAxis3, vEigenValues, vMaxExtents;
1547  Vec2d vBoundingBoxLeftUpper, vBoundingBoxRightUpper, vBoundingBoxLeftLower, vBoundingBoxRightLower;
1548  float fHypothesisExtent;
1549  vHypothesisPosition = GetObjectPosition(fHypothesisExtent);
1550  GetHypothesisPrincipalAxesAndBoundingBox(vPrincipalAxis1, vPrincipalAxis2, vPrincipalAxis3, vEigenValues, vMaxExtents,
1551  vBoundingBoxLeftUpper, vBoundingBoxRightUpper, vBoundingBoxLeftLower, vBoundingBoxRightLower);
1552 
1553  Eigen::Vector3f vec(vHypothesisPosition.x, vHypothesisPosition.y, vHypothesisPosition.z);
1554  armarx::FramedPositionPtr hypothesisPosition = new armarx::FramedPosition(vec, cameraFrameName, localRobot->getName());
1555  armarx::Vector3Ptr principalAxis1, principalAxis2, principalAxis3, eigenvalues;
1556  principalAxis1 = new armarx::Vector3();
1557  principalAxis1->x = vPrincipalAxis1.x;
1558  principalAxis1->y = vPrincipalAxis1.y;
1559  principalAxis1->z = vPrincipalAxis1.z;
1560  principalAxis2 = new armarx::Vector3();
1561  principalAxis2->x = vPrincipalAxis2.x;
1562  principalAxis2->y = vPrincipalAxis2.y;
1563  principalAxis2->z = vPrincipalAxis2.z;
1564  principalAxis3 = new armarx::Vector3();
1565  principalAxis3->x = vPrincipalAxis3.x;
1566  principalAxis3->y = vPrincipalAxis3.y;
1567  principalAxis3->z = vPrincipalAxis3.z;
1568  eigenvalues = new armarx::Vector3();
1569  eigenvalues->x = vEigenValues.x;
1570  eigenvalues->y = vEigenValues.y;
1571  eigenvalues->z = vEigenValues.z;
1572 
1573 
1574  FramedPositionPtr globalObjPose = hypothesisPosition->toGlobal(localRobot);
1575  debugDrawer->setSphereVisu(getName(), "GlobalObj", globalObjPose, DrawColor {0, 0, 1, 1}, 50.0);
1576 
1577  listener->reportObjectHypothesisPosition(hypothesisPosition, fHypothesisExtent, principalAxis1, principalAxis2, principalAxis3, eigenvalues);
1578 }
1579 
1580 
1581 
1582 
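// Stores the new head-to-platform transformation; the previous value is either reset to the new one or kept
// as the 'old' transformation for later motion compensation.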
1583 void ObjectLearningByPushing::SetHeadToPlatformTransformation(Vec3d vTranslation, Mat3d mRotation, bool bResetOldTransformation)
1584 {
1585  if (bResetOldTransformation)
1586  {
1587  Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, vTranslation);
1588  Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, mRotation);
1589  }
1590  else
1591  {
1592  Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, m_tHeadToPlatformTransformation.translation);
1593  Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, m_tHeadToPlatformTransformation.rotation);
1594  }
1595 
1596  Math3d::SetVec(m_tHeadToPlatformTransformation.translation, vTranslation);
1597  Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, mRotation);
1598 }
1599 
1600 
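SetHeadToPlatformTransformation stores the current camera-to-platform pose and keeps the previous one for later comparison. A minimal sketch of how such a pose is inverted to map a platform-frame point into camera coordinates, as SelectPreferredHypothesis does below (illustrative only; it assumes the member is an IVT Transformation3d and uses only Math3d calls that appear in this file):

// p_camera = R^T * (p_platform - t), where (R, t) is the head-to-platform transformation.
Vec3d PlatformPointToCamera(const Transformation3d& headToPlatform, const Vec3d& pointInPlatform)
{
    Vec3d difference, pointInCamera;
    Mat3d rotationInverse;
    Math3d::SubtractVecVec(pointInPlatform, headToPlatform.translation, difference);
    Math3d::Transpose(headToPlatform.rotation, rotationInverse);
    Math3d::MulMatVec(rotationInverse, difference, pointInCamera);
    return pointInCamera;
}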
1601 
1602 
1603 void ObjectLearningByPushing::LoadAndFuseObjectSegmentations(std::string sObjectName)
1604 {
1605 
1606  const int nFileIndexStart = 1;
1607 
1608  for (int nMaxFileIndex = 2; nMaxFileIndex <= 51; nMaxFileIndex++)
1609  {
1610  std::vector<CObjectHypothesis*> aObjectHypotheses;
1611  CObjectHypothesis* pHypothesis;
1612 
1613  if (!CObjectRecognition::LoadObjectDescriptorRGBD(sObjectName, nMaxFileIndex, pHypothesis))
1614  {
1615  return;
1616  }
1617 
1618  delete pHypothesis;
1619 
1620  int nFileIndex = nFileIndexStart;
1621 
1622  while (CObjectRecognition::LoadObjectDescriptorRGBD(sObjectName, nFileIndex, pHypothesis) && nFileIndex <= nMaxFileIndex)
1623  {
1624  aObjectHypotheses.push_back(pHypothesis);
1625  nFileIndex++;
1626  }
1627 
1628  pHypothesis = NULL;
1629  CSegmentedPointCloudFusion::FusePointClouds(aObjectHypotheses, pHypothesis);
1630 
1631  CObjectRecognition::SaveObjectDescriptorRGBD(pHypothesis, sObjectName + "-fused", aObjectHypotheses.size());
1632  CObjectRecognition::SaveObjectDescriptorPCD(pHypothesis, sObjectName + "-fused", aObjectHypotheses.size());
1633 
1634  for (size_t i = 0; i < pHypothesis->aVisibleConfirmedPoints.size(); i++)
1635  {
1636  pHypothesis->aVisibleConfirmedPoints.at(i)->vPosition.z += 250;
1637  }
1638 
1639  CByteImage* pImg = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
1640  COLPTools::DrawHypothesis(pHypothesis, calibration, pImg, aObjectHypotheses.size());
1641  std::string sFileName = OLP_OBJECT_LEARNING_DIR + sObjectName + "-fused00.bmp";
1642  COLPTools::SetNumberInFileName(sFileName, nMaxFileIndex, 2);
1643  pImg->SaveToFile(sFileName.c_str());
1644  delete pImg;
1645 
1646  for (size_t i = 0; i < aObjectHypotheses.size(); i++)
1647  {
1648  delete aObjectHypotheses.at(i);
1649  }
1650 
1651  delete pHypothesis;
1652  }
1653 }
1654 
1655 
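A brief usage sketch (hypothetical object name): each iteration fuses the descriptors 1..nMaxFileIndex of the named object, saves them as "<name>-fused", and writes one visualization bitmap per fusion step into OLP_OBJECT_LEARNING_DIR.

// Fuse the previously saved RGBD descriptors of "cup" into incremental "cup-fused" descriptors.
LoadAndFuseObjectSegmentations("cup");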
1656 
1657 
1658 CObjectHypothesis* ObjectLearningByPushing::SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints, const bool bPreferCentralObject)
1659 {
1660  CObjectHypothesis* pHypothesis = NULL;
1661  pPoints = NULL;
1662 
1663  CObjectHypothesisArray* hypotheses = NULL;
1664 
1665  if (m_pConfirmedHypotheses->GetSize() > 0)
1666  {
1667  hypotheses = m_pConfirmedHypotheses;
1668  }
1669  else if (m_pInitialHypothesesAtLocalMaxima->GetSize() > 0)
1670  {
1671  hypotheses = m_pInitialHypothesesAtLocalMaxima;
1672  }
1673 
1674  if (hypotheses)
1675  {
1676  if (bPreferCentralObject)
1677  {
1678  // get central point in camera coordinates
1679  Vec3d vCenterPlatform = {OLP_CENTRAL_POSITION_FOR_PUSHING_X, OLP_CENTRAL_POSITION_FOR_PUSHING_Y, OLP_CENTRAL_POSITION_FOR_PUSHING_Z};
1680  Mat3d mRotInv;
1681  Vec3d vCenter, vTemp;
1682  Math3d::SubtractVecVec(vCenterPlatform, m_tHeadToPlatformTransformation.translation, vTemp);
1683  Math3d::Transpose(m_tHeadToPlatformTransformation.rotation, mRotInv);
1684  Math3d::MulMatVec(mRotInv, vTemp, vCenter);
1685  ARMARX_VERBOSE << "Central point in platform cs: " << vCenterPlatform.x << ", " << vCenterPlatform.y << ", " << vCenterPlatform.z;
1686  ARMARX_VERBOSE << "m_tHeadToPlatformTransformation.translation: " << m_tHeadToPlatformTransformation.translation.x << ", " << m_tHeadToPlatformTransformation.translation.y << ", " << m_tHeadToPlatformTransformation.translation.z;
1687  ARMARX_VERBOSE << "m_tHeadToPlatformTransformation.rotation: " << m_tHeadToPlatformTransformation.rotation.r1 << ", " << m_tHeadToPlatformTransformation.rotation.r2 << ", " << m_tHeadToPlatformTransformation.rotation.r3 << ", "
1688  << m_tHeadToPlatformTransformation.rotation.r4 << ", " << m_tHeadToPlatformTransformation.rotation.r5 << ", " << m_tHeadToPlatformTransformation.rotation.r6 << ", "
1689  << m_tHeadToPlatformTransformation.rotation.r7 << ", " << m_tHeadToPlatformTransformation.rotation.r8 << ", " << m_tHeadToPlatformTransformation.rotation.r9;
1690  ARMARX_VERBOSE << "Central point in camera cs: " << vCenter.x << ", " << vCenter.y << ", " << vCenter.z;
1691  //Math3d::SetVec(vCenter, 0, 0, 500);
1692  //ARMARX_IMPORTANT << "Manually set central point in camera coordinates: " << vCenter.x << ", " << vCenter.y << ", " << vCenter.z;
1693 
1694 
1695  int nBestIndex = 0;
1696  float fMinDist = Math3d::Distance((*hypotheses)[0]->vCenter, vCenter);
1697 
1698  for (int i = 1; i < hypotheses->GetSize(); i++)
1699  {
1700  if (Math3d::Distance((*hypotheses)[i]->vCenter, vCenter) < fMinDist)
1701  {
1702  // only accept hypotheses that contain at least half as many points as the biggest one
1703  //if (2*(*m_pInitialHypothesesAtLocalMaxima)[i]->aNewPoints.size() > (*m_pInitialHypothesesAtLocalMaxima)[0]->aNewPoints.size())
1704  {
1705  fMinDist = Math3d::Distance((*hypotheses)[i]->vCenter, vCenter);
1706  nBestIndex = i;
1707  }
1708  }
1709  }
1710 
1711  pHypothesis = (*hypotheses)[nBestIndex];
1712  }
1713  else
1714  {
1715  pHypothesis = (*hypotheses)[0];
1716  }
1717  }
1718 
1719  if (pHypothesis)
1720  {
1721  if (pHypothesis->aConfirmedPoints.size() > 0)
1722  {
1723  pPoints = &(pHypothesis->aConfirmedPoints);
1724  }
1725  else
1726  {
1727  pPoints = &(pHypothesis->aNewPoints);
1728  }
1729 
1730  ARMARX_VERBOSE << "Hypothesis " << pHypothesis->nHypothesisNumber << " (" << pHypothesis->vCenter.x << ", " << pHypothesis->vCenter.y << ", "
1731  << pHypothesis->vCenter.z << ") is chosen for pushing";
1732  }
1733  else
1734  {
1735  ARMARX_WARNING << "No hypothesis available";
1736  }
1737 
1738  return pHypothesis;
1739 }
1740 
1741 
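A minimal caller sketch from within this class (hypothetical; the hypothesis owns the returned point vector, so the caller must not delete it):

std::vector<CHypothesisPoint*>* points = NULL;
CObjectHypothesis* hypothesis = SelectPreferredHypothesis(points);
if (hypothesis != NULL)
{
    ARMARX_VERBOSE << "Preferred hypothesis " << hypothesis->nHypothesisNumber
                   << " provides " << points->size() << " points";
}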
1742 
1743 void ObjectLearningByPushing::convertFileOLPtoPCL(std::string filename, bool includeCandidatePoints)
1744 {
1745  setlocale(LC_NUMERIC, "C");
1746 
1747  FILE* filePtr = fopen(filename.c_str(), "rt");
1748 
1749  if (filePtr == NULL)
1750  {
1751  ARMARX_WARNING << "Cannot open object file: " << filename;
1752  return;
1753  }
1754 
1755  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr objectPointCloud(new pcl::PointCloud<pcl::PointXYZRGBA>());
1756 
1757  // read the confirmed points
1758  size_t nNumPoints;
1759  float r, g, b, intensity;
1760  pcl::PointXYZRGBA point;
1761  int dummy = fscanf(filePtr, "%lu", &nNumPoints);
1762 
1763  for (size_t i = 0; i < nNumPoints; i++)
1764  {
1765  dummy += fscanf(filePtr, "%e %e %e", &point.x, &point.y, &point.z);
1766  dummy += fscanf(filePtr, "%e %e %e %e \n", &r, &g, &b, &intensity);
1767  point.r = r * 255 * intensity;
1768  point.g = g * 255 * intensity;
1769  point.b = b * 255 * intensity;
1770  objectPointCloud->push_back(point);
1771  }
1772 
1773  if (includeCandidatePoints)
1774  {
1775  // read the candidate points
1776  dummy += fscanf(filePtr, "%lu", &nNumPoints);
1777 
1778  for (size_t i = 0; i < nNumPoints; i++)
1779  {
1780  dummy += fscanf(filePtr, "%e %e %e", &point.x, &point.y, &point.z);
1781  dummy += fscanf(filePtr, "%e %e %e %e \n", &r, &g, &b, &intensity);
1782  point.r = r * 255 * intensity;
1783  point.g = g * 255 * intensity;
1784  point.b = b * 255 * intensity;
1785  objectPointCloud->push_back(point);
1786  }
1787  }
1788 
1789  int n = (int) filename.length();
1790  filename.at(n - 3) = 'p';
1791  filename.at(n - 2) = 'c';
1792  filename.at(n - 1) = 'd';
1793  pcl::io::savePCDFile(filename, *objectPointCloud);
1794 }
1795 
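The fscanf calls above imply a simple whitespace-separated text layout: a point count, then one position triple (x y z) and one color quadruple (r g b intensity) per point, optionally followed by a second block of the same form for the candidate points. A hypothetical two-point confirmed block could look like this (values are made up):

2
103.5 -20.1 612.0
0.80 0.35 0.10 0.90
98.2 -18.7 609.4
0.78 0.33 0.12 0.85

Note that the function derives the output name by overwriting the last three characters of the input filename with "pcd", so it assumes a three-letter extension.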
1796 
1797 void ObjectLearningByPushing::BoundingBoxInForegroundImage(CByteImage* image, int minX, int maxX, int minY, int maxY)
1798 {
1799  for (int i = 0; i < image->height; i++)
1800  {
1801  if (i < minY || i > maxY)
1802  {
1803  for (int j = 0; j < image->width; j++)
1804  {
1805  if (j < minX || j > maxX)
1806  {
1807  image->pixels[i * image->width + j] = 0;
1808  }
1809  }
1810  }
1811  }
1812 }
1813 
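As written, BoundingBoxInForegroundImage only clears pixels whose row and column both lie outside the box, so the bands directly above, below, left and right of the box keep their foreground values. If the intent is to keep only the box itself, a variant could look like the following sketch (an assumption about the intended behaviour, not the code of this file):

// Zero every pixel outside the bounding box, keeping only the box contents.
void KeepOnlyBoundingBox(CByteImage* image, int minX, int maxX, int minY, int maxY)
{
    for (int i = 0; i < image->height; i++)
    {
        const bool rowOutside = (i < minY || i > maxY);
        for (int j = 0; j < image->width; j++)
        {
            if (rowOutside || j < minX || j > maxX)
            {
                image->pixels[i * image->width + j] = 0;
            }
        }
    }
}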
CObjectRecognition::SaveObjectDescriptorPCD
static void SaveObjectDescriptorPCD(const CObjectHypothesis *pHypothesis, const std::string sObjectName, const int nDescriptorNumber=0)
Definition: ObjectRecognition.cpp:56
visionx::tools::convertIVTtoEigen
Eigen::Matrix3f convertIVTtoEigen(const Mat3d m)
Definition: TypeMapping.cpp:632
CObjectRecognition::FindObjectRGBD
static void FindObjectRGBD(const CObjectHypothesis *pHypothesis, const CByteImage *pHSVImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, const Vec3d upwardsVector, Vec3d &vPosition, Mat3d &mOrientation, float &distance, float &fProbability, CByteImage *pResultImage=NULL)
Definition: ObjectRecognition.cpp:490
PointCloudRegistration.h
HypothesisGeneration.h
ARMARX_VERBOSE
#define ARMARX_VERBOSE
Definition: Logging.h:180
armarx::eFull
@ eFull
Definition: MorphingItem.h:35
OLP_IMG_HEIGHT
#define OLP_IMG_HEIGHT
Definition: ObjectLearningByPushingDefinitions.h:69
visionx::ObjectLearningByPushing::getReferenceFrameName
std::string getReferenceFrameName(const ::Ice::Current &c=Ice::emptyCurrent) override
Definition: ObjectLearningByPushing.cpp:151
ARMARX_IMPORTANT
#define ARMARX_IMPORTANT
Definition: Logging.h:183
OLP_IMG_WIDTH
#define OLP_IMG_WIDTH
Definition: ObjectLearningByPushingDefinitions.h:68
CHypothesisValidationRGBD::ValidateInitialHypotheses
static void ValidateInitialHypotheses(const std::vector< CHypothesisPoint * > &aOldDepthMapPoints, const std::vector< CHypothesisPoint * > &aNewDepthMapPoints, const CByteImage *pForegroundImage, const CByteImage *pHSVImage, const CCalibration *calibration, const Vec3d upwardsVector, CObjectHypothesisArray *pObjectHypotheses, CObjectHypothesisArray *pConfirmedHypotheses)
Definition: HypothesisValidationRGBD.cpp:46
visionx
ArmarX headers.
Definition: OpenPoseStressTest.h:38
armarx::navigation::core::Pose
Eigen::Isometry3f Pose
Definition: basic_types.h:31
CSegmentedPointCloudFusion::FusePointClouds
void FusePointClouds(std::vector< CObjectHypothesis * > aHypotheses, CObjectHypothesis *&pFusedHypothesis)
Definition: SegmentedPointCloudFusion.cpp:38
OLP_DEPTH_MAP_PIXEL_DISTANCE
#define OLP_DEPTH_MAP_PIXEL_DISTANCE
Definition: ObjectLearningByPushingDefinitions.h:256
ObjectLearningByPushing.h
CSIFTFeatureArray
CDynamicArrayTemplate< CSIFTFeatureEntry * > CSIFTFeatureArray
Definition: ObjectHypothesis.h:168
visionx::ObjectLearningByPushing::onInitPointCloudAndImageProcessor
void onInitPointCloudAndImageProcessor() override
Set up the vision component.
Definition: ObjectLearningByPushing.cpp:191
visionx::ObjectLearningByPushing::recognizeObject
void recognizeObject(const std::string &objectName, const ::Ice::Current &c=Ice::emptyCurrent) override
Recognizes the object with the given name in the current scene.
Definition: ObjectLearningByPushing.cpp:177
OLP_SAVE_COMPLETE_POINTCLOUD
#define OLP_SAVE_COMPLETE_POINTCLOUD
Definition: ObjectLearningByPushingDefinitions.h:203
visionx::ObjectLearningByPushing::ValidateInitialObjectHypotheses
void ValidateInitialObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Validates the initial object hypotheses after the first push.
Definition: ObjectLearningByPushing.cpp:84
COLPTools::GetHypothesesIntersectionRatio
float GetHypothesesIntersectionRatio(const CObjectHypothesis *pHypothesis1, const CObjectHypothesis *pHypothesis2, const CCalibration *calibration)
Definition: OLPTools.cpp:1395
COLPTools::SetNumberInFileName
void SetNumberInFileName(std::string &sFileName, int nNumber, int nNumDigits)
Definition: OLPTools.cpp:1378
FeatureCalculation.h
GfxTL::Vec2d
VectorXD< 2, double > Vec2d
Definition: VectorXD.h:694
CHypothesisValidationRGBD::RevalidateHypotheses
static void RevalidateHypotheses(const std::vector< CHypothesisPoint * > &aNewDepthMapPoints, const CByteImage *pForegroundImage, const CByteImage *pHSVImage, const CCalibration *calibration, const Vec3d upwardsVector, CObjectHypothesisArray *pObjectHypotheses)
Definition: HypothesisValidationRGBD.cpp:117
c
constexpr T c
Definition: UnscentedKalmanFilterTest.cpp:43
visionx::ImageProviderInfo::imageFormat
ImageFormatInfo imageFormat
Image format struct that contains all necessary image information.
Definition: ImageProcessor.h:496
COLPTools::CalculateForegroundRatioOfHypothesis
void CalculateForegroundRatioOfHypothesis(const CObjectHypothesis *pHypothesis, const CByteImage *pForegroundImage, const CCalibration *calibration, float &fForegroundRatio, int &nNumForegroundPixels)
Definition: OLPTools.cpp:336
visionx::tools::createByteImage
CByteImage * createByteImage(const ImageFormatInfo &imageFormat, const ImageType imageType)
Creates a ByteImage for the destination type specified in the given imageProviderInfo.
HypothesisValidationRGBD.h
OLP_CENTRAL_POSITION_FOR_PUSHING_X
#define OLP_CENTRAL_POSITION_FOR_PUSHING_X
Definition: ObjectLearningByPushingDefinitions.h:318
visionx::ImageProviderInfo
Definition: ImageProcessor.h:466
CHypothesisVisualization
Definition: HypothesisVisualization.h:39
CGaussBackground
Definition: GaussBackground.h:51
IceInternal::Handle< Vector3 >
visionx::tools::convert
CByteImage::ImageType convert(const ImageType visionxImageType)
Converts a VisionX image type into an image type of IVT's ByteImage.
Definition: TypeMapping.cpp:95
visionx::ObjectLearningByPushing::onExitPointCloudAndImageProcessor
void onExitPointCloudAndImageProcessor() override
Exit the ImageProcessor component.
Definition: ObjectLearningByPushing.cpp:370
CFeatureCalculation
Definition: FeatureCalculation.h:43
COLPTools::CalculateEdgeImageFromImageAndDisparity
void CalculateEdgeImageFromImageAndDisparity(CByteImage *pImageGray, CByteImage *pDisparity, CByteImage *pEdgeImage)
Definition: OLPTools.cpp:1615
visionx::ObjectLearningByPushing::RevalidateConfirmedObjectHypotheses
void RevalidateConfirmedObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Re-validates the confirmed object hypotheses after the second and later pushes.
Definition: ObjectLearningByPushing.cpp:92
OLPTools.h
CObjectHypothesis::vLastTranslation
Vec3d vLastTranslation
Definition: ObjectHypothesis.h:318
CSaliencyCalculation::FindLocalMaxima
void FindLocalMaxima(const std::vector< Vec3d > &aPoints3D, const std::vector< Vec2d > &aPointsInImage, std::vector< Vec3d > &aMaxima, const int nBinSizeInPx, CByteImage *pMaximumnessImage)
Definition: SaliencyCalculation.cpp:44
ObjectRecognition.h
CObjectRecognition::FindAllObjectsRGBD
static void FindAllObjectsRGBD(const CByteImage *pHSVImage, const CByteImage *pRGBImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, const Vec3d upwardsVector, std::vector< std::string > &aNames, std::vector< Vec3d > &aPositions, std::vector< Mat3d > &aOrientations, std::vector< float > &aProbabilities)
Definition: ObjectRecognition.cpp:571
ARMARX_DEBUG
#define ARMARX_DEBUG
Definition: Logging.h:177
GfxTL::Vec3d
VectorXD< 3, double > Vec3d
Definition: VectorXD.h:695
CObjectHypothesis::vCenter
Vec3d vCenter
Definition: ObjectHypothesis.h:308
CObjectHypothesis::nHypothesisNumber
int nHypothesisNumber
Definition: ObjectHypothesis.h:287
OLP_OBJECT_LEARNING_DIR
#define OLP_OBJECT_LEARNING_DIR
Definition: ObjectLearningByPushingDefinitions.h:162
visionx::ObjectLearningByPushing::getLastObjectTransformation
armarx::PoseBasePtr getLastObjectTransformation(const ::Ice::Current &c=Ice::emptyCurrent) override
Returns the last transformation that the preferred object hypothesis underwent.
Definition: ObjectLearningByPushing.cpp:159
CObjectHypothesis::aVisibleConfirmedPoints
std::vector< CHypothesisPoint * > aVisibleConfirmedPoints
Definition: ObjectHypothesis.h:294
armarx::VariantType::Long
const VariantTypeId Long
Definition: Variant.h:917
visionx::ObjectLearningByPushing::getScenePoints
visionx::types::PointList getScenePoints(const ::Ice::Current &c=Ice::emptyCurrent) override
Definition: ObjectLearningByPushing.cpp:122
filename
std::string filename
Definition: VisualizationRobot.cpp:84
visionx::ObjectLearningByPushing::getObjectHypothesisPoints
visionx::types::PointList getObjectHypothesisPoints(const ::Ice::Current &c=Ice::emptyCurrent) override
Returns the confirmed points constituting the object hypothesis.
Definition: ObjectLearningByPushing.cpp:100
OLP_MAKE_RESULT_SCREENSHOTS
#define OLP_MAKE_RESULT_SCREENSHOTS
Definition: ObjectLearningByPushingDefinitions.h:201
SegmentedPointCloudFusion.h
GaussBackground.h
HypothesisVisualization.h
CObjectHypothesis
Definition: ObjectHypothesis.h:249
CObjectRecognition::LoadObjectDescriptorRGBD
static bool LoadObjectDescriptorRGBD(const std::string sObjectName, const int nDescriptorNumber, CObjectHypothesis *&pHypothesis)
Definition: ObjectRecognition.cpp:163
CObjectHypothesis::fMaxExtent
float fMaxExtent
Definition: ObjectHypothesis.h:315
OLP_SAVE_CONFIRMED_OBJECT
#define OLP_SAVE_CONFIRMED_OBJECT
Definition: ObjectLearningByPushingDefinitions.h:204
visionx::ImageProviderInfo::destinationImageType
ImageType destinationImageType
Required image destination type.
Definition: ImageProcessor.h:491
GfxTL::Matrix3f
MatrixXX< 3, 3, float > Matrix3f
Definition: MatrixXX.h:600
GfxTL::Matrix4f
MatrixXX< 4, 4, float > Matrix4f
Definition: MatrixXX.h:601
OLP_MAKE_INTERMEDIATE_SCREENSHOTS
#define OLP_MAKE_INTERMEDIATE_SCREENSHOTS
Definition: ObjectLearningByPushingDefinitions.h:202
CObjectHypothesisArray
CDynamicArrayTemplate< CObjectHypothesis * > CObjectHypothesisArray
Definition: ObjectHypothesis.h:359
CObjectHypothesis::mLastRotation
Mat3d mLastRotation
Definition: ObjectHypothesis.h:317
float
#define float
Definition: 16_Level.h:22
ARMARX_INFO
#define ARMARX_INFO
Definition: Logging.h:174
CObjectHypothesis::aNewPoints
std::vector< CHypothesisPoint * > aNewPoints
Definition: ObjectHypothesis.h:291
visionx::ObjectLearningByPushing::process
void process() override
Process the vision component.
Definition: ObjectLearningByPushing.cpp:449
COLPTools::DrawHypothesis
void DrawHypothesis(CObjectHypothesis *pHypothesis, const CCalibration *calibration, CByteImage *pImage, const int nNumFusedHypotheses)
Definition: OLPTools.cpp:1425
memoryx::KBM::Vector3
Eigen::Vector3d Vector3
Definition: kbm.h:41
ImageUtil.h
OLP_CENTRAL_POSITION_FOR_PUSHING_Z
#define OLP_CENTRAL_POSITION_FOR_PUSHING_Z
Definition: ObjectLearningByPushingDefinitions.h:320
CObjectHypothesis::aConfirmedPoints
std::vector< CHypothesisPoint * > aConfirmedPoints
Definition: ObjectHypothesis.h:292
OLP_CENTRAL_POSITION_FOR_PUSHING_Y
#define OLP_CENTRAL_POSITION_FOR_PUSHING_Y
Definition: ObjectLearningByPushingDefinitions.h:319
TypeMapping.h
OLP_SCREENSHOT_PATH
#define OLP_SCREENSHOT_PATH
Definition: ObjectLearningByPushingDefinitions.h:206
visionx::ObjectLearningByPushing::CreateInitialObjectHypotheses
void CreateInitialObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Creates the initial object hypotheses.
Definition: ObjectLearningByPushing.cpp:76
armarx::VariantType::FramedPosition
const VariantTypeId FramedPosition
Definition: FramedPose.h:39
ARMARX_WARNING
#define ARMARX_WARNING
Definition: Logging.h:186
SaliencyCalculation.h
CHypothesisGeneration
Definition: HypothesisGeneration.h:50
CObjectHypothesis::aHypothesisHasMoved
std::vector< bool > aHypothesisHasMoved
Definition: ObjectHypothesis.h:297
visionx::ObjectLearningByPushing::getUpwardsVector
armarx::Vector3BasePtr getUpwardsVector(const ::Ice::Current &c=Ice::emptyCurrent) override
Definition: ObjectLearningByPushing.cpp:143
CObjectRecognition::FindObjectWithManyDescriptorsRGBD
static void FindObjectWithManyDescriptorsRGBD(const CByteImage *pHSVImage, const CByteImage *pRGBImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, const Vec3d upwardsVector, const std::string objectName, const int numDescriptors)
Definition: ObjectRecognition.cpp:645
CObjectRecognition::SaveObjectDescriptorRGBD
static void SaveObjectDescriptorRGBD(const CObjectHypothesis *pHypothesis, const std::string sObjectName, const int nDescriptorNumber=0)
Definition: ObjectRecognition.cpp:105
armarx
This file offers overloads of toIce() and fromIce() functions for STL container types.
Definition: ArmarXTimeserver.cpp:28
visionx::ObjectLearningByPushing::onConnectPointCloudAndImageProcessor
void onConnectPointCloudAndImageProcessor() override
Implement this method in your PointCloudAndImageProcessor in order to execute parts when the component i...
Definition: ObjectLearningByPushing.cpp:221