ObjectLearningByPushing.cpp
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 
26 
27 #include "FeatureCalculation.h"
28 #include "GaussBackground.h"
29 #include "HypothesisGeneration.h"
32 #include "OLPTools.h"
33 #include "ObjectRecognition.h"
34 #include "PointCloudRegistration.h"
35 #include "SaliencyCalculation.h"
37 
38 
39 // IVT
40 #include <Calibration/Calibration.h>
41 #include <Calibration/StereoCalibration.h>
42 #include <Image/ByteImage.h>
43 #include <Image/ImageProcessor.h>
44 #include <Math/FloatMatrix.h>
45 #include <Math/LinearAlgebra.h>
46 
47 
48 // OpenMP
49 #include <omp.h>
50 
51 
52 // VisionX
53 #include <VisionX/interface/components/Calibration.h>
56 
57 
58 // PCL
59 #include <sys/time.h>
60 
61 #include <Eigen/Geometry>
62 
63 #include <pcl/io/pcd_io.h>
64 #include <pcl/point_cloud.h>
65 #include <pcl/point_types.h>
66 
67 
68 using namespace visionx;
69 using namespace armarx;
70 
71 void
73 {
74  listener->resetHypothesesStatus();
75  currentState = eCreatingInitialHypotheses;
76 }
77 
78 void
80 {
81  listener->resetHypothesesStatus();
82  currentState = eValidatingInitialHypotheses;
83 }
84 
85 void
87 {
88  listener->resetHypothesesStatus();
89  currentState = eRevalidatingConfirmedHypotheses;
90 }
91 
92 types::PointList
94 {
95  types::PointList hypothesisPoints;
96  std::vector<CHypothesisPoint*>* pPoints;
97  SelectPreferredHypothesis(pPoints);
98 
99  if (pPoints)
100  {
101  for (size_t i = 0; i < pPoints->size(); i++)
102  {
103  Eigen::Vector3f p(pPoints->at(i)->vPosition.x,
104  pPoints->at(i)->vPosition.y,
105  pPoints->at(i)->vPosition.z);
106  armarx::Vector3Ptr point = new armarx::Vector3(p);
107  hypothesisPoints.push_back(point);
108  }
109  }
110 
111  return hypothesisPoints;
112 }
113 
114 types::PointList
116 {
117  types::PointList scenePoints;
118  std::vector<CHypothesisPoint*>* points = m_pAllNewDepthMapPoints;
119  if (m_pAllNewDepthMapPoints->size() == 0)
120  {
121  points = m_pAllOldDepthMapPoints;
122  }
123 
124  for (size_t i = 0; i < points->size(); i++)
125  {
126  Eigen::Vector3f p(
127  points->at(i)->vPosition.x, points->at(i)->vPosition.y, points->at(i)->vPosition.z);
128  armarx::Vector3Ptr point = new armarx::Vector3(p);
129  scenePoints.push_back(point);
130  }
131  return scenePoints;
132 }
133 
134 Vector3BasePtr
136 {
137  Vector3BasePtr upVector = new Vector3(upwardsVector.x, upwardsVector.y, upwardsVector.z);
138  return upVector;
139 }
140 
141 std::string
143 {
144  return cameraFrameName;
145 }
146 
147 PoseBasePtr
149 {
150  std::vector<CHypothesisPoint*>* pPoints;
151  CObjectHypothesis* pHypothesis = SelectPreferredHypothesis(pPoints);
152  PoseBasePtr result = NULL;
153 
154  if (pHypothesis)
155  {
156  Eigen::Matrix3f rot = tools::convertIVTtoEigen(pHypothesis->mLastRotation);
157  Eigen::Vector3f trans = tools::convertIVTtoEigen(pHypothesis->vLastTranslation);
158  result = new Pose(rot, trans);
159  }
160 
161  return result;
162 }
163 
164 void
165 ObjectLearningByPushing::recognizeObject(const std::string& objectName, const Ice::Current& c)
166 {
167  UpdateDataFromRobot();
168  SetHeadToPlatformTransformation(
169  tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);
170 
171  m_pFeatureCalculation->GetAllFeaturePoints(colorImageLeft,
172  greyImageLeft,
173  pointCloudPtr,
174  OLP_DEPTH_MAP_PIXEL_DISTANCE,
175  *m_pAllNewSIFTPoints,
176  *m_pAllNewMSERs,
177  *m_pAllNewDepthMapPoints,
178  calibration);
179 
180  RecognizeHypotheses(colorImageLeft, objectName);
181 }
182 
183 void
184 ObjectLearningByPushing::onInitPointCloudAndImageProcessor()
185 {
186  // todo: remove this again when done with the old learned objects
187  //convertFileOLPtoPCL("/homes/staff/schieben/datalog/test/BunteKanne.dat");
188 
189  ARMARX_VERBOSE << "entering onInitPointCloudAndImageProcessor()";
190 
191  imageProviderName = getProperty<std::string>("ImageProviderAdapterName").getValue();
192  usingImageProvider(imageProviderName);
193 
194  robotStateProxyName = getProperty<std::string>("RobotStateProxyName").getValue();
195  usingProxy(robotStateProxyName);
196 
197 
198  cameraFrameName = getProperty<std::string>("CameraFrameName").getValue();
199 
200  connected = false;
201  currentState = eNoHypotheses;
202 
203 
204  offeringTopic("ObjectLearningByPushing");
205  pointcloudProviderName = getProperty<std::string>("PointCloudProviderAdapterName").getValue();
206  usingPointCloudProvider(pointcloudProviderName);
207 
208 
209  offeringTopic(getProperty<std::string>("DebugDrawerTopicName").getValue());
210 }
211 
212 void
213 ObjectLearningByPushing::onConnectPointCloudAndImageProcessor()
214 {
215  ARMARX_VERBOSE << "entering onConnectPointCloudAndImageProcessor()";
216 
217  // connect to point cloud provider
218  ARMARX_INFO << "Connecting to " << pointcloudProviderName;
219  // pointcloudProviderProxy = getProxy<CapturingPointCloudAndImageAndCalibrationProviderInterfacePrx>(pointcloudProviderName);
220  // calibration = tools::convert(pointcloudProviderProxy->getMonocularCalibration()); // == left stereo image
221  pointcloudProviderProxy = getProxy<RGBDPointCloudProviderInterfacePrx>(pointcloudProviderName);
222  calibration = tools::convert(pointcloudProviderProxy->getStereoCalibration().calibrationLeft);
223 
224  if (cameraFrameName != pointcloudProviderProxy->getReferenceFrame())
225  {
226  ARMARX_WARNING << "Specified camera frame name: '" << cameraFrameName
227  << "' does not match frame given by provider '"
228  << pointcloudProviderProxy->getReferenceFrame() << "'!";
229  }
230 
231  // FIXME: workaround for depth camera sim frame misalignment
232  if (pointcloudProviderProxy->getReferenceFrame() == "DepthCameraSim")
233  {
234  ARMARX_WARNING << "Simulation detected. Applying fix for DepthCameraSim misalignment.";
235 
236  // see http://ivt.sourceforge.net/doxygen/class_c_calibration.html#details
237  Mat3d R = tools::convert(
238  pointcloudProviderProxy->getStereoCalibration().calibrationLeft.cameraParam.rotation);
239  // Eigen::Matrix3f worldToCamera = (Eigen::Matrix3f() << R.r1, R.r2, R.r3, R.r4, R.r5, R.r6, R.r7, R.r8, R.r9).finished()
240  // Eigen::Matrix3f cameraToImg = Eigen::AngleAxisf(-M_PI / 2.0f, Eigen::Vector3f::UnitZ()).toRotationMatrix();
241  Eigen::Matrix3f worldToCamera;
242  worldToCamera << R.r1, R.r2, R.r3, R.r4, R.r5, R.r6, R.r7, R.r8, R.r9;
243  Eigen::Matrix3f cameraToImg;
244  cameraToImg << 0, -1, 0, 1, 0, 0, 0, 0, 1; // rotate -90° around z
245 
246  worldToCamera = cameraToImg * worldToCamera;
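 // write the corrected rotation back into the IVT row-major rotation matrix (r1..r9)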
247  R.r1 = worldToCamera(0, 0);
248  R.r2 = worldToCamera(0, 1);
249  R.r3 = worldToCamera(0, 2);
250  R.r4 = worldToCamera(1, 0);
251  R.r5 = worldToCamera(1, 1);
252  R.r6 = worldToCamera(1, 2);
253  R.r7 = worldToCamera(2, 0);
254  R.r8 = worldToCamera(2, 1);
255  R.r9 = worldToCamera(2, 2);
256 
257  calibration->SetRotation(R);
258  }
259 
260  pointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>());
261 
262  // connect to image provider
263  ARMARX_INFO << "Connecting to " << imageProviderName;
264  visionx::ImageProviderInfo imageProviderInfo = getImageProvider(imageProviderName);
265  imageProviderProxy = getProxy<ImageProviderInterfacePrx>(imageProviderName);
266 
267  colorImageLeft = tools::createByteImage(imageProviderInfo);
268  colorImageRight = tools::createByteImage(imageProviderInfo);
269 
270  colorImageLeftOld = new CByteImage(colorImageLeft);
271  greyImageLeft =
272  new CByteImage(colorImageLeft->width, colorImageLeft->height, CByteImage::eGrayScale);
273  greyImageRight = new CByteImage(greyImageLeft);
274  resultImageLeft = new CByteImage(colorImageLeft);
275  resultImageRight = new CByteImage(colorImageLeft);
276  tempResultImage = new CByteImage(colorImageLeft);
277 
278  cameraImages = new CByteImage*[2];
279  cameraImages[0] = colorImageLeft;
280  cameraImages[1] = colorImageRight;
281 
282  resultImages = new CByteImage*[2];
283  resultImages[0] = resultImageLeft;
284  resultImages[1] = resultImageRight;
285 
286  m_pFeatureCalculation = new CFeatureCalculation();
287 
288  // CHypothesisGeneration creates the initial object hypotheses
289  m_pHypothesisGeneration = new CHypothesisGeneration(calibration);
290 
291  // visualizes the hypotheses
292  m_pHypothesisVisualization = new CHypothesisVisualization(calibration);
293 
294 
295  // screenshots, if wanted
296  m_sScreenshotPath = OLP_SCREENSHOT_PATH;
297  m_bMakeIntermediateScreenshots = OLP_MAKE_INTERMEDIATE_SCREENSHOTS;
298 
299  iterationCounter = 0;
300 
301 
302  ARMARX_INFO << "Connecting to " << robotStateProxyName;
303  robotStateProxy = getProxy<RobotStateComponentInterfacePrx>(robotStateProxyName);
304 
305 
306  localRobot = RemoteRobot::createLocalCloneFromFile(
308 
309 
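 // choose the OpenMP thread count: a compile-time OMP_NUM_THREADS define takes precedence, otherwise (nearly) all cores are used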
310  int nNumOMPThreads;
311 #if defined OMP_NUM_THREADS
312  nNumOMPThreads = OMP_NUM_THREADS;
313 #elif defined OLP_USE_DBVISION
314  nNumOMPThreads = omp_get_num_procs() - 1;
315 #else
316  nNumOMPThreads = omp_get_num_procs();
317 #endif
318 
319  if (nNumOMPThreads < 1)
320  {
321  nNumOMPThreads = 1;
322  }
323 
324  omp_set_num_threads(nNumOMPThreads);
325 
326  m_pObjectHypotheses = new CObjectHypothesisArray();
327  m_pConfirmedHypotheses = new CObjectHypothesisArray();
328  m_pInitialHypothesesAtLocalMaxima = new CObjectHypothesisArray();
329  m_pAllNewSIFTPoints = new CSIFTFeatureArray();
330  m_pAllOldSIFTPoints = new CSIFTFeatureArray();
331  m_pAllNewMSERs = new std::vector<CMSERDescriptor3D*>();
332  m_pAllOldMSERs = new std::vector<CMSERDescriptor3D*>();
333  m_pCorrespondingMSERs = new std::vector<CMSERDescriptor3D*>();
334  m_pAllNewDepthMapPoints = new std::vector<CHypothesisPoint*>();
335  m_pAllOldDepthMapPoints = new std::vector<CHypothesisPoint*>();
336 
337  m_pGaussBackground = new CGaussBackground(OLP_IMG_WIDTH, OLP_IMG_HEIGHT);
338 
339  Math3d::SetVec(m_tHeadToPlatformTransformation.translation, Math3d::zero_vec);
340  Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, Math3d::zero_vec);
341  Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, Math3d::unit_mat);
342  Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, Math3d::unit_mat);
343 
344  Math3d::SetVec(upwardsVector, Math3d::zero_vec);
345 
346 
347  listener = getTopic<ObjectLearningByPushingListenerPrx>("ObjectLearningByPushing");
348 
349  enableResultImages(
350  2, imageProviderInfo.imageFormat.dimension, imageProviderInfo.destinationImageType);
351 
352  // debug
353  m_pSegmentedBackgroundImage =
354  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
355  m_pDisparityImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
356 
357  connected = true;
358 
359  debugDrawer = getTopic<DebugDrawerInterfacePrx>(
360  getProperty<std::string>("DebugDrawerTopicName").getValue());
361  debugDrawer->clearLayer(getName());
362 
363  ARMARX_IMPORTANT << "init finished";
364 }
365 
366 void
367 ObjectLearningByPushing::onExitPointCloudAndImageProcessor()
368 {
369  ARMARX_VERBOSE << "entering onExitPointCloudAndImageProcessor()";
370 
371  delete m_pHypothesisVisualization;
372  delete m_pHypothesisGeneration;
373 
374  for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
375  {
376  delete (*m_pObjectHypotheses)[i];
377  }
378 
379  delete m_pObjectHypotheses;
380 
381  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
382  {
383  delete (*m_pConfirmedHypotheses)[i];
384  }
385 
386  delete m_pConfirmedHypotheses;
387  delete m_pInitialHypothesesAtLocalMaxima;
388 
389 
390  for (int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
391  {
392  delete (*m_pAllNewSIFTPoints)[i];
393  }
394 
395  delete m_pAllNewSIFTPoints;
396 
397  for (int i = 0; i < m_pAllOldSIFTPoints->GetSize(); i++)
398  {
399  delete (*m_pAllOldSIFTPoints)[i];
400  }
401 
402  delete m_pAllOldSIFTPoints;
403 
404  for (size_t i = 0; i < m_pAllNewMSERs->size(); i++)
405  {
406  delete m_pAllNewMSERs->at(i);
407  }
408 
409  delete m_pAllNewMSERs;
410 
411  for (size_t i = 0; i < m_pAllOldMSERs->size(); i++)
412  {
413  delete m_pAllOldMSERs->at(i);
414  }
415 
416  delete m_pAllOldMSERs;
417 
418  for (size_t i = 0; i < m_pCorrespondingMSERs->size(); i++)
419  {
420  delete m_pCorrespondingMSERs->at(i);
421  }
422 
423  delete m_pCorrespondingMSERs;
424 
425  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
426  {
427  delete m_pAllNewDepthMapPoints->at(i);
428  }
429 
430  delete m_pAllNewDepthMapPoints;
431 
432  for (size_t i = 0; i < m_pAllOldDepthMapPoints->size(); i++)
433  {
434  delete m_pAllOldDepthMapPoints->at(i);
435  }
436 
437  delete m_pAllOldDepthMapPoints;
438 
439  delete m_pGaussBackground;
440 }
441 
442 void
443 ObjectLearningByPushing::process()
444 {
445  if (!connected)
446  {
447  ARMARX_INFO << "process() called before ready (weird race condition with the "
448  "ImageProcessor::process)";
449  return;
450  }
451 
452  bool bMakeScreenshots = OLP_MAKE_RESULT_SCREENSHOTS;
453 
454  std::lock_guard<std::mutex> lock(processingLock);
455 
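 // state machine: the interface methods near the top of this file request a state; process() performs the work for that state and advances it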
456  switch (currentState)
457  {
458  case eCreatingInitialHypotheses:
459  {
460  // get new camera images and camera pose from robot
461  UpdateDataFromRobot();
462  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation,
463  tHeadToPlatformTransformation.rotation,
464  true);
465 
466  // create initial object hypotheses
467  CreateInitialObjectHypothesesInternal(
468  greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
469 
470  // show hypotheses
471  VisualizeHypotheses(greyImageLeft,
472  greyImageRight,
473  colorImageLeft,
474  colorImageRight,
475  false,
476  resultImageLeft,
477  resultImageRight,
478  bMakeScreenshots);
479  provideResultImages(resultImages);
480 
481  // return information about object hypotheses and go to new state
482  if (GetNumberOfNonconfirmedHypotheses() > 0)
483  {
484  ARMARX_INFO << "Found " << GetNumberOfNonconfirmedHypotheses()
485  << " possible objects";
486 
487  // return object position etc
488  ReportObjectPositionInformationToObserver();
489  listener->reportInitialObjectHypothesesCreated(true);
490 
491  currentState = eHasInitialHypotheses;
492  }
493  else
494  {
495  ARMARX_INFO << "No object hypothesis found";
496 
497  listener->reportInitialObjectHypothesesCreated(false);
498 
499  currentState = eNoHypotheses;
500  }
501 
502  iterationCounter++;
503 
504  break;
505  }
506 
507  case eValidatingInitialHypotheses:
508  {
509  // get new camera images and camera pose from robot
510  ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
511  UpdateDataFromRobot();
512  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation,
513  tHeadToPlatformTransformation.rotation,
514  true);
515 
516  // try to validate the hypotheses
517  bool validationSuccessful = ValidateInitialObjectHypothesesInternal(
518  greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
519 
520  // show initial hypotheses plus those generated in changed image areas
521  VisualizeHypotheses(greyImageLeft,
522  greyImageRight,
523  colorImageLeftOld,
524  colorImageRight,
525  false,
526  resultImageRight,
527  tempResultImage,
528  bMakeScreenshots);
529  // show validated hypotheses
530  VisualizeHypotheses(greyImageLeft,
531  greyImageRight,
532  colorImageLeft,
533  colorImageRight,
534  true,
535  resultImageLeft,
536  tempResultImage,
537  bMakeScreenshots);
538  provideResultImages(resultImages);
539 
540  if (validationSuccessful)
541  {
542  ARMARX_INFO << "At least one object hypothesis was confirmed";
543 
544  // return object position etc
545  ReportObjectPositionInformationToObserver();
546  listener->reportObjectHypothesesValidated(true);
547 
548  currentState = eHasConfirmedHypotheses;
549  }
550  else
551  {
552  ARMARX_INFO << "No object hypothesis was confirmed";
553 
554  listener->reportObjectHypothesesValidated(false);
555 
556  currentState = eNoHypotheses;
557  }
558 
559  iterationCounter++;
560 
561  break;
562  }
563 
564  case eRevalidatingConfirmedHypotheses:
565  {
566  // get new camera images and camera pose from robot
567  ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
568  UpdateDataFromRobot();
569  SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation,
570  tHeadToPlatformTransformation.rotation,
571  true);
572 
573  // try to revalidate the hypotheses
574  bool validationSuccessful = RevalidateConfirmedObjectHypothesesInternal(
575  greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
576 
577  // show validated hypothesis
578  VisualizeHypotheses(greyImageLeft,
579  greyImageRight,
580  colorImageLeft,
581  colorImageRight,
582  true,
583  resultImageLeft,
584  resultImageRight,
585  bMakeScreenshots);
586  provideResultImages(resultImages);
587 
588  ARMARX_INFO << "The confirmed object hypotheses were revalidated";
589  ARMARX_IMPORTANT << "Was revalidation successful: " << validationSuccessful;
590 
591  // return object position etc
592  ReportObjectPositionInformationToObserver();
593  listener->reportObjectHypothesesValidated(validationSuccessful);
594 
595  currentState = eHasConfirmedHypotheses;
596 
597  iterationCounter++;
598 
599  break;
600  }
601 
602  case eHasInitialHypotheses:
603  case eHasConfirmedHypotheses:
604  {
605  provideResultImages(resultImages);
606  usleep(300000);
607  break;
608  }
609 
610  case eNoHypotheses:
611  {
612  UpdateDataFromRobot();
613  ::ImageProcessor::CopyImage(cameraImages[0], resultImages[0]);
614  ::ImageProcessor::CopyImage(cameraImages[0], resultImages[1]);
615  provideResultImages(resultImages);
616  usleep(300000);
617  break;
618  }
619 
620  case eQuit:
621  {
622  ARMARX_IMPORTANT << "Received quit signal, but don't know what to do!";
623  break;
624  }
625 
626  default:
627  {
628  ARMARX_WARNING << "The current mode is not handled here!";
629  break;
630  }
631  }
632 }
633 
634 void
635 ObjectLearningByPushing::SwapAllPointsArraysToOld()
636 {
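 // move the current ('new') feature containers into the 'old' slots and leave empty 'new' containers for the next frame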
637  CSIFTFeatureArray* pTempSIFTPoints;
638  pTempSIFTPoints = m_pAllOldSIFTPoints;
639  m_pAllOldSIFTPoints = m_pAllNewSIFTPoints;
640  m_pAllNewSIFTPoints = pTempSIFTPoints;
641 
642  for (int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
643  {
644  delete (*m_pAllNewSIFTPoints)[i];
645  }
646 
647  m_pAllNewSIFTPoints->Clear();
648 
649  std::vector<CMSERDescriptor3D*>* pTempMSERs;
650  pTempMSERs = m_pAllOldMSERs;
651  m_pAllOldMSERs = m_pAllNewMSERs;
652  m_pAllNewMSERs = pTempMSERs;
653 
654  for (size_t i = 0; i < m_pAllNewMSERs->size(); i++)
655  {
656  delete m_pAllNewMSERs->at(i);
657  }
658 
659  m_pAllNewMSERs->clear();
660 
661  std::vector<CHypothesisPoint*>* pTempDepthMapPoints;
662  pTempDepthMapPoints = m_pAllOldDepthMapPoints;
663  m_pAllOldDepthMapPoints = m_pAllNewDepthMapPoints;
664  m_pAllNewDepthMapPoints = pTempDepthMapPoints;
665 
666  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
667  {
668  delete m_pAllNewDepthMapPoints->at(i);
669  }
670 
671  m_pAllNewDepthMapPoints->clear();
672 }
673 
674 void
675 ObjectLearningByPushing::CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
676  CByteImage* pImageGreyRight,
677  CByteImage* pImageColorLeft,
678  CByteImage* pImageColorRight,
679  int nImageNumber)
680 {
681  // clean up
682  for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
683  {
684  delete (*m_pObjectHypotheses)[i];
685  }
686 
687  m_pObjectHypotheses->Clear();
688 
689  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
690  {
691  delete (*m_pConfirmedHypotheses)[i];
692  }
693 
694  m_pConfirmedHypotheses->Clear();
695  m_pInitialHypothesesAtLocalMaxima->Clear();
696 
697  // get all interest points with descriptors
698  //m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageColorRight, pImageGreyLeft, pImageGreyRight, 2*OLP_DEPTH_MAP_PIXEL_DISTANCE,
699  // m_pStereoCalibration, *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, m_pDisparityImage);
700  m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft,
701  pImageGreyLeft,
702  pointCloudPtr,
703  OLP_DEPTH_MAP_PIXEL_DISTANCE,
704  *m_pAllNewSIFTPoints,
705  *m_pAllNewMSERs,
706  *m_pAllNewDepthMapPoints,
707  calibration);
708  //m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE, *m_pAllNewSIFTPoints,
709  // *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
710 
711  if (m_bMakeIntermediateScreenshots)
712  {
713  std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
714  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
715  m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
716 
717  // // TODO: test!
718  // CByteImage* pEdges = new CByteImage(m_pDisparityImage);
719  // COLPTools::CalculateEdgeImageFromImageAndDisparity(pImageGreyLeft, m_pDisparityImage, pEdges);
720  // sScreenshotFile = m_sScreenshotPath+"edges0000.bmp";
721  // COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
722  // pEdges->SaveToFile(sScreenshotFile.c_str());
723  // delete pEdges;
724  }
725 
726 
727  // learn background
728  m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
729 
730  // create hypotheses
731  m_pHypothesisGeneration->FindObjectHypotheses(pImageColorLeft,
732  pImageColorRight,
733  pImageGreyLeft,
734  pImageGreyRight,
735  *m_pAllNewSIFTPoints,
736  *m_pAllNewMSERs,
737  *m_pAllNewDepthMapPoints,
738  *m_pObjectHypotheses);
739 
740 
741 #ifdef OLP_FILTER_INITIAL_HYPOTHESES_WITH_MAXIMUMNESS
742 
743  // remove hypotheses that are not local maxima
744  std::vector<Vec3d> aMaxima;
745  CByteImage* pMaximumnessImage =
746  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
747  CSaliencyCalculation::FindLocalMaxima(*m_pAllNewDepthMapPoints,
748  m_tHeadToPlatformTransformation.rotation,
749  m_tHeadToPlatformTransformation.translation,
750  calibration,
751  aMaxima,
752  pMaximumnessImage);
753 
754  if (m_bMakeIntermediateScreenshots)
755  {
756  std::string sScreenshotFile = m_sScreenshotPath + "max0000.bmp";
757  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
758  pMaximumnessImage->SaveToFile(sScreenshotFile.c_str());
759  }
760 
761  for (int n = 0; n < m_pObjectHypotheses->GetSize(); n++)
762  {
763  CObjectHypothesis* pHypothesis = (*m_pObjectHypotheses)[n];
764  float fForegroundRatio;
765  int nNumForegroundPoints;
766  COLPTools::CalculateForegroundRatioOfHypothesis(
767  pHypothesis, pMaximumnessImage, calibration, fForegroundRatio, nNumForegroundPoints);
768 
769  //ARMARX_VERBOSE << "Hypo " << n << ": " << pHypothesis->aNewPoints.size() << " p, maxness " << 100*fForegroundRatio;
770  if (fForegroundRatio > 0.4f)
771  {
772  m_pInitialHypothesesAtLocalMaxima->AddElement(pHypothesis);
773  }
774  }
775 
776  if ((m_pInitialHypothesesAtLocalMaxima->GetSize() == 0) && (m_pObjectHypotheses->GetSize() > 0))
777  {
778  m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[0]);
779  }
780 
781  m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft,
782  pImageColorRight,
783  *m_pInitialHypothesesAtLocalMaxima,
784  *m_pAllOldSIFTPoints,
785  *m_pAllOldMSERs,
786  *m_pCorrespondingMSERs,
787  false,
788  NULL,
789  NULL,
790  m_bMakeIntermediateScreenshots);
791 
792  delete pMaximumnessImage;
793 #else
794 
795  for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
796  {
797  m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[i]);
798  }
799 
800 #endif
801 
802  // copy all points to arrays for old ones
803  SwapAllPointsArraysToOld();
804 }
805 
806 bool
807 ObjectLearningByPushing::ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
808  CByteImage* pImageGreyRight,
809  CByteImage* pImageColorLeft,
810  CByteImage* pImageColorRight,
811  int nImageNumber)
812 {
813  //timeval tStart, tEnd;
814 
815  // clean up
816  for (int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
817  {
818  delete m_pCorrespondingMSERs->at(i);
819  }
820 
821  m_pCorrespondingMSERs->clear();
822 
823  // calculate background segmentation
824  //gettimeofday(&tStart, 0);
825  m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
826  BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
827  m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
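 // re-learn the background from the current frame so the next validation segments against it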
828 
829  //gettimeofday(&tEnd, 0);
830  //long tTimeDiff = (1000*tEnd.tv_sec+tEnd.tv_usec/1000) - (1000*tStart.tv_sec+tStart.tv_usec/1000);
831  //ARMARX_VERBOSE << "Time for background: " << tTimeDiff << " ms";
832  if (m_bMakeIntermediateScreenshots)
833  {
834  std::string sScreenshotFile = m_sScreenshotPath + "bg0000.bmp";
835  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
836  m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
837  }
838 
839  // find new features in the current image
840  m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft,
841  pImageGreyLeft,
842  pointCloudPtr,
843  OLP_DEPTH_MAP_PIXEL_DISTANCE,
844  *m_pAllNewSIFTPoints,
845  *m_pAllNewMSERs,
846  *m_pAllNewDepthMapPoints,
847  calibration);
848 
849 
850  if (m_bMakeIntermediateScreenshots)
851  {
852  std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
853  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
854  m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
855 
856  // TODO: test!
857  CByteImage* pEdges = new CByteImage(m_pDisparityImage);
858  COLPTools::CalculateEdgeImageFromImageAndDisparity(
859  pImageGreyLeft, m_pDisparityImage, pEdges);
860  sScreenshotFile = m_sScreenshotPath + "edges0000.bmp";
861  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
862  pEdges->SaveToFile(sScreenshotFile.c_str());
863  delete pEdges;
864  }
865 
866  CByteImage* pHSVImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
867  ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
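 // HSV version of the left image; it is passed to the hypothesis validation below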
868 
869 
870  CHypothesisValidationRGBD::ValidateInitialHypotheses(
871  *m_pAllNewDepthMapPoints,
872  m_pSegmentedBackgroundImage,
873  pHSVImage,
874  calibration,
875  upwardsVector,
876  m_pObjectHypotheses,
877  m_pConfirmedHypotheses);
878 
879 
880  int nNumValidatedObjects = m_pConfirmedHypotheses->GetSize();
881 
882  // copy all points to arrays for old ones
883  SwapAllPointsArraysToOld();
884 
885  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
886  {
887  ARMARX_VERBOSE << "Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
888  << " has been validated. "
889  << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size()
890  << " confirmed points, " << (*m_pConfirmedHypotheses)[i]->aNewPoints.size()
891  << " new candidates. Position: (" << (*m_pConfirmedHypotheses)[i]->vCenter.x
892  << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.y << ", "
893  << (*m_pConfirmedHypotheses)[i]->vCenter.z << ")";
894  }
895 
896  // sort hypotheses by number of confirmed points, descending (insertion sort)
897  for (int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
898  {
899  for (int j = i; j > 0; j--)
900  {
901  if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >=
902  (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
903  {
904  break;
905  }
906 
907  CObjectHypothesis* pTemp = (*m_pConfirmedHypotheses)[j];
908  (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
909  (*m_pConfirmedHypotheses)[j - 1] = pTemp;
910  }
911  }
912 
914  {
915  SaveHistogramOfConfirmedHypothesis("NewObject", nImageNumber);
916  }
917 
918  delete pHSVImage;
919 
920  return (nNumValidatedObjects > 0);
921 }
922 
923 bool
924 ObjectLearningByPushing::RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft,
925  CByteImage* pImageGreyRight,
926  CByteImage* pImageColorLeft,
927  CByteImage* pImageColorRight,
928  int nImageNumber)
929 {
930  //timeval tStart, tEnd;
931 
932  // clean up
933  for (int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
934  {
935  delete m_pCorrespondingMSERs->at(i);
936  }
937 
938  m_pCorrespondingMSERs->clear();
939 
940  // calculate background segmentation
941  //gettimeofday(&tStart, 0);
942  m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
943  BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
944  m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
945 
946  //gettimeofday(&tEnd, 0);
947  //long tTimeDiff = (1000*tEnd.tv_sec+tEnd.tv_usec/1000) - (1000*tStart.tv_sec+tStart.tv_usec/1000);
948  //ARMARX_VERBOSE << "Time for background: " << tTimeDiff << " ms";
949  if (m_bMakeIntermediateScreenshots)
950  {
951  std::string sScreenshotFile = m_sScreenshotPath + "bg0000.bmp";
952  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
953  m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
954  }
955 
956 
957  // find new features in the current image
958  m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft,
959  pImageGreyLeft,
960  pointCloudPtr,
961  OLP_DEPTH_MAP_PIXEL_DISTANCE,
962  *m_pAllNewSIFTPoints,
963  *m_pAllNewMSERs,
964  *m_pAllNewDepthMapPoints,
965  calibration);
966 
967 
969  {
970  ARMARX_INFO << "Saving complete scene pointcloud";
971 
972  // get subset of the depth map without NaN and (0,0,0) points
973  std::vector<Vec3d> all3DPoints;
974 
975  for (size_t i = 0; i < pointCloudPtr->size(); i++)
976  {
977  pcl::PointXYZRGBA point = pointCloudPtr->at(i);
978 
979  if (!isnan(point.x))
980  {
981  if (point.x * point.x + point.y * point.y + point.z * point.z > 0)
982  {
983  Vec3d point3d = {point.x, point.y, point.z};
984  all3DPoints.push_back(point3d);
985  }
986  }
987  }
988 
989  // all 3D points
990  setlocale(LC_NUMERIC, "C");
991  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
992  sFileName.append("all3Dpoints00.dat");
993  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
994  FILE* pFile = fopen(sFileName.c_str(), "wt");
995  fprintf(pFile, "%ld \n", all3DPoints.size());
996  Vec3d center = {0, 0, 0};
997 
998  for (size_t i = 0; i < all3DPoints.size(); i++)
999  {
1000  fprintf(pFile,
1001  "%e %e %e \n",
1002  all3DPoints.at(i).x,
1003  all3DPoints.at(i).y,
1004  all3DPoints.at(i).z);
1005  Math3d::AddToVec(center, all3DPoints.at(i));
1006  }
1007 
1008  fclose(pFile);
1009 
1010  // all 3D points with zero mean
1011  sFileName = OLP_OBJECT_LEARNING_DIR;
1012  sFileName.append("all3Dpoints-centered00.dat");
1013  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
1014  pFile = fopen(sFileName.c_str(), "wt");
1015  fprintf(pFile, "%ld \n", all3DPoints.size());
1016  Math3d::MulVecScalar(center, 1.0 / all3DPoints.size(), center);
1017 
1018  for (size_t i = 0; i < all3DPoints.size(); i++)
1019  {
1020  fprintf(pFile,
1021  "%e %e %e \n",
1022  all3DPoints.at(i).x - center.x,
1023  all3DPoints.at(i).y - center.y,
1024  all3DPoints.at(i).z - center.z);
1025  }
1026 
1027  fclose(pFile);
1028 
1029  // all used points
1030  setlocale(LC_NUMERIC, "C");
1031  sFileName = OLP_OBJECT_LEARNING_DIR;
1032  sFileName.append("allusedpoints00.dat");
1033  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
1034  pFile = fopen(sFileName.c_str(), "wt");
1035  fprintf(pFile, "%ld \n", m_pAllNewDepthMapPoints->size()); // count must match the points written below
1036  Math3d::SetVec(center, Math3d::zero_vec);
1037 
1038  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
1039  {
1040  fprintf(pFile,
1041  "%e %e %e \n",
1042  m_pAllNewDepthMapPoints->at(i)->vPosition.x,
1043  m_pAllNewDepthMapPoints->at(i)->vPosition.y,
1044  m_pAllNewDepthMapPoints->at(i)->vPosition.z);
1045  Math3d::AddToVec(center, m_pAllNewDepthMapPoints->at(i)->vPosition);
1046  }
1047 
1048  fclose(pFile);
1049 
1050  // all used points with zero mean
1051  sFileName = OLP_OBJECT_LEARNING_DIR;
1052  sFileName.append("allusedpoints-centered00.dat");
1053  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
1054  pFile = fopen(sFileName.c_str(), "wt");
1055  fprintf(pFile, "%ld \n", m_pAllNewDepthMapPoints->size());
1056  Math3d::MulVecScalar(center, 1.0 / m_pAllNewDepthMapPoints->size(), center);
1057 
1058  for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
1059  {
1060  fprintf(pFile,
1061  "%e %e %e \n",
1062  m_pAllNewDepthMapPoints->at(i)->vPosition.x - center.x,
1063  m_pAllNewDepthMapPoints->at(i)->vPosition.y - center.y,
1064  m_pAllNewDepthMapPoints->at(i)->vPosition.z - center.z);
1065  }
1066 
1067  fclose(pFile);
1068 
1069  // camera image
1070  sFileName = OLP_OBJECT_LEARNING_DIR;
1071  sFileName.append("cameraimage00.bmp");
1072  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
1073  pImageColorLeft->SaveToFile(sFileName.c_str());
1074 
1075  // vector in direction of gravity (up)
1076  sFileName = OLP_OBJECT_LEARNING_DIR;
1077  sFileName.append("upwardsvector00.dat");
1078  COLPTools::SetNumberInFileName(sFileName, nImageNumber, 2);
1079  pFile = fopen(sFileName.c_str(), "wt");
1080  fprintf(pFile, "%e %e %e \n", upwardsVector.x, upwardsVector.y, upwardsVector.z);
1081  fclose(pFile);
1082  }
1083 
1084 
1085  if (m_bMakeIntermediateScreenshots)
1086  {
1087  std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
1088  COLPTools::SetNumberInFileName(sScreenshotFile, nImageNumber);
1089  m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
1090  }
1091 
1092 
1093  // re-validate the confirmed hypothesis
1094 
1095  CByteImage* pHSVImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
1096  ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
1097 
1098  CHypothesisValidationRGBD::RevalidateHypotheses(*m_pAllNewDepthMapPoints,
1099  m_pSegmentedBackgroundImage,
1100  pHSVImage,
1101  calibration,
1102  upwardsVector,
1103  m_pConfirmedHypotheses);
1104 
1105 
1106  // sort hypotheses by number of confirmed points, descending (insertion sort)
1107  for (int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
1108  {
1109  for (int j = i; j > 0; j--)
1110  {
1111  if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >=
1112  (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
1113  {
1114  break;
1115  }
1116 
1117  CObjectHypothesis* pTemp = (*m_pConfirmedHypotheses)[j];
1118  (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
1119  (*m_pConfirmedHypotheses)[j - 1] = pTemp;
1120  }
1121  }
1122 
1123 
1124  // remove hypotheses that did not move during the last nNumTimesHypothesisMustHaveMoved validations, but keep at least one
1125  const int nNumTimesHypothesisMustHaveMoved = 1;
1126  bool nothingMoved = false;
1127 
1128  for (int i = m_pConfirmedHypotheses->GetSize() - 1;
1129  i >= 0 && m_pConfirmedHypotheses->GetSize() > 1;
1130  i--)
1131  {
1132  bool bHasMoved = false;
1133  const int nNumEntries = (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.size();
1134 
1135  for (int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved;
1136  j--)
1137  {
1138  bHasMoved |= (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.at(j);
1139  }
1140 
1141  if (!bHasMoved)
1142  {
1143  ARMARX_VERBOSE << "Removing hypothesis "
1144  << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
1145  << " because it did not move the last "
1146  << nNumTimesHypothesisMustHaveMoved << " times.";
1147  delete (*m_pConfirmedHypotheses)[i];
1148 
1149  for (int k = i; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
1150  {
1151  (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
1152  }
1153 
1154  m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
1155  }
1156  }
1157 
1158  // if none of the hypotheses moved, return false in the end
1159  if (m_pConfirmedHypotheses->GetSize() == 1)
1160  {
1161  bool bHasMoved = false;
1162  const int nNumEntries = (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.size();
1163 
1164  for (int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved;
1165  j--)
1166  {
1167  bHasMoved |= (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.at(j);
1168  }
1169 
1170  if (!bHasMoved)
1171  {
1172  nothingMoved = true;
1173  }
1174  }
1175 
1176 
1177  // fuse hypotheses that overlap significantly
1178  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
1179  {
1180  for (int j = i + 1; j < m_pConfirmedHypotheses->GetSize(); j++)
1181  {
1182  float fOverlapRatio = COLPTools::GetHypothesesIntersectionRatio(
1183  (*m_pConfirmedHypotheses)[i], (*m_pConfirmedHypotheses)[j], calibration);
1184  ARMARX_VERBOSE << "Overlap " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " "
1185  << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber << ": "
1186  << fOverlapRatio;
1187 
1188  if (fOverlapRatio > 0.5)
1189  {
1190  ARMARX_VERBOSE << "Removing hypothesis "
1191  << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber;
1192  delete (*m_pConfirmedHypotheses)[j];
1193 
1194  for (int k = j; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
1195  {
1196  (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
1197  }
1198 
1199  m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
1200  j--;
1201  }
1202  }
1203  }
1204 
1205 
1206  // copy all points to arrays for old ones
1207  SwapAllPointsArraysToOld();
1208 
1209  for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
1210  {
1211  ARMARX_VERBOSE << "Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
1212  << " has been revalidated. "
1213  << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size()
1214  << " confirmed points, " << (*m_pConfirmedHypotheses)[i]->aNewPoints.size()
1215  << " new candidates. Position: (" << (*m_pConfirmedHypotheses)[i]->vCenter.x
1216  << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.y << ", "
1217  << (*m_pConfirmedHypotheses)[i]->vCenter.z << ")";
1218  }
1219 
1221  {
1222  SaveHistogramOfConfirmedHypothesis("NewObject", nImageNumber);
1223  }
1224 
1225  delete pHSVImage;
1226 
1227  // return false if none of the hypotheses moved (or the movement estimation failed)
1228  return !nothingMoved;
1229 }
1230 
1231 void
1232 ObjectLearningByPushing::UpdateDataFromRobot()
1233 {
1234  if (!waitForPointClouds(pointcloudProviderName) || !waitForImages(imageProviderName))
1235  {
1236  ARMARX_WARNING << "Timeout or error while waiting for point cloud or image data";
1237  }
1238  else
1239  {
1240  if (getPointClouds(pointcloudProviderName, pointCloudPtr))
1241  {
1242  // get images
1243  Ice::Long timestamp;
1244  if (pointCloudPtr->isOrganized())
1245  {
1246  size_t h = cameraImages[0]->height; // pointCloudPtr->height;
1247  size_t w = cameraImages[0]->width; // pointCloudPtr->width;
1248 
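 // rebuild the left color image pixel-by-pixel from the organized RGBA point cloud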
1249  for (size_t y = 0; y < h; y++)
1250  {
1251  for (size_t x = 0; x < w; x++)
1252  {
1253  const pcl::PointXYZRGBA& p = pointCloudPtr->at(x, y);
1254  cameraImages[0]->pixels[3 * (y * w + x) + 0] = p.r;
1255  cameraImages[0]->pixels[3 * (y * w + x) + 1] = p.g;
1256  cameraImages[0]->pixels[3 * (y * w + x) + 2] = p.b;
1257  }
1258  }
1259 
1260  timestamp = pointCloudPtr->header.stamp;
1261  }
1262  else
1263  {
1264  armarx::MetaInfoSizeBasePtr metaInfo;
1265  int nNumberImages = getImages(imageProviderName, cameraImages, metaInfo);
1266  ARMARX_DEBUG << "got " << nNumberImages << " images";
1267 
1268  timestamp = metaInfo->timeProvided;
1269  }
1270 
1271  RemoteRobot::synchronizeLocalCloneToTimestamp(localRobot, robotStateProxy, timestamp);
1272 
1273  ::ImageProcessor::ConvertImage(colorImageLeft, greyImageLeft);
1274  //::ImageProcessor::ConvertImage(colorImageRight, greyImageRight);
1275 
1276  Eigen::Matrix4f cameraNodePose =
1277  localRobot->getRobotNode(cameraFrameName)->getPoseInRootFrame();
1278  tHeadToPlatformTransformation.rotation.r1 = cameraNodePose(0, 0);
1279  tHeadToPlatformTransformation.rotation.r2 = cameraNodePose(0, 1);
1280  tHeadToPlatformTransformation.rotation.r3 = cameraNodePose(0, 2);
1281  tHeadToPlatformTransformation.rotation.r4 = cameraNodePose(1, 0);
1282  tHeadToPlatformTransformation.rotation.r5 = cameraNodePose(1, 1);
1283  tHeadToPlatformTransformation.rotation.r6 = cameraNodePose(1, 2);
1284  tHeadToPlatformTransformation.rotation.r7 = cameraNodePose(2, 0);
1285  tHeadToPlatformTransformation.rotation.r8 = cameraNodePose(2, 1);
1286  tHeadToPlatformTransformation.rotation.r9 = cameraNodePose(2, 2);
1287  tHeadToPlatformTransformation.translation.x = cameraNodePose(0, 3);
1288  tHeadToPlatformTransformation.translation.y = cameraNodePose(1, 3);
1289  tHeadToPlatformTransformation.translation.z = cameraNodePose(2, 3);
1290 
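 // express the root frame's up direction (z-axis) in camera coordinates; stored as the gravity reference 'upwardsVector'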
1291  Eigen::Vector3f upInRootCS = {0, 0, 1};
1292  Eigen::Vector3f upInCamCS = cameraNodePose.block<3, 3>(0, 0).inverse() * upInRootCS;
1293  Math3d::SetVec(upwardsVector, upInCamCS(0), upInCamCS(1), upInCamCS(2));
1294  }
1295  }
1296 }
1297 
1298 bool
1299 ObjectLearningByPushing::SaveHistogramOfConfirmedHypothesis(std::string sObjectName,
1300  int nDescriptorNumber)
1301 {
1302  if (m_pConfirmedHypotheses->GetSize() > 0)
1303  {
1304  ARMARX_INFO << "Saving object descriptor";
1306  (*m_pConfirmedHypotheses)[0], sObjectName, nDescriptorNumber);
1308  (*m_pConfirmedHypotheses)[0], sObjectName, nDescriptorNumber);
1309  return true;
1310  }
1311  else
1312  {
1313  ARMARX_VERBOSE << "No confirmed hypothesis that can be saved!";
1314  return false;
1315  }
1316 }
1317 
1318 void
1319 ObjectLearningByPushing::RecognizeHypotheses(CByteImage* pImageColorLeft,
1320  const std::string objectName)
1321 {
1322  CByteImage* pHSVImage =
1323  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24, false);
1324  ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
1325 
1326  if (!objectName.empty())
1327  {
1329  pImageColorLeft,
1330  *m_pAllNewDepthMapPoints,
1331  calibration,
1332  upwardsVector,
1333  objectName,
1334  5);
1335  }
1336  else if (m_pConfirmedHypotheses->GetSize() != 0 && false)
1337  {
1338  Vec3d vPosition;
1339  Mat3d mOrientation;
1340  float fdistance, fProbability;
1341  CObjectRecognition::FindObjectRGBD((*m_pConfirmedHypotheses)[0],
1342  pHSVImage,
1343  *m_pAllNewDepthMapPoints,
1344  calibration,
1345  upwardsVector,
1346  vPosition,
1347  mOrientation,
1348  fdistance,
1349  fProbability);
1350  }
1351  else if (false)
1352  {
1353  std::vector<std::string> aNames;
1354  std::vector<Vec3d> aPositions;
1355  std::vector<Mat3d> aOrientations;
1356  std::vector<float> aProbabilities;
1358  pImageColorLeft,
1359  *m_pAllNewDepthMapPoints,
1360  calibration,
1361  upwardsVector,
1362  aNames,
1363  aPositions,
1364  aOrientations,
1365  aProbabilities);
1366  //CObjectRecognition::FindAllObjectsRFCH(pImageColorLeft, calibration, *m_pAllOldDepthMapPoints, aNames, aPositions, aOrientations, aProbabilities);
1367  }
1368  else if (false)
1369  {
1370  //CObjectHypothesis* pHypothesis;
1371  //Vec3d vPosition;
1372  //Mat3d mOrientation;
1373  //float fdistance, fProbability;
1374  //CObjectRecognition::LoadObjectDescriptorRGBD("BlueBowl", 3, pHypothesis);
1375  //CObjectRecognition::FindObject(pHypothesis, pHSVImage, pImageColorLeft, *m_pAllOldDepthMapPoints, calibration, vPosition, mOrientation, fdistance, fProbability);
1376  }
1377 
1378 
1379  delete pHSVImage;
1380 }
1381 
1382 void
1383 ObjectLearningByPushing::VisualizeHypotheses(CByteImage* pImageGreyLeft,
1384  CByteImage* pImageGreyRight,
1385  CByteImage* pImageColorLeft,
1386  CByteImage* pImageColorRight,
1387  bool bShowConfirmedHypotheses,
1388  CByteImage* resultImageLeft,
1389  CByteImage* resultImageRight,
1390  bool bMakeScreenshot)
1391 {
1392  if (bShowConfirmedHypotheses)
1393  {
1394  m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft,
1395  pImageColorRight,
1396  *m_pConfirmedHypotheses,
1397  *m_pAllOldSIFTPoints,
1398  *m_pAllOldMSERs,
1399  *m_pCorrespondingMSERs,
1400  true,
1401  resultImageLeft,
1402  resultImageRight,
1403  bMakeScreenshot);
1404  }
1405  else
1406  {
1407  m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft,
1408  pImageColorRight,
1409  *m_pObjectHypotheses,
1410  *m_pAllOldSIFTPoints,
1411  *m_pAllOldMSERs,
1412  *m_pCorrespondingMSERs,
1413  false,
1414  resultImageLeft,
1415  resultImageRight,
1416  bMakeScreenshot);
1417  }
1418 }
1419 
1420 Vec3d
1421 ObjectLearningByPushing::GetObjectPosition(float& fObjectExtent, bool bPreferCentralObject)
1422 {
1423  Vec3d vRet = {0, 0, 0};
1424  fObjectExtent = 0;
1425 
1426  std::vector<CHypothesisPoint*>* pPoints;
1427  CObjectHypothesis* pHypothesis = SelectPreferredHypothesis(pPoints, bPreferCentralObject);
1428 
1429  if (pHypothesis)
1430  {
1431  vRet.x = pHypothesis->vCenter.x;
1432  vRet.y = pHypothesis->vCenter.y;
1433  vRet.z = pHypothesis->vCenter.z;
1434  fObjectExtent = pHypothesis->fMaxExtent;
1435  }
1436 
1437  return vRet;
1438 }
1439 
1440 void
1441 ObjectLearningByPushing::ApplyHeadMotionTransformation(Mat3d mRotation, Vec3d vTranslation)
1442 {
1443  if (m_pConfirmedHypotheses->GetSize() > 0)
1444  {
1445  for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aNewPoints.size(); i++)
1446  {
1447  Math3d::MulMatVec(mRotation,
1448  (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition,
1449  vTranslation,
1450  (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition);
1451  }
1452 
1453  for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.size(); i++)
1454  {
1455  Math3d::MulMatVec(mRotation,
1456  (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition,
1457  vTranslation,
1458  (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition);
1459  }
1460 
1461  for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.size(); i++)
1462  {
1463  Math3d::MulMatVec(mRotation,
1464  (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition,
1465  vTranslation,
1466  (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition);
1467  }
1468  }
1469 }
1470 
1471 void
1472 ObjectLearningByPushing::GetHypothesisBoundingBox(int& nMinX, int& nMaxX, int& nMinY, int& nMaxY)
1473 {
1474  std::vector<CHypothesisPoint*>* pPoints;
1475  SelectPreferredHypothesis(pPoints);
1476 
1477  nMinX = 1000;
1478  nMaxX = -1000;
1479  nMinY = 1000;
1480  nMaxY = -1000;
1481  Vec2d vPoint2D;
1482 
1483  if (m_pConfirmedHypotheses->GetSize() > 0)
1484  {
1485  for (size_t i = 0; i < pPoints->size(); i++)
1486  {
1487  calibration->CameraToImageCoordinates(pPoints->at(i)->vPosition, vPoint2D, false);
1488 
1489  if (vPoint2D.x < nMinX)
1490  {
1491  nMinX = vPoint2D.x;
1492  }
1493 
1494  if (vPoint2D.x > nMaxX)
1495  {
1496  nMaxX = vPoint2D.x;
1497  }
1498 
1499  if (vPoint2D.y < nMinY)
1500  {
1501  nMinY = vPoint2D.y;
1502  }
1503 
1504  if (vPoint2D.y > nMaxY)
1505  {
1506  nMaxY = vPoint2D.y;
1507  }
1508  }
1509  }
1510 
1511  ARMARX_VERBOSE << "ObjectLearningByPushing::GetHypothesisBoundingBox: " << nMinX << " " << nMaxX
1512  << " " << nMinY << " " << nMaxY;
1513 }
1514 
1515 void
1516 ObjectLearningByPushing::GetHypothesisPrincipalAxesAndBoundingBox(Vec3d& vPrincipalAxis1,
1517  Vec3d& vPrincipalAxis2,
1518  Vec3d& vPrincipalAxis3,
1519  Vec3d& vEigenValues,
1520  Vec3d& vMaxExtentFromCenter,
1521  Vec2d& vBoundingBoxLU,
1522  Vec2d& vBoundingBoxRU,
1523  Vec2d& vBoundingBoxLL,
1524  Vec2d& vBoundingBoxRL)
1525 {
1526  std::vector<CHypothesisPoint*>* pPoints;
1527  SelectPreferredHypothesis(pPoints);
1528 
1529  const int nNumberOfSamples = pPoints->size();
1530 
1531  if (nNumberOfSamples < 3)
1532  {
1533  ARMARX_WARNING << "Hypothesis contains " << nNumberOfSamples << " points - aborting!";
1534  return;
1535  }
1536 
1537  // compute the principal axes with PCA
1538 
1539  Vec3d vAvgPoint = {0.0f, 0.0f, 0.0f};
1540 
1541  for (int i = 0; i < nNumberOfSamples; i++)
1542  {
1543  vAvgPoint.x += pPoints->at(i)->vPosition.x;
1544  vAvgPoint.y += pPoints->at(i)->vPosition.y;
1545  vAvgPoint.z += pPoints->at(i)->vPosition.z;
1546  }
1547 
1548  vAvgPoint.x /= (float)nNumberOfSamples;
1549  vAvgPoint.y /= (float)nNumberOfSamples;
1550  vAvgPoint.z /= (float)nNumberOfSamples;
1551  //ARMARX_VERBOSE << "vAvgPoint: (" << vAvgPoint.x << ", " << vAvgPoint.y << ", " << vAvgPoint.z << ")";
1552 
1553  CFloatMatrix mSamplePoints(3, nNumberOfSamples);
1554 
1555  for (int i = 0; i < nNumberOfSamples; i++)
1556  {
1557  mSamplePoints(0, i) = pPoints->at(i)->vPosition.x - vAvgPoint.x;
1558  mSamplePoints(1, i) = pPoints->at(i)->vPosition.y - vAvgPoint.y;
1559  mSamplePoints(2, i) = pPoints->at(i)->vPosition.z - vAvgPoint.z;
1560  }
1561 
1562  CFloatMatrix mEigenVectors(3, 3);
1563  CFloatMatrix mEigenValues(1, 3);
1564 
1565  LinearAlgebra::PCA(&mSamplePoints, &mEigenVectors, &mEigenValues);
1566 
1567  vPrincipalAxis1.x = mEigenVectors(0, 0);
1568  vPrincipalAxis1.y = mEigenVectors(1, 0);
1569  vPrincipalAxis1.z = mEigenVectors(2, 0);
1570 
1571  vPrincipalAxis2.x = mEigenVectors(0, 1);
1572  vPrincipalAxis2.y = mEigenVectors(1, 1);
1573  vPrincipalAxis2.z = mEigenVectors(2, 1);
1574 
1575  vPrincipalAxis3.x = mEigenVectors(0, 2);
1576  vPrincipalAxis3.y = mEigenVectors(1, 2);
1577  vPrincipalAxis3.z = mEigenVectors(2, 2);
1578 
1579  vEigenValues.x = mEigenValues(0, 0);
1580  vEigenValues.y = mEigenValues(0, 1);
1581  vEigenValues.z = mEigenValues(0, 2);
1582  //ARMARX_VERBOSE << "Eigenvalues: (" << pow(vEigenValues.x, 0.3333f) << ", " << pow(vEigenValues.y, 0.3333f) << ", " << pow(vEigenValues.z, 0.3333f) << ")";
1583 
1584  // matrix that transforms the 3D points into the coordinate system
1585  // given by the eigenvectors
1586  Mat3d mTransformation;
1587  mTransformation.r1 = mEigenVectors(0, 0);
1588  mTransformation.r2 = mEigenVectors(1, 0);
1589  mTransformation.r3 = mEigenVectors(2, 0);
1590  mTransformation.r4 = mEigenVectors(0, 1);
1591  mTransformation.r5 = mEigenVectors(1, 1);
1592  mTransformation.r6 = mEigenVectors(2, 1);
1593  mTransformation.r7 = mEigenVectors(0, 2);
1594  mTransformation.r8 = mEigenVectors(1, 2);
1595  mTransformation.r9 = mEigenVectors(2, 2);
1596 
1597  // find the maximal extent along the principal axes
1598  Vec3d vTransformedPoint;
1599  float fMaxExtentX = 0, fMaxExtentY = 0, fMaxExtentZ = 0;
1600 
1601  for (int i = 0; i < nNumberOfSamples; i++)
1602  {
1603  Vec3d vTempPoint = {mSamplePoints(0, i),
1604  mSamplePoints(1, i),
1605  mSamplePoints(2, i)}; // = original point minus average point
1606  Math3d::MulMatVec(mTransformation, vTempPoint, vTransformedPoint);
1607 
1608  if (fabsf(vTransformedPoint.x) > fMaxExtentX)
1609  {
1610  fMaxExtentX = fabsf(vTransformedPoint.x);
1611  }
1612 
1613  if (fabsf(vTransformedPoint.y) > fMaxExtentY)
1614  {
1615  fMaxExtentY = fabsf(vTransformedPoint.y);
1616  }
1617 
1618  if (fabsf(vTransformedPoint.z) > fMaxExtentZ)
1619  {
1620  fMaxExtentZ = fabsf(vTransformedPoint.z);
1621  }
1622  }
1623 
1624  vMaxExtentFromCenter.x = fMaxExtentX;
1625  vMaxExtentFromCenter.y = fMaxExtentY;
1626  vMaxExtentFromCenter.z = fMaxExtentZ;
1627  //ARMARX_VERBOSE << "vMaxExtentFromCenter: (" << vMaxExtentFromCenter.x << ", " << vMaxExtentFromCenter.y << ", " << vMaxExtentFromCenter.z << ")";
1628 
1629 
1630  // calculate the corners of the surface defined by the first two principal axes
1631  Vec3d vTranslationPrincipalAxis1, vTranslationPrincipalAxis2;
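 // half side length along each principal axis: mean of 2.5 * cbrt(eigenvalue) and the observed maximum extent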
1632  Math3d::MulVecScalar(vPrincipalAxis1,
1633  0.5f * (2.5f * pow(vEigenValues.x, 0.3333f) + vMaxExtentFromCenter.x),
1634  vTranslationPrincipalAxis1);
1635  Math3d::MulVecScalar(vPrincipalAxis2,
1636  0.5f * (2.5f * pow(vEigenValues.y, 0.3333f) + vMaxExtentFromCenter.y),
1637  vTranslationPrincipalAxis2);
1638 
1639  Vec3d vCorner1, vCorner2, vCorner3, vCorner4;
1640 
1641  Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner1);
1642  Math3d::SubtractFromVec(vCorner1, vTranslationPrincipalAxis2);
1643 
1644  Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner2);
1645  Math3d::AddToVec(vCorner2, vTranslationPrincipalAxis2);
1646 
1647  Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner3);
1648  Math3d::SubtractFromVec(vCorner3, vTranslationPrincipalAxis2);
1649 
1650  Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner4);
1651  Math3d::AddToVec(vCorner4, vTranslationPrincipalAxis2);
1652 
1653  // project the corners into the image
1654  calibration->CameraToImageCoordinates(vCorner1, vBoundingBoxLU, false);
1655  calibration->CameraToImageCoordinates(vCorner2, vBoundingBoxRU, false);
1656  calibration->CameraToImageCoordinates(vCorner3, vBoundingBoxLL, false);
1657  calibration->CameraToImageCoordinates(vCorner4, vBoundingBoxRL, false);
1658 
1659  // sort the projected points: first ensure that the two upper points lie above the two lower ones,
1660  // then order each pair from left to right
1661  Vec2d vTemp;
1662 
1663  if (vBoundingBoxLU.y > vBoundingBoxLL.y)
1664  {
1665  Math2d::SetVec(vTemp, vBoundingBoxLU);
1666  Math2d::SetVec(vBoundingBoxLU, vBoundingBoxLL);
1667  Math2d::SetVec(vBoundingBoxLL, vTemp);
1668  }
1669 
1670  if (vBoundingBoxLU.y > vBoundingBoxRL.y)
1671  {
1672  Math2d::SetVec(vTemp, vBoundingBoxLU);
1673  Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRL);
1674  Math2d::SetVec(vBoundingBoxRL, vTemp);
1675  }
1676 
1677  if (vBoundingBoxRU.y > vBoundingBoxLL.y)
1678  {
1679  Math2d::SetVec(vTemp, vBoundingBoxRU);
1680  Math2d::SetVec(vBoundingBoxRU, vBoundingBoxLL);
1681  Math2d::SetVec(vBoundingBoxLL, vTemp);
1682  }
1683 
1684  if (vBoundingBoxRU.y > vBoundingBoxRL.y)
1685  {
1686  Math2d::SetVec(vTemp, vBoundingBoxRU);
1687  Math2d::SetVec(vBoundingBoxRU, vBoundingBoxRL);
1688  Math2d::SetVec(vBoundingBoxRL, vTemp);
1689  }
1690 
1691  if (vBoundingBoxLU.x > vBoundingBoxRU.x)
1692  {
1693  Math2d::SetVec(vTemp, vBoundingBoxLU);
1694  Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRU);
1695  Math2d::SetVec(vBoundingBoxRU, vTemp);
1696  }
1697 
1698  if (vBoundingBoxLL.x > vBoundingBoxRL.x)
1699  {
1700  Math2d::SetVec(vTemp, vBoundingBoxLL);
1701  Math2d::SetVec(vBoundingBoxLL, vBoundingBoxRL);
1702  Math2d::SetVec(vBoundingBoxRL, vTemp);
1703  }
1704 
1705  //ARMARX_VERBOSE << "GetHypothesisPrincipalAxesAndBoundingBox(): BB: (" << vBoundingBoxLU.x << ", " << vBoundingBoxLU.y << ") (" << vBoundingBoxRU.x << ", "
1706  // << vBoundingBoxRU.y << ") (" << vBoundingBoxLL.x << ", " << vBoundingBoxLL.y << ") (" << vBoundingBoxRL.x << ", " << vBoundingBoxRL.y << ")";
1707 }
1708 
1709 void
1710 ObjectLearningByPushing::ReportObjectPositionInformationToObserver()
1711 {
1712  Vec3d vHypothesisPosition, vPrincipalAxis1, vPrincipalAxis2, vPrincipalAxis3, vEigenValues,
1713  vMaxExtents;
1714  Vec2d vBoundingBoxLeftUpper, vBoundingBoxRightUpper, vBoundingBoxLeftLower,
1715  vBoundingBoxRightLower;
1716  float fHypothesisExtent;
1717  vHypothesisPosition = GetObjectPosition(fHypothesisExtent);
1718  GetHypothesisPrincipalAxesAndBoundingBox(vPrincipalAxis1,
1719  vPrincipalAxis2,
1720  vPrincipalAxis3,
1721  vEigenValues,
1722  vMaxExtents,
1723  vBoundingBoxLeftUpper,
1724  vBoundingBoxRightUpper,
1725  vBoundingBoxLeftLower,
1726  vBoundingBoxRightLower);
1727 
1728  Eigen::Vector3f vec(vHypothesisPosition.x, vHypothesisPosition.y, vHypothesisPosition.z);
1729  armarx::FramedPositionPtr hypothesisPosition =
1730  new armarx::FramedPosition(vec, cameraFrameName, localRobot->getName());
1731  armarx::Vector3Ptr principalAxis1, principalAxis2, principalAxis3, eigenvalues;
1732  principalAxis1 = new armarx::Vector3();
1733  principalAxis1->x = vPrincipalAxis1.x;
1734  principalAxis1->y = vPrincipalAxis1.y;
1735  principalAxis1->z = vPrincipalAxis1.z;
1736  principalAxis2 = new armarx::Vector3();
1737  principalAxis2->x = vPrincipalAxis2.x;
1738  principalAxis2->y = vPrincipalAxis2.y;
1739  principalAxis2->z = vPrincipalAxis2.z;
1740  principalAxis3 = new armarx::Vector3();
1741  principalAxis3->x = vPrincipalAxis3.x;
1742  principalAxis3->y = vPrincipalAxis3.y;
1743  principalAxis3->z = vPrincipalAxis3.z;
1744  eigenvalues = new armarx::Vector3();
1745  eigenvalues->x = vEigenValues.x;
1746  eigenvalues->y = vEigenValues.y;
1747  eigenvalues->z = vEigenValues.z;
1748 
1749 
1750  FramedPositionPtr globalObjPose = hypothesisPosition->toGlobal(localRobot);
1751  debugDrawer->setSphereVisu(getName(), "GlobalObj", globalObjPose, DrawColor{0, 0, 1, 1}, 50.0);
1752 
1753  listener->reportObjectHypothesisPosition(hypothesisPosition,
1754  fHypothesisExtent,
1755  principalAxis1,
1756  principalAxis2,
1757  principalAxis3,
1758  eigenvalues);
1759 }
1760 
1761 void
1762 ObjectLearningByPushing::SetHeadToPlatformTransformation(Vec3d vTranslation,
1763  Mat3d mRotation,
1764  bool bResetOldTransformation)
1765 {
1766  if (bResetOldTransformation)
1767  {
1768  Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, vTranslation);
1769  Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, mRotation);
1770  }
1771  else
1772  {
1773  Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation,
1774  m_tHeadToPlatformTransformation.translation);
1775  Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation,
1776  m_tHeadToPlatformTransformation.rotation);
1777  }
1778 
1779  Math3d::SetVec(m_tHeadToPlatformTransformation.translation, vTranslation);
1780  Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, mRotation);
1781 }
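// SetHeadToPlatformTransformation() stores the mapping from head/camera coordinates
// to platform coordinates, p_platform = R * p_head + t; SelectPreferredHypothesis()
// below applies the inverse, p_head = R^T * (p_platform - t). A minimal illustrative
// sketch of the forward mapping (not in the original source), assuming IVT's
// Math3d::MulMatVec and Math3d::AddVecVec:
static Vec3d HeadPointToPlatformSketch(const Mat3d& mRotation,
                                       const Vec3d& vTranslation,
                                       const Vec3d& vPointInHeadCoordinates)
{
    Vec3d vRotated, vPointInPlatformCoordinates;
    Math3d::MulMatVec(mRotation, vPointInHeadCoordinates, vRotated);          // R * p_head
    Math3d::AddVecVec(vRotated, vTranslation, vPointInPlatformCoordinates);   // + t
    return vPointInPlatformCoordinates;
}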
1782 
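// Loads the stored RGBD descriptors 1..N of the given object for N = 2..51 (the loop
// stops as soon as descriptor N does not exist), fuses them into a single segmented
// point cloud, saves the fused result under "<name>-fused" and writes a visualization
// image to OLP_OBJECT_LEARNING_DIR.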
1783 void
1784 ObjectLearningByPushing::LoadAndFuseObjectSegmentations(std::string sObjectName)
1785 {
1786 
1787  const int nFileIndexStart = 1;
1788 
1789  for (int nMaxFileIndex = 2; nMaxFileIndex <= 51; nMaxFileIndex++)
1790  {
1791  std::vector<CObjectHypothesis*> aObjectHypotheses;
1792  CObjectHypothesis* pHypothesis;
1793 
1794  if (!CObjectRecognition::LoadObjectDescriptorRGBD(sObjectName, nMaxFileIndex, pHypothesis))
1795  {
1796  return;
1797  }
1798 
1799  delete pHypothesis;
1800 
1801  int nFileIndex = nFileIndexStart;
1802 
1803  while (CObjectRecognition::LoadObjectDescriptorRGBD(sObjectName, nFileIndex, pHypothesis) &&
1804  nFileIndex <= nMaxFileIndex)
1805  {
1806  aObjectHypotheses.push_back(pHypothesis);
1807  nFileIndex++;
1808  }
1809 
1810  pHypothesis = NULL;
1811  CSegmentedPointCloudFusion::FusePointClouds(aObjectHypotheses, pHypothesis);
1812 
1813  CObjectRecognition::SaveObjectDescriptorRGBD(
1814  pHypothesis, sObjectName + "-fused", aObjectHypotheses.size());
1815  CObjectRecognition::SaveObjectDescriptorPCD(
1816  pHypothesis, sObjectName + "-fused", aObjectHypotheses.size());
1817 
1818  for (size_t i = 0; i < pHypothesis->aVisibleConfirmedPoints.size(); i++)
1819  {
1820  pHypothesis->aVisibleConfirmedPoints.at(i)->vPosition.z += 250;
1821  }
1822 
1823  CByteImage* pImg = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
1824  COLPTools::DrawHypothesis(pHypothesis, calibration, pImg, aObjectHypotheses.size());
1825  std::string sFileName = OLP_OBJECT_LEARNING_DIR + sObjectName + "-fused00.bmp";
1826  COLPTools::SetNumberInFileName(sFileName, nMaxFileIndex, 2);
1827  pImg->SaveToFile(sFileName.c_str());
1828  delete pImg;
1829 
1830  for (size_t i = 0; i < aObjectHypotheses.size(); i++)
1831  {
1832  delete aObjectHypotheses.at(i);
1833  }
1834 
1835  delete pHypothesis;
1836  }
1837 }
1838 
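// Selects a hypothesis from the confirmed hypotheses or, if none are confirmed, from
// the initial hypotheses at local maxima. If bPreferCentralObject is set, the
// hypothesis whose center is closest to the central pushing position (transformed
// from platform to camera coordinates) is chosen; otherwise the first one. pPoints
// is set to the confirmed points of the chosen hypothesis, or to its new (candidate)
// points if there are no confirmed points yet.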
1839 CObjectHypothesis*
1840 ObjectLearningByPushing::SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints,
1841  const bool bPreferCentralObject)
1842 {
1843  CObjectHypothesis* pHypothesis = NULL;
1844  pPoints = NULL;
1845 
1846  CObjectHypothesisArray* hypotheses = NULL;
1847 
1848  if (m_pConfirmedHypotheses->GetSize() > 0)
1849  {
1850  hypotheses = m_pConfirmedHypotheses;
1851  }
1852  else if (m_pInitialHypothesesAtLocalMaxima->GetSize() > 0)
1853  {
1854  hypotheses = m_pInitialHypothesesAtLocalMaxima;
1855  }
1856 
1857  if (hypotheses)
1858  {
1859  if (bPreferCentralObject)
1860  {
1861  // get central point in camera coordinates
1862  Vec3d vCenterPlatform = {OLP_CENTRAL_POSITION_FOR_PUSHING_X,
1863  OLP_CENTRAL_POSITION_FOR_PUSHING_Y,
1864  OLP_CENTRAL_POSITION_FOR_PUSHING_Z};
1865  Mat3d mRotInv;
1866  Vec3d vCenter, vTemp;
1867  Math3d::SubtractVecVec(
1868  vCenterPlatform, m_tHeadToPlatformTransformation.translation, vTemp);
1869  Math3d::Transpose(m_tHeadToPlatformTransformation.rotation, mRotInv);
1870  Math3d::MulMatVec(mRotInv, vTemp, vCenter);
1871  ARMARX_VERBOSE << "Central point in platform cs: " << vCenterPlatform.x << ", "
1872  << vCenterPlatform.y << ", " << vCenterPlatform.z;
1873  ARMARX_VERBOSE << "m_tHeadToPlatformTransformation.translation: "
1874  << m_tHeadToPlatformTransformation.translation.x << ", "
1875  << m_tHeadToPlatformTransformation.translation.y << ", "
1876  << m_tHeadToPlatformTransformation.translation.z;
1877  ARMARX_VERBOSE << "m_tHeadToPlatformTransformation.rotation: "
1878  << m_tHeadToPlatformTransformation.rotation.r1 << ", "
1879  << m_tHeadToPlatformTransformation.rotation.r2 << ", "
1880  << m_tHeadToPlatformTransformation.rotation.r3 << ", "
1881  << m_tHeadToPlatformTransformation.rotation.r4 << ", "
1882  << m_tHeadToPlatformTransformation.rotation.r5 << ", "
1883  << m_tHeadToPlatformTransformation.rotation.r6 << ", "
1884  << m_tHeadToPlatformTransformation.rotation.r7 << ", "
1885  << m_tHeadToPlatformTransformation.rotation.r8 << ", "
1886  << m_tHeadToPlatformTransformation.rotation.r9;
1887  ARMARX_VERBOSE << "Central point in camera cs: " << vCenter.x << ", " << vCenter.y
1888  << ", " << vCenter.z;
1889  //Math3d::SetVec(vCenter, 0, 0, 500);
1890  //ARMARX_IMPORTANT << "Manually set central point in camera coordinates: " << vCenter.x << ", " << vCenter.y << ", " << vCenter.z;
1891 
1892 
1893  int nBestIndex = 0;
1894  float fMinDist = Math3d::Distance((*hypotheses)[0]->vCenter, vCenter);
1895 
1896  for (int i = 1; i < hypotheses->GetSize(); i++)
1897  {
1898  if (Math3d::Distance((*hypotheses)[i]->vCenter, vCenter) < fMinDist)
1899  {
1900  // only accept hypotheses that contain at least half as many points as the biggest one
1901  //if (2*(*m_pInitialHypothesesAtLocalMaxima)[i]->aNewPoints.size() > (*m_pInitialHypothesesAtLocalMaxima)[0]->aNewPoints.size())
1902  {
1903  fMinDist = Math3d::Distance((*hypotheses)[i]->vCenter, vCenter);
1904  nBestIndex = i;
1905  }
1906  }
1907  }
1908 
1909  pHypothesis = (*hypotheses)[nBestIndex];
1910  }
1911  else
1912  {
1913  pHypothesis = (*hypotheses)[0];
1914  }
1915  }
1916 
1917  if (pHypothesis)
1918  {
1919  if (pHypothesis->aConfirmedPoints.size() > 0)
1920  {
1921  pPoints = &(pHypothesis->aConfirmedPoints);
1922  }
1923  else
1924  {
1925  pPoints = &(pHypothesis->aNewPoints);
1926  }
1927 
1928  ARMARX_VERBOSE << "Hypothesis " << pHypothesis->nHypothesisNumber << " ("
1929  << pHypothesis->vCenter.x << ", " << pHypothesis->vCenter.y << ", "
1930  << pHypothesis->vCenter.z << ") is chosen for pushing";
1931  }
1932  else
1933  {
1934  ARMARX_WARNING << "No hypothesis available";
1935  }
1936 
1937  return pHypothesis;
1938 }
1939 
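// Converts an object descriptor from the OLP text format (a point count followed by
// position "x y z" and color "r g b intensity" values per point, first for the
// confirmed points and optionally for the candidate points) into a PCD file with the
// same name and the extension replaced by "pcd".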
1940 void
1941 ObjectLearningByPushing::convertFileOLPtoPCL(std::string filename, bool includeCandidatePoints)
1942 {
1943  setlocale(LC_NUMERIC, "C");
1944 
1945  FILE* filePtr = fopen(filename.c_str(), "rt");
1946 
1947  if (filePtr == NULL)
1948  {
1949  ARMARX_WARNING << "Cannot open object file: " << filename;
1950  return;
1951  }
1952 
1953  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr objectPointCloud(
1954  new pcl::PointCloud<pcl::PointXYZRGBA>());
1955 
1956  // read the confirmed points
1957  size_t nNumPoints;
1958  float r, g, b, intensity;
1959  pcl::PointXYZRGBA point;
1960  int dummy = fscanf(filePtr, "%lu", &nNumPoints);
1961 
1962  for (size_t i = 0; i < nNumPoints; i++)
1963  {
1964  dummy += fscanf(filePtr, "%e %e %e", &point.x, &point.y, &point.z);
1965  dummy += fscanf(filePtr, "%e %e %e %e \n", &r, &g, &b, &intensity);
1966  point.r = r * 255 * intensity;
1967  point.g = g * 255 * intensity;
1968  point.b = b * 255 * intensity;
1969  objectPointCloud->push_back(point);
1970  }
1971 
1972  if (includeCandidatePoints)
1973  {
1974  // read the candidate points
1975  dummy += fscanf(filePtr, "%lu", &nNumPoints);
1976 
1977  for (size_t i = 0; i < nNumPoints; i++)
1978  {
1979  dummy += fscanf(filePtr, "%e %e %e", &point.x, &point.y, &point.z);
1980  dummy += fscanf(filePtr, "%e %e %e %e \n", &r, &g, &b, &intensity);
1981  point.r = r * 255 * intensity;
1982  point.g = g * 255 * intensity;
1983  point.b = b * 255 * intensity;
1984  objectPointCloud->push_back(point);
1985  }
1986  }
1987  fclose(filePtr);
1988  int n = (int)filename.length();
1989  filename.at(n - 3) = 'p';
1990  filename.at(n - 2) = 'c';
1991  filename.at(n - 1) = 'd';
1992  pcl::io::savePCDFile(filename, *objectPointCloud);
1993 }
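// A minimal illustrative sketch (not in the original source) of reading back a cloud
// written by convertFileOLPtoPCL(), using pcl::io::loadPCDFile from the already
// included <pcl/io/pcd_io.h>:
static pcl::PointCloud<pcl::PointXYZRGBA>::Ptr LoadConvertedObjectCloudSketch(const std::string& pcdFileName)
{
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>());
    if (pcl::io::loadPCDFile(pcdFileName, *cloud) < 0)
    {
        ARMARX_WARNING << "Could not load point cloud file: " << pcdFileName;
    }
    return cloud;
}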
1994 
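// Masks the single-channel foreground image to the given bounding box: every pixel
// outside [minX, maxX] x [minY, maxY] is set to zero.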
1995 void
1996 ObjectLearningByPushing::BoundingBoxInForegroundImage(CByteImage* image,
1997  int minX,
1998  int maxX,
1999  int minY,
2000  int maxY)
2001 {
2002  for (int i = 0; i < image->height; i++)
2003  {
2004  const bool bRowOutsideBox = (i < minY) || (i > maxY);
2005 
2006  for (int j = 0; j < image->width; j++)
2007  {
2008  if (bRowOutsideBox || j < minX || j > maxX)
2009  {
2010  image->pixels[i * image->width + j] = 0;
2011  }
2012  }
2013  }
2014 
2015 }