40 #include <Calibration/Calibration.h>
41 #include <Calibration/StereoCalibration.h>
42 #include <Image/ByteImage.h>
43 #include <Image/ImageProcessor.h>
44 #include <Math/FloatMatrix.h>
45 #include <Math/LinearAlgebra.h>
53 #include <VisionX/interface/components/Calibration.h>
61 #include <Eigen/Geometry>
63 #include <pcl/io/pcd_io.h>
64 #include <pcl/point_cloud.h>
65 #include <pcl/point_types.h>
// NOTE(review): non-contiguous excerpt — the enclosing method signatures are
// not visible here. Each pair below resets the listener's hypothesis status
// and then advances the internal processing state machine by one phase.
74 listener->resetHypothesesStatus();
// Request creation of brand-new object hypotheses on the next process() cycle.
75 currentState = eCreatingInitialHypotheses;
81 listener->resetHypothesesStatus();
// Request validation of the previously created initial hypotheses.
82 currentState = eValidatingInitialHypotheses;
88 listener->resetHypothesesStatus();
// Request re-validation of hypotheses that were already confirmed once.
89 currentState = eRevalidatingConfirmedHypotheses;
// Fragment: collects the 3D points of the preferred object hypothesis into a
// types::PointList. NOTE(review): lines are elided in this excerpt — the
// construction of 'point' from the Eigen vector 'p' is not visible here.
95 types::PointList hypothesisPoints;
96 std::vector<CHypothesisPoint*>* pPoints;
// SelectPreferredHypothesis() fills pPoints with the chosen hypothesis' points.
97 SelectPreferredHypothesis(pPoints);
101 for (
size_t i = 0; i < pPoints->size(); i++)
103 Eigen::Vector3f p(pPoints->at(i)->vPosition.x,
104 pPoints->at(i)->vPosition.y,
105 pPoints->at(i)->vPosition.z);
107 hypothesisPoints.push_back(point);
111 return hypothesisPoints;
// Fragment: builds a point list of the whole scene from the depth-map points.
117 types::PointList scenePoints;
118 std::vector<CHypothesisPoint*>* points = m_pAllNewDepthMapPoints;
// Fall back to the previous frame's points when no new ones are available.
119 if (m_pAllNewDepthMapPoints->size() == 0)
121 points = m_pAllOldDepthMapPoints;
124 for (
size_t i = 0; i < points->size(); i++)
127 points->at(i)->vPosition.x, points->at(i)->vPosition.y, points->at(i)->vPosition.z);
129 scenePoints.push_back(point);
// Fragment: wraps the camera-frame upwards vector into an Ice Vector3; the
// separate 'return cameraFrameName;' line appears to belong to a different
// accessor whose surrounding lines are elided in this excerpt.
137 Vector3BasePtr upVector =
new Vector3(upwardsVector.x, upwardsVector.y, upwardsVector.z);
144 return cameraFrameName;
// Fragment: builds a Pose from a rotation/translation pair (their computation
// is elided here); 'result' stays NULL until the assignment is reached.
150 std::vector<CHypothesisPoint*>* pPoints;
152 PoseBasePtr result = NULL;
158 result =
new Pose(rot, trans);
// Fragment: refreshes robot data, propagates the current head-to-platform
// transform, recomputes all feature points and runs hypothesis recognition
// for the given object name.
167 UpdateDataFromRobot();
168 SetHeadToPlatformTransformation(
169 tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation,
true);
171 m_pFeatureCalculation->GetAllFeaturePoints(colorImageLeft,
175 *m_pAllNewSIFTPoints,
177 *m_pAllNewDepthMapPoints,
180 RecognizeHypotheses(colorImageLeft, objectName);
// Initialization fragment: reads provider/proxy names from the component
// properties, registers the required proxies and topics, and resets the
// hypothesis state machine.
191 imageProviderName = getProperty<std::string>(
"ImageProviderAdapterName").getValue();
192 usingImageProvider(imageProviderName);
194 robotStateProxyName = getProperty<std::string>(
"RobotStateProxyName").getValue();
195 usingProxy(robotStateProxyName);
198 cameraFrameName = getProperty<std::string>(
"CameraFrameName").getValue();
// Start without any hypotheses until the first creation request arrives.
201 currentState = eNoHypotheses;
204 offeringTopic(
"ObjectLearningByPushing");
205 pointcloudProviderName = getProperty<std::string>(
"PointCloudProviderAdapterName").getValue();
206 usingPointCloudProvider(pointcloudProviderName);
209 offeringTopic(getProperty<std::string>(
"DebugDrawerTopicName").getValue());
// Connect fragment: resolves proxies, fetches the stereo calibration, applies
// a simulation-specific rotation fix, allocates working images/buffers and
// initializes the feature-point containers plus debug drawing.
215 ARMARX_VERBOSE <<
"entering onConnectPointCloudAndImageProcessor()";
218 ARMARX_INFO <<
"Connecting to " << pointcloudProviderName;
221 pointcloudProviderProxy = getProxy<RGBDPointCloudProviderInterfacePrx>(pointcloudProviderName);
222 calibration =
tools::convert(pointcloudProviderProxy->getStereoCalibration().calibrationLeft);
// Warn (but continue) when the configured frame differs from the provider's.
224 if (cameraFrameName != pointcloudProviderProxy->getReferenceFrame())
226 ARMARX_WARNING <<
"Specified camera frame name: '" << cameraFrameName
227 <<
"' does not match frame given by provider '"
228 << pointcloudProviderProxy->getReferenceFrame() <<
"'!";
// Simulator workaround: pre-multiply the calibration rotation by a fixed
// camera-to-image rotation to compensate the DepthCameraSim misalignment.
232 if (pointcloudProviderProxy->getReferenceFrame() ==
"DepthCameraSim")
234 ARMARX_WARNING <<
"Simulation detected. Applying fix for DepthCameraSim missalignment.";
238 pointcloudProviderProxy->getStereoCalibration().calibrationLeft.cameraParam.rotation);
242 worldToCamera << R.r1, R.r2, R.r3, R.r4, R.r5, R.r6, R.r7, R.r8, R.r9;
244 cameraToImg << 0, -1, 0, 1, 0, 0, 0, 0, 1;
246 worldToCamera = cameraToImg * worldToCamera;
// Copy the corrected rotation back into the Math3d row-major fields.
247 R.r1 = worldToCamera(0, 0);
248 R.r2 = worldToCamera(0, 1);
249 R.r3 = worldToCamera(0, 2);
250 R.r4 = worldToCamera(1, 0);
251 R.r5 = worldToCamera(1, 1);
252 R.r6 = worldToCamera(1, 2);
253 R.r7 = worldToCamera(2, 0);
254 R.r8 = worldToCamera(2, 1);
255 R.r9 = worldToCamera(2, 2);
257 calibration->SetRotation(R);
260 pointCloudPtr.reset(
new pcl::PointCloud<pcl::PointXYZRGBA>());
263 ARMARX_INFO <<
"Connecting to " << imageProviderName;
265 imageProviderProxy = getProxy<ImageProviderInterfacePrx>(imageProviderName);
// Allocate working copies and result images (sized like the left color image).
270 colorImageLeftOld =
new CByteImage(colorImageLeft);
272 new CByteImage(colorImageLeft->width, colorImageLeft->height, CByteImage::eGrayScale);
273 greyImageRight =
new CByteImage(greyImageLeft);
274 resultImageLeft =
new CByteImage(colorImageLeft);
275 resultImageRight =
new CByteImage(colorImageLeft);
276 tempResultImage =
new CByteImage(colorImageLeft);
278 cameraImages =
new CByteImage*[2];
279 cameraImages[0] = colorImageLeft;
280 cameraImages[1] = colorImageRight;
282 resultImages =
new CByteImage*[2];
283 resultImages[0] = resultImageLeft;
284 resultImages[1] = resultImageRight;
299 iterationCounter = 0;
302 ARMARX_INFO <<
"Connecting to " << robotStateProxyName;
303 robotStateProxy = getProxy<RobotStateComponentInterfacePrx>(robotStateProxyName);
306 localRobot = RemoteRobot::createLocalCloneFromFile(
// Choose the OpenMP thread count; clamped to at least one below.
311 #if defined OMP_NUM_THREADS
312 nNumOMPThreads = OMP_NUM_THREADS;
313 #elif defined OLP_USE_DBVISION
314 nNumOMPThreads = omp_get_num_procs() - 1;
316 nNumOMPThreads = omp_get_num_procs();
319 if (nNumOMPThreads < 1)
324 omp_set_num_threads(nNumOMPThreads);
// Containers for MSER descriptors and depth-map points (owned; freed on exit).
331 m_pAllNewMSERs =
new std::vector<CMSERDescriptor3D*>();
332 m_pAllOldMSERs =
new std::vector<CMSERDescriptor3D*>();
333 m_pCorrespondingMSERs =
new std::vector<CMSERDescriptor3D*>();
334 m_pAllNewDepthMapPoints =
new std::vector<CHypothesisPoint*>();
335 m_pAllOldDepthMapPoints =
new std::vector<CHypothesisPoint*>();
// Start with identity head-to-platform transforms and a zero upwards vector.
339 Math3d::SetVec(m_tHeadToPlatformTransformation.translation, Math3d::zero_vec);
340 Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, Math3d::zero_vec);
341 Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, Math3d::unit_mat);
342 Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, Math3d::unit_mat);
344 Math3d::SetVec(upwardsVector, Math3d::zero_vec);
347 listener = getTopic<ObjectLearningByPushingListenerPrx>(
"ObjectLearningByPushing");
353 m_pSegmentedBackgroundImage =
359 debugDrawer = getTopic<DebugDrawerInterfacePrx>(
360 getProperty<std::string>(
"DebugDrawerTopicName").getValue());
361 debugDrawer->clearLayer(getName());
// Teardown fragment: releases the visualization/generation helpers and all
// owned hypothesis, SIFT, MSER and depth-map point containers (elements first,
// then the container itself).
371 delete m_pHypothesisVisualization;
372 delete m_pHypothesisGeneration;
374 for (
int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
376 delete (*m_pObjectHypotheses)[i];
379 delete m_pObjectHypotheses;
381 for (
int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
383 delete (*m_pConfirmedHypotheses)[i];
386 delete m_pConfirmedHypotheses;
// Only the container is deleted: it appears to hold aliases of hypotheses
// that were already freed above (see the AddElement calls in the creation
// path) — confirm before changing deletion order.
387 delete m_pInitialHypothesesAtLocalMaxima;
390 for (
int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
392 delete (*m_pAllNewSIFTPoints)[i];
395 delete m_pAllNewSIFTPoints;
397 for (
int i = 0; i < m_pAllOldSIFTPoints->GetSize(); i++)
399 delete (*m_pAllOldSIFTPoints)[i];
402 delete m_pAllOldSIFTPoints;
404 for (
size_t i = 0; i < m_pAllNewMSERs->size(); i++)
406 delete m_pAllNewMSERs->at(i);
409 delete m_pAllNewMSERs;
411 for (
size_t i = 0; i < m_pAllOldMSERs->size(); i++)
413 delete m_pAllOldMSERs->at(i);
416 delete m_pAllOldMSERs;
418 for (
size_t i = 0; i < m_pCorrespondingMSERs->size(); i++)
420 delete m_pCorrespondingMSERs->at(i);
423 delete m_pCorrespondingMSERs;
425 for (
size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
427 delete m_pAllNewDepthMapPoints->at(i);
430 delete m_pAllNewDepthMapPoints;
432 for (
size_t i = 0; i < m_pAllOldDepthMapPoints->size(); i++)
434 delete m_pAllOldDepthMapPoints->at(i);
437 delete m_pAllOldDepthMapPoints;
439 delete m_pGaussBackground;
// process() fragment: state machine — creates, validates or revalidates
// object hypotheses depending on currentState, publishes result images and
// notifies the listener topic about the outcome of each phase.
447 ARMARX_INFO <<
"process() called before ready (weird race condition with the "
448 "ImageProcessor::process)";
// Serialize processing; the state transitions below assume exclusive access.
454 std::lock_guard<std::mutex> lock(processingLock);
456 switch (currentState)
458 case eCreatingInitialHypotheses:
461 UpdateDataFromRobot();
462 SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation,
463 tHeadToPlatformTransformation.rotation,
467 CreateInitialObjectHypothesesInternal(
468 greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
471 VisualizeHypotheses(greyImageLeft,
479 provideResultImages(resultImages);
// Report success/failure to the listener and move to the matching state.
482 if (GetNumberOfNonconfirmedHypotheses() > 0)
484 ARMARX_INFO <<
"Found " << GetNumberOfNonconfirmedHypotheses()
485 <<
" possible objects";
488 ReportObjectPositionInformationToObserver();
489 listener->reportInitialObjectHypothesesCreated(
true);
491 currentState = eHasInitialHypotheses;
497 listener->reportInitialObjectHypothesesCreated(
false);
499 currentState = eNoHypotheses;
507 case eValidatingInitialHypotheses:
// Keep a copy of the previous color image before grabbing new data.
510 ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
511 UpdateDataFromRobot();
512 SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation,
513 tHeadToPlatformTransformation.rotation,
517 bool validationSuccessful = ValidateInitialObjectHypothesesInternal(
518 greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
521 VisualizeHypotheses(greyImageLeft,
530 VisualizeHypotheses(greyImageLeft,
538 provideResultImages(resultImages);
540 if (validationSuccessful)
542 ARMARX_INFO <<
"At least one object hypothesis was confirmed";
545 ReportObjectPositionInformationToObserver();
546 listener->reportObjectHypothesesValidated(
true);
548 currentState = eHasConfirmedHypotheses;
552 ARMARX_INFO <<
"No object hypothesis was confirmed";
554 listener->reportObjectHypothesesValidated(
false);
556 currentState = eNoHypotheses;
564 case eRevalidatingConfirmedHypotheses:
567 ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
568 UpdateDataFromRobot();
569 SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation,
570 tHeadToPlatformTransformation.rotation,
574 bool validationSuccessful = RevalidateConfirmedObjectHypothesesInternal(
575 greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);
578 VisualizeHypotheses(greyImageLeft,
586 provideResultImages(resultImages);
588 ARMARX_INFO <<
"The confirmed object hypotheses were revalidated";
589 ARMARX_IMPORTANT <<
"Was revalidation successful: " << validationSuccessful;
592 ReportObjectPositionInformationToObserver();
593 listener->reportObjectHypothesesValidated(validationSuccessful);
// NOTE(review): state stays eHasConfirmedHypotheses regardless of the
// revalidation result — confirm this is intended.
595 currentState = eHasConfirmedHypotheses;
602 case eHasInitialHypotheses:
603 case eHasConfirmedHypotheses:
// Idle-with-hypotheses states: keep republishing the last result images.
605 provideResultImages(resultImages);
// Default/idle path: forward the current camera image to both result slots.
612 UpdateDataFromRobot();
613 ::ImageProcessor::CopyImage(cameraImages[0], resultImages[0]);
614 ::ImageProcessor::CopyImage(cameraImages[0], resultImages[1]);
615 provideResultImages(resultImages);
// Moves all "new" feature containers (SIFT, MSER, depth-map points) into the
// corresponding "old" slots by pointer swap, then frees and clears whatever
// ended up in the "new" containers so they can be refilled next frame.
635 ObjectLearningByPushing::SwapAllPointsArraysToOld()
638 pTempSIFTPoints = m_pAllOldSIFTPoints;
639 m_pAllOldSIFTPoints = m_pAllNewSIFTPoints;
640 m_pAllNewSIFTPoints = pTempSIFTPoints;
// After the swap, "new" holds the previous "old" contents: delete and clear.
642 for (
int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
644 delete (*m_pAllNewSIFTPoints)[i];
647 m_pAllNewSIFTPoints->Clear();
649 std::vector<CMSERDescriptor3D*>* pTempMSERs;
650 pTempMSERs = m_pAllOldMSERs;
651 m_pAllOldMSERs = m_pAllNewMSERs;
652 m_pAllNewMSERs = pTempMSERs;
654 for (
size_t i = 0; i < m_pAllNewMSERs->size(); i++)
656 delete m_pAllNewMSERs->at(i);
659 m_pAllNewMSERs->clear();
661 std::vector<CHypothesisPoint*>* pTempDepthMapPoints;
662 pTempDepthMapPoints = m_pAllOldDepthMapPoints;
663 m_pAllOldDepthMapPoints = m_pAllNewDepthMapPoints;
664 m_pAllNewDepthMapPoints = pTempDepthMapPoints;
666 for (
size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
668 delete m_pAllNewDepthMapPoints->at(i);
671 m_pAllNewDepthMapPoints->clear();
// Creates the initial set of object hypotheses from the current stereo images:
// clears previous hypotheses, recomputes feature points, learns the background
// model and runs hypothesis generation; when the maximumness filter is
// compiled in, keeps only hypotheses on local maxima before visualizing and
// rotating the point buffers to "old".
675 ObjectLearningByPushing::CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
676 CByteImage* pImageGreyRight,
677 CByteImage* pImageColorLeft,
678 CByteImage* pImageColorRight,
// Dispose of any hypotheses from a previous run.
682 for (
int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
684 delete (*m_pObjectHypotheses)[i];
687 m_pObjectHypotheses->Clear();
689 for (
int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
691 delete (*m_pConfirmedHypotheses)[i];
694 m_pConfirmedHypotheses->Clear();
695 m_pInitialHypothesesAtLocalMaxima->Clear();
700 m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft,
704 *m_pAllNewSIFTPoints,
706 *m_pAllNewDepthMapPoints,
711 if (m_bMakeIntermediateScreenshots)
713 std::string sScreenshotFile = m_sScreenshotPath +
"disp0000.bmp";
715 m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
728 m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
731 m_pHypothesisGeneration->FindObjectHypotheses(pImageColorLeft,
735 *m_pAllNewSIFTPoints,
737 *m_pAllNewDepthMapPoints,
738 *m_pObjectHypotheses);
741 #ifdef OLP_FILTER_INITIAL_HYPOTHESES_WITH_MAXIMUMNESS
// Filter hypotheses using a "maximumness" image (local maxima of the scene).
744 std::vector<Vec3d> aMaxima;
745 CByteImage* pMaximumnessImage =
748 m_tHeadToPlatformTransformation.rotation,
749 m_tHeadToPlatformTransformation.translation,
754 if (m_bMakeIntermediateScreenshots)
756 std::string sScreenshotFile = m_sScreenshotPath +
"max0000.bmp";
758 pMaximumnessImage->SaveToFile(sScreenshotFile.c_str());
761 for (
int n = 0; n < m_pObjectHypotheses->GetSize(); n++)
764 float fForegroundRatio;
765 int nNumForegroundPoints;
767 pHypothesis, pMaximumnessImage, calibration, fForegroundRatio, nNumForegroundPoints);
// Accept a hypothesis when more than 40% of its points are foreground.
770 if (fForegroundRatio > 0.4f)
772 m_pInitialHypothesesAtLocalMaxima->AddElement(pHypothesis);
// Never end up empty: fall back to the first hypothesis if all were rejected.
776 if ((m_pInitialHypothesesAtLocalMaxima->GetSize() == 0) && (m_pObjectHypotheses->GetSize() > 0))
778 m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[0]);
781 m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft,
783 *m_pInitialHypothesesAtLocalMaxima,
784 *m_pAllOldSIFTPoints,
786 *m_pCorrespondingMSERs,
790 m_bMakeIntermediateScreenshots);
792 delete pMaximumnessImage;
// Without the maximumness filter all generated hypotheses are kept.
795 for (
int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
797 m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[i]);
803 SwapAllPointsArraysToOld();
// Validates the initial hypotheses against a new observation after the push:
// refreshes the background segmentation and feature points, confirms
// hypotheses, sorts them by confirmed-point count (descending) and returns
// whether at least one hypothesis was confirmed.
807 ObjectLearningByPushing::ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
808 CByteImage* pImageGreyRight,
809 CByteImage* pImageColorLeft,
810 CByteImage* pImageColorRight,
// Drop MSER correspondences from the previous iteration.
816 for (
int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
818 delete m_pCorrespondingMSERs->at(i);
821 m_pCorrespondingMSERs->clear();
// Foreground segmentation against the learned background, restricted to a
// fixed image region, then re-learn the background from the current image.
825 m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
826 BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
827 m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
832 if (m_bMakeIntermediateScreenshots)
834 std::string sScreenshotFile = m_sScreenshotPath +
"bg0000.bmp";
836 m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
840 m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft,
844 *m_pAllNewSIFTPoints,
846 *m_pAllNewDepthMapPoints,
850 if (m_bMakeIntermediateScreenshots)
852 std::string sScreenshotFile = m_sScreenshotPath +
"disp0000.bmp";
854 m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
857 CByteImage* pEdges =
new CByteImage(m_pDisparityImage);
859 pImageGreyLeft, m_pDisparityImage, pEdges);
860 sScreenshotFile = m_sScreenshotPath +
"edges0000.bmp";
862 pEdges->SaveToFile(sScreenshotFile.c_str());
867 ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
871 *m_pAllNewDepthMapPoints,
872 m_pSegmentedBackgroundImage,
877 m_pConfirmedHypotheses);
880 int nNumValidatedObjects = m_pConfirmedHypotheses->GetSize();
883 SwapAllPointsArraysToOld();
885 for (
int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
887 ARMARX_VERBOSE <<
"Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
888 <<
" has been revalidated. "
889 << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size()
890 <<
" confirmed points, " << (*m_pConfirmedHypotheses)[i]->aNewPoints.size()
891 <<
" new candidates. Position: (" << (*m_pConfirmedHypotheses)[i]->vCenter.x
892 <<
", " << (*m_pConfirmedHypotheses)[i]->vCenter.y <<
", "
893 << (*m_pConfirmedHypotheses)[i]->vCenter.z <<
")";
// Insertion sort: order hypotheses by confirmed-point count, biggest first.
897 for (
int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
899 for (
int j = i; j > 0; j--)
901 if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >=
902 (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
908 (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
909 (*m_pConfirmedHypotheses)[j - 1] = pTemp;
915 SaveHistogramOfConfirmedHypothesis(
"NewObject", nImageNumber);
// True iff at least one hypothesis was confirmed.
920 return (nNumValidatedObjects > 0);
// Re-validates the already-confirmed hypotheses after another push: refreshes
// segmentation/features, optionally dumps debug point files, confirms and
// sorts hypotheses, discards ones that stopped moving or that overlap a
// better-ranked hypothesis, and returns whether anything still moved.
924 ObjectLearningByPushing::RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft,
925 CByteImage* pImageGreyRight,
926 CByteImage* pImageColorLeft,
927 CByteImage* pImageColorRight,
// Drop MSER correspondences from the previous iteration.
933 for (
int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
935 delete m_pCorrespondingMSERs->at(i);
938 m_pCorrespondingMSERs->clear();
// Foreground segmentation + background re-learning, as in the validation step.
942 m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
943 BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
944 m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);
949 if (m_bMakeIntermediateScreenshots)
951 std::string sScreenshotFile = m_sScreenshotPath +
"bg0000.bmp";
953 m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
958 m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft,
962 *m_pAllNewSIFTPoints,
964 *m_pAllNewDepthMapPoints,
// Debug dump: collect all non-origin cloud points (the squared-norm > 0 test
// filters exactly-zero/invalid points) and write them to text files.
973 std::vector<Vec3d> all3DPoints;
975 for (
size_t i = 0; i < pointCloudPtr->size(); i++)
977 pcl::PointXYZRGBA point = pointCloudPtr->at(i);
981 if (point.x * point.x + point.y * point.y + point.z * point.z > 0)
983 Vec3d point3d = {point.x, point.y, point.z};
984 all3DPoints.push_back(point3d);
// Force '.' as decimal separator so the .dat files parse consistently.
990 setlocale(LC_NUMERIC,
"C");
992 sFileName.append(
"all3Dpoints00.dat");
994 FILE* pFile = fopen(sFileName.c_str(),
"wt");
995 fprintf(pFile,
"%ld \n", all3DPoints.size());
996 Vec3d center = {0, 0, 0};
998 for (
size_t i = 0; i < all3DPoints.size(); i++)
1002 all3DPoints.at(i).x,
1003 all3DPoints.at(i).y,
1004 all3DPoints.at(i).z);
1005 Math3d::AddToVec(center, all3DPoints.at(i));
1012 sFileName.append(
"all3Dpoints-centered00.dat");
1014 pFile = fopen(sFileName.c_str(),
"wt");
1015 fprintf(pFile,
"%ld \n", all3DPoints.size());
// center becomes the mean of all points; points are re-written centered.
1016 Math3d::MulVecScalar(center, 1.0 / all3DPoints.size(), center);
1018 for (
size_t i = 0; i < all3DPoints.size(); i++)
1022 all3DPoints.at(i).x - center.x,
1023 all3DPoints.at(i).y - center.y,
1024 all3DPoints.at(i).z - center.z);
1030 setlocale(LC_NUMERIC,
"C");
1032 sFileName.append(
"allusedpoints00.dat");
1034 pFile = fopen(sFileName.c_str(),
"wt");
// NOTE(review): header writes all3DPoints.size() but the loop writes the
// depth-map points — counts may disagree; confirm against the original file.
1035 fprintf(pFile,
"%ld \n", all3DPoints.size());
1036 Math3d::SetVec(center, Math3d::zero_vec);
1038 for (
size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
1042 m_pAllNewDepthMapPoints->at(i)->vPosition.x,
1043 m_pAllNewDepthMapPoints->at(i)->vPosition.y,
1044 m_pAllNewDepthMapPoints->at(i)->vPosition.z);
1045 Math3d::AddToVec(center, m_pAllNewDepthMapPoints->at(i)->vPosition);
1052 sFileName.append(
"allusedpoints-centered00.dat");
1054 pFile = fopen(sFileName.c_str(),
"wt");
1055 fprintf(pFile,
"%ld \n", all3DPoints.size());
1056 Math3d::MulVecScalar(center, 1.0 / all3DPoints.size(), center);
1058 for (
size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
1062 m_pAllNewDepthMapPoints->at(i)->vPosition.x - center.x,
1063 m_pAllNewDepthMapPoints->at(i)->vPosition.y - center.y,
1064 m_pAllNewDepthMapPoints->at(i)->vPosition.z - center.z);
1071 sFileName.append(
"cameraimage00.bmp");
1073 pImageColorLeft->SaveToFile(sFileName.c_str());
1077 sFileName.append(
"upwardsvector00.dat");
1079 pFile = fopen(sFileName.c_str(),
"wt");
1080 fprintf(pFile,
"%e %e %e \n", upwardsVector.x, upwardsVector.y, upwardsVector.z);
1085 if (m_bMakeIntermediateScreenshots)
1087 std::string sScreenshotFile = m_sScreenshotPath +
"disp0000.bmp";
1089 m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
1096 ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
1099 m_pSegmentedBackgroundImage,
1103 m_pConfirmedHypotheses);
// Insertion sort by confirmed-point count, biggest first (same scheme as in
// the initial validation).
1107 for (
int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
1109 for (
int j = i; j > 0; j--)
1111 if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >=
1112 (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
1118 (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
1119 (*m_pConfirmedHypotheses)[j - 1] = pTemp;
// Drop hypotheses that did not move during the last
// nNumTimesHypothesisMustHaveMoved pushes (the size>1 guard keeps the last).
1125 const int nNumTimesHypothesisMustHaveMoved = 1;
1126 bool nothingMoved =
false;
1128 for (
int i = m_pConfirmedHypotheses->GetSize() - 1;
1129 i >= 0 && m_pConfirmedHypotheses->GetSize() > 1;
1132 bool bHasMoved =
false;
1135 for (
int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved;
1138 bHasMoved |= (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.at(j);
1144 << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
1145 <<
" because it did not move the last "
1146 << nNumTimesHypothesisMustHaveMoved <<
" times.";
1147 delete (*m_pConfirmedHypotheses)[i];
// Compact the array after the removal.
1149 for (
int k = i; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
1151 (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
1154 m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
// Special case: a single remaining hypothesis only sets the nothingMoved flag.
1159 if (m_pConfirmedHypotheses->GetSize() == 1)
1161 bool bHasMoved =
false;
1162 const int nNumEntries = (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.size();
1164 for (
int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved;
1167 bHasMoved |= (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.at(j);
1172 nothingMoved =
true;
// Remove hypotheses whose overlap with a better-ranked one exceeds 50%.
1178 for (
int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
1180 for (
int j = i + 1; j < m_pConfirmedHypotheses->GetSize(); j++)
1183 (*m_pConfirmedHypotheses)[i], (*m_pConfirmedHypotheses)[j], calibration);
1184 ARMARX_VERBOSE <<
"Overlap " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber <<
" "
1185 << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber <<
": "
1188 if (fOverlapRatio > 0.5)
1191 << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber;
1192 delete (*m_pConfirmedHypotheses)[j];
1194 for (
int k = j; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
1196 (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
1199 m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
1207 SwapAllPointsArraysToOld();
1209 for (
int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
1211 ARMARX_VERBOSE <<
"Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
1212 <<
" has been revalidated. "
1213 << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size()
1214 <<
" confirmed points, " << (*m_pConfirmedHypotheses)[i]->aNewPoints.size()
1215 <<
" new candidates. Position: (" << (*m_pConfirmedHypotheses)[i]->vCenter.x
1216 <<
", " << (*m_pConfirmedHypotheses)[i]->vCenter.y <<
", "
1217 << (*m_pConfirmedHypotheses)[i]->vCenter.z <<
")";
1222 SaveHistogramOfConfirmedHypothesis(
"NewObject", nImageNumber);
// False when the remaining hypothesis did not move anymore.
1228 return !nothingMoved;
// Fetches the latest point cloud and images, synchronizes the local robot
// model to the data timestamp and derives the camera pose plus the
// camera-frame upwards vector from it.
1232 ObjectLearningByPushing::UpdateDataFromRobot()
1234 if (!waitForPointClouds(pointcloudProviderName) || !waitForImages(imageProviderName))
1236 ARMARX_WARNING <<
"Timeout or error while waiting for point cloud data";
1240 if (getPointClouds(pointcloudProviderName, pointCloudPtr))
// An organized cloud mirrors the image layout, so its RGB values can be
// copied pixel-by-pixel into the left camera image.
1244 if (pointCloudPtr->isOrganized())
1246 size_t h = cameraImages[0]->height;
1247 size_t w = cameraImages[0]->width;
1249 for (
size_t y = 0; y < h; y++)
1251 for (
size_t x = 0; x < w; x++)
1253 const pcl::PointXYZRGBA& p = pointCloudPtr->at(x, y);
1254 cameraImages[0]->pixels[3 * (y * w + x) + 0] = p.r;
1255 cameraImages[0]->pixels[3 * (y * w + x) + 1] = p.g;
1256 cameraImages[0]->pixels[3 * (y * w + x) + 2] = p.b;
1260 timestamp = pointCloudPtr->header.stamp;
1264 armarx::MetaInfoSizeBasePtr metaInfo;
1265 int nNumberImages = getImages(imageProviderName, cameraImages, metaInfo);
1268 timestamp = metaInfo->timeProvided;
// Make the local robot clone reflect the joint state at data-capture time.
1271 RemoteRobot::synchronizeLocalCloneToTimestamp(localRobot, robotStateProxy, timestamp);
1273 ::ImageProcessor::ConvertImage(colorImageLeft, greyImageLeft);
1277 localRobot->getRobotNode(cameraFrameName)->getPoseInRootFrame();
// Copy the 4x4 camera pose into the Math3d rotation/translation pair.
1278 tHeadToPlatformTransformation.rotation.r1 = cameraNodePose(0, 0);
1279 tHeadToPlatformTransformation.rotation.r2 = cameraNodePose(0, 1);
1280 tHeadToPlatformTransformation.rotation.r3 = cameraNodePose(0, 2);
1281 tHeadToPlatformTransformation.rotation.r4 = cameraNodePose(1, 0);
1282 tHeadToPlatformTransformation.rotation.r5 = cameraNodePose(1, 1);
1283 tHeadToPlatformTransformation.rotation.r6 = cameraNodePose(1, 2);
1284 tHeadToPlatformTransformation.rotation.r7 = cameraNodePose(2, 0);
1285 tHeadToPlatformTransformation.rotation.r8 = cameraNodePose(2, 1);
1286 tHeadToPlatformTransformation.rotation.r9 = cameraNodePose(2, 2);
1287 tHeadToPlatformTransformation.translation.x = cameraNodePose(0, 3);
1288 tHeadToPlatformTransformation.translation.y = cameraNodePose(1, 3);
1289 tHeadToPlatformTransformation.translation.z = cameraNodePose(2, 3);
// Transform the root-frame z-axis into camera coordinates.
1291 Eigen::Vector3f upInRootCS = {0, 0, 1};
1292 Eigen::Vector3f upInCamCS = cameraNodePose.block<3, 3>(0, 0).inverse() * upInRootCS;
1293 Math3d::SetVec(upwardsVector, upInCamCS(0), upInCamCS(1), upInCamCS(2));
// Persists descriptor data of the best (first) confirmed hypothesis under the
// given object name and descriptor index. NOTE(review): the callee names on
// the two calls below are elided in this excerpt.
1299 ObjectLearningByPushing::SaveHistogramOfConfirmedHypothesis(std::string sObjectName,
1300 int nDescriptorNumber)
1302 if (m_pConfirmedHypotheses->GetSize() > 0)
1306 (*m_pConfirmedHypotheses)[0], sObjectName, nDescriptorNumber);
1308 (*m_pConfirmedHypotheses)[0], sObjectName, nDescriptorNumber);
// Runs object recognition on the current image: a named object triggers the
// specific-object path; otherwise a (currently disabled) single-object path;
// otherwise the general multi-object recognition collecting names, poses and
// probabilities.
1319 ObjectLearningByPushing::RecognizeHypotheses(CByteImage* pImageColorLeft,
1320 const std::string objectName)
1322 CByteImage* pHSVImage =
1324 ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
1326 if (!objectName.empty())
1330 *m_pAllNewDepthMapPoints,
// NOTE(review): '&& false' permanently disables this branch — confirm intent.
1336 else if (m_pConfirmedHypotheses->GetSize() != 0 &&
false)
1340 float fdistance, fProbability;
1343 *m_pAllNewDepthMapPoints,
1353 std::vector<std::string> aNames;
1354 std::vector<Vec3d> aPositions;
1355 std::vector<Mat3d> aOrientations;
1356 std::vector<float> aProbabilities;
1359 *m_pAllNewDepthMapPoints,
// Draws either the confirmed hypotheses or all current candidate hypotheses
// into the result images via the visualization helper.
1383 ObjectLearningByPushing::VisualizeHypotheses(CByteImage* pImageGreyLeft,
1384 CByteImage* pImageGreyRight,
1385 CByteImage* pImageColorLeft,
1386 CByteImage* pImageColorRight,
1387 bool bShowConfirmedHypotheses,
1388 CByteImage* resultImageLeft,
1389 CByteImage* resultImageRight,
1390 bool bMakeScreenshot)
1392 if (bShowConfirmedHypotheses)
1394 m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft,
1396 *m_pConfirmedHypotheses,
1397 *m_pAllOldSIFTPoints,
1399 *m_pCorrespondingMSERs,
// Otherwise show the unconfirmed candidate hypotheses.
1407 m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft,
1409 *m_pObjectHypotheses,
1410 *m_pAllOldSIFTPoints,
1412 *m_pCorrespondingMSERs,
// Returns the center of the preferred hypothesis; vRet stays the zero vector
// when no hypothesis is available (fObjectExtent handling is elided here).
1421 ObjectLearningByPushing::GetObjectPosition(
float& fObjectExtent,
bool bPreferCentralObject)
1423 Vec3d vRet = {0, 0, 0};
1426 std::vector<CHypothesisPoint*>* pPoints;
1427 CObjectHypothesis* pHypothesis = SelectPreferredHypothesis(pPoints, bPreferCentralObject);
1431 vRet.x = pHypothesis->
vCenter.x;
1432 vRet.y = pHypothesis->
vCenter.y;
1433 vRet.z = pHypothesis->
vCenter.z;
// Applies a head-motion rotation (and, per the signature, a translation —
// elided lines presumably add it) to every point list (new, confirmed,
// doubtable) of the best confirmed hypothesis, in place.
1441 ObjectLearningByPushing::ApplyHeadMotionTransformation(Mat3d mRotation,
Vec3d vTranslation)
1443 if (m_pConfirmedHypotheses->GetSize() > 0)
1445 for (
size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aNewPoints.size(); i++)
1447 Math3d::MulMatVec(mRotation,
1448 (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition,
1450 (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition);
1453 for (
size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.size(); i++)
1455 Math3d::MulMatVec(mRotation,
1456 (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition,
1458 (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition);
1461 for (
size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.size(); i++)
1463 Math3d::MulMatVec(mRotation,
1464 (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition,
1466 (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition);
// Projects the preferred hypothesis' 3D points into the image and computes
// their 2D bounding box (min/max in x and y), then logs it.
1472 ObjectLearningByPushing::GetHypothesisBoundingBox(
int& nMinX,
int& nMaxX,
int& nMinY,
int& nMaxY)
1474 std::vector<CHypothesisPoint*>* pPoints;
1475 SelectPreferredHypothesis(pPoints);
1483 if (m_pConfirmedHypotheses->GetSize() > 0)
1485 for (
size_t i = 0; i < pPoints->size(); i++)
// Project the 3D point into image coordinates (third argument presumably the
// distortion flag — confirm against the IVT CCalibration API).
1487 calibration->CameraToImageCoordinates(pPoints->at(i)->vPosition, vPoint2D,
false);
1489 if (vPoint2D.x < nMinX)
1494 if (vPoint2D.x > nMaxX)
1499 if (vPoint2D.y < nMinY)
1504 if (vPoint2D.y > nMaxY)
1511 ARMARX_VERBOSE <<
"ObjectLearningByPushing::GetHypothesisBoundingBox: " << nMinX <<
" " << nMaxX
1512 <<
" " << nMinY <<
" " << nMaxY;
1516 ObjectLearningByPushing::GetHypothesisPrincipalAxesAndBoundingBox(
Vec3d& vPrincipalAxis1,
1517 Vec3d& vPrincipalAxis2,
1518 Vec3d& vPrincipalAxis3,
1519 Vec3d& vEigenValues,
1520 Vec3d& vMaxExtentFromCenter,
1521 Vec2d& vBoundingBoxLU,
1522 Vec2d& vBoundingBoxRU,
1523 Vec2d& vBoundingBoxLL,
1524 Vec2d& vBoundingBoxRL)
1526 std::vector<CHypothesisPoint*>* pPoints;
1527 SelectPreferredHypothesis(pPoints);
1529 const int nNumberOfSamples = pPoints->size();
1531 if (nNumberOfSamples < 3)
1533 ARMARX_WARNING <<
"Hypothesis contains " << nNumberOfSamples <<
" points - aborting!";
1539 Vec3d vAvgPoint = {0.0f, 0.0f, 0.0f};
1541 for (
int i = 0; i < nNumberOfSamples; i++)
1543 vAvgPoint.x += pPoints->at(i)->vPosition.x;
1544 vAvgPoint.y += pPoints->at(i)->vPosition.y;
1545 vAvgPoint.z += pPoints->at(i)->vPosition.z;
1548 vAvgPoint.x /= (
float)nNumberOfSamples;
1549 vAvgPoint.y /= (
float)nNumberOfSamples;
1550 vAvgPoint.z /= (
float)nNumberOfSamples;
1553 CFloatMatrix mSamplePoints(3, nNumberOfSamples);
1555 for (
int i = 0; i < nNumberOfSamples; i++)
1557 mSamplePoints(0, i) = pPoints->at(i)->vPosition.x - vAvgPoint.x;
1558 mSamplePoints(1, i) = pPoints->at(i)->vPosition.y - vAvgPoint.y;
1559 mSamplePoints(2, i) = pPoints->at(i)->vPosition.z - vAvgPoint.z;
1562 CFloatMatrix mEigenVectors(3, 3);
1563 CFloatMatrix mEigenValues(1, 3);
1565 LinearAlgebra::PCA(&mSamplePoints, &mEigenVectors, &mEigenValues);
1567 vPrincipalAxis1.x = mEigenVectors(0, 0);
1568 vPrincipalAxis1.y = mEigenVectors(1, 0);
1569 vPrincipalAxis1.z = mEigenVectors(2, 0);
1571 vPrincipalAxis2.x = mEigenVectors(0, 1);
1572 vPrincipalAxis2.y = mEigenVectors(1, 1);
1573 vPrincipalAxis2.z = mEigenVectors(2, 1);
1575 vPrincipalAxis3.x = mEigenVectors(0, 2);
1576 vPrincipalAxis3.y = mEigenVectors(1, 2);
1577 vPrincipalAxis3.z = mEigenVectors(2, 2);
1579 vEigenValues.x = mEigenValues(0, 0);
1580 vEigenValues.y = mEigenValues(0, 1);
1581 vEigenValues.z = mEigenValues(0, 2);
1586 Mat3d mTransformation;
1587 mTransformation.r1 = mEigenVectors(0, 0);
1588 mTransformation.r2 = mEigenVectors(1, 0);
1589 mTransformation.r3 = mEigenVectors(2, 0);
1590 mTransformation.r4 = mEigenVectors(0, 1);
1591 mTransformation.r5 = mEigenVectors(1, 1);
1592 mTransformation.r6 = mEigenVectors(2, 1);
1593 mTransformation.r7 = mEigenVectors(0, 2);
1594 mTransformation.r8 = mEigenVectors(1, 2);
1595 mTransformation.r9 = mEigenVectors(2, 2);
1598 Vec3d vTransformedPoint;
1599 float fMaxExtentX = 0, fMaxExtentY = 0, fMaxExtentZ = 0;
1601 for (
int i = 0; i < nNumberOfSamples; i++)
1603 Vec3d vTempPoint = {mSamplePoints(0, i),
1604 mSamplePoints(1, i),
1605 mSamplePoints(2, i)};
1606 Math3d::MulMatVec(mTransformation, vTempPoint, vTransformedPoint);
1608 if (fabsf(vTransformedPoint.x) > fMaxExtentX)
1610 fMaxExtentX = fabsf(vTransformedPoint.x);
1613 if (fabsf(vTransformedPoint.y) > fMaxExtentY)
1615 fMaxExtentY = fabsf(vTransformedPoint.y);
1618 if (fabsf(vTransformedPoint.z) > fMaxExtentZ)
1620 fMaxExtentZ = fabsf(vTransformedPoint.z);
1624 vMaxExtentFromCenter.x = fMaxExtentX;
1625 vMaxExtentFromCenter.y = fMaxExtentY;
1626 vMaxExtentFromCenter.z = fMaxExtentZ;
1631 Vec3d vTranslationPrincipalAxis1, vTranslationPrincipalAxis2;
1632 Math3d::MulVecScalar(vPrincipalAxis1,
1633 0.5f * (2.5f * pow(vEigenValues.x, 0.3333f) + vMaxExtentFromCenter.x),
1634 vTranslationPrincipalAxis1);
1635 Math3d::MulVecScalar(vPrincipalAxis2,
1636 0.5f * (2.5f * pow(vEigenValues.y, 0.3333f) + vMaxExtentFromCenter.y),
1637 vTranslationPrincipalAxis2);
1639 Vec3d vCorner1, vCorner2, vCorner3, vCorner4;
1641 Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner1);
1642 Math3d::SubtractFromVec(vCorner1, vTranslationPrincipalAxis2);
1644 Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner2);
1645 Math3d::AddToVec(vCorner2, vTranslationPrincipalAxis2);
1647 Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner3);
1648 Math3d::SubtractFromVec(vCorner3, vTranslationPrincipalAxis2);
1650 Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner4);
1651 Math3d::AddToVec(vCorner4, vTranslationPrincipalAxis2);
1654 calibration->CameraToImageCoordinates(vCorner1, vBoundingBoxLU,
false);
1655 calibration->CameraToImageCoordinates(vCorner2, vBoundingBoxRU,
false);
1656 calibration->CameraToImageCoordinates(vCorner3, vBoundingBoxLL,
false);
1657 calibration->CameraToImageCoordinates(vCorner4, vBoundingBoxRL,
false);
1663 if (vBoundingBoxLU.y > vBoundingBoxLL.y)
1665 Math2d::SetVec(vTemp, vBoundingBoxLU);
1666 Math2d::SetVec(vBoundingBoxLU, vBoundingBoxLL);
1667 Math2d::SetVec(vBoundingBoxLL, vTemp);
1670 if (vBoundingBoxLU.y > vBoundingBoxRL.y)
1672 Math2d::SetVec(vTemp, vBoundingBoxLU);
1673 Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRL);
1674 Math2d::SetVec(vBoundingBoxRL, vTemp);
1677 if (vBoundingBoxRU.y > vBoundingBoxLL.y)
1679 Math2d::SetVec(vTemp, vBoundingBoxRU);
1680 Math2d::SetVec(vBoundingBoxRU, vBoundingBoxLL);
1681 Math2d::SetVec(vBoundingBoxLL, vTemp);
1684 if (vBoundingBoxRU.y > vBoundingBoxRL.y)
1686 Math2d::SetVec(vTemp, vBoundingBoxRU);
1687 Math2d::SetVec(vBoundingBoxRU, vBoundingBoxRL);
1688 Math2d::SetVec(vBoundingBoxRL, vTemp);
1691 if (vBoundingBoxLU.x > vBoundingBoxRU.x)
1693 Math2d::SetVec(vTemp, vBoundingBoxLU);
1694 Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRU);
1695 Math2d::SetVec(vBoundingBoxRU, vTemp);
1698 if (vBoundingBoxLL.x > vBoundingBoxRL.x)
1700 Math2d::SetVec(vTemp, vBoundingBoxLL);
1701 Math2d::SetVec(vBoundingBoxLL, vBoundingBoxRL);
1702 Math2d::SetVec(vBoundingBoxRL, vTemp);
// Reports the current object hypothesis' pose information to the observer:
// queries the object position, PCA principal axes, eigenvalues and the
// projected 2D bounding box, draws a debug sphere at the global object pose,
// and forwards everything to the listener topic.
// NOTE(review): this chunk comes from a garbled extraction -- braces and
// several lines are missing (e.g. the declarations/allocations of
// principalAxis1..3, eigenvalues, hypothesisPosition and globalObjPose, and
// the remaining arguments of reportObjectHypothesisPosition()). The visible
// code is kept verbatim.
1710 ObjectLearningByPushing::ReportObjectPositionInformationToObserver()
// Local buffers filled by the getter calls below.
1712 Vec3d vHypothesisPosition, vPrincipalAxis1, vPrincipalAxis2, vPrincipalAxis3, vEigenValues,
1714 Vec2d vBoundingBoxLeftUpper, vBoundingBoxRightUpper, vBoundingBoxLeftLower,
1715 vBoundingBoxRightLower;
1716 float fHypothesisExtent;
// Object position plus a scalar extent estimate of the hypothesis.
1717 vHypothesisPosition = GetObjectPosition(fHypothesisExtent);
// PCA axes/eigenvalues and the four image-space bounding-box corners.
1718 GetHypothesisPrincipalAxesAndBoundingBox(vPrincipalAxis1,
1723 vBoundingBoxLeftUpper,
1724 vBoundingBoxRightUpper,
1725 vBoundingBoxLeftLower,
1726 vBoundingBoxRightLower);
// Convert the IVT Vec3d position into an Eigen vector (presumably used to
// build the framework pose reported below -- TODO confirm; those lines were
// dropped by the extraction).
1728 Eigen::Vector3f vec(vHypothesisPosition.x, vHypothesisPosition.y, vHypothesisPosition.z);
// Copy the three principal axes into the report structures.
1733 principalAxis1->x = vPrincipalAxis1.x;
1734 principalAxis1->y = vPrincipalAxis1.y;
1735 principalAxis1->z = vPrincipalAxis1.z;
1737 principalAxis2->x = vPrincipalAxis2.x;
1738 principalAxis2->y = vPrincipalAxis2.y;
1739 principalAxis2->z = vPrincipalAxis2.z;
1741 principalAxis3->x = vPrincipalAxis3.x;
1742 principalAxis3->y = vPrincipalAxis3.y;
1743 principalAxis3->z = vPrincipalAxis3.z;
// Copy the corresponding PCA eigenvalues.
1745 eigenvalues->x = vEigenValues.x;
1746 eigenvalues->y = vEigenValues.y;
1747 eigenvalues->z = vEigenValues.z;
// Visualize the object position as a blue sphere (radius 50) in the debug
// drawer, keyed by this component's name.
1751 debugDrawer->setSphereVisu(getName(),
"GlobalObj", globalObjPose, DrawColor{0, 0, 1, 1}, 50.0);
// Publish the collected hypothesis data to the listener (remaining call
// arguments were dropped by the extraction).
1753 listener->reportObjectHypothesisPosition(hypothesisPosition,
// Stores a new head-to-platform transformation as the current one. The
// previously current transformation is preserved in
// m_tHeadToPlatformTransformationOld -- unless bResetOldTransformation is
// true, in which case the "old" slot is overwritten with the NEW values too
// (i.e. history is reset so old == current).
// NOTE(review): the Mat3d mRotation parameter line and the surrounding braces
// were dropped by the extraction; code kept verbatim.
1762 ObjectLearningByPushing::SetHeadToPlatformTransformation(
Vec3d vTranslation,
1764 bool bResetOldTransformation)
1766 if (bResetOldTransformation)
// Reset: old transformation := the newly provided translation/rotation.
1768 Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, vTranslation);
1769 Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, mRotation);
// (else branch) Keep history: old transformation := the one current until now.
1773 Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation,
1774 m_tHeadToPlatformTransformation.translation);
1775 Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation,
1776 m_tHeadToPlatformTransformation.rotation);
// Finally store the new transformation as the current one.
1779 Math3d::SetVec(m_tHeadToPlatformTransformation.translation, vTranslation);
1780 Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, mRotation);
// Loads previously saved object segmentation hypotheses for sObjectName and
// incrementally fuses them: for each nMaxFileIndex, all segmentations with
// file index in [nFileIndexStart, nMaxFileIndex] are collected, fused into a
// "<name>-fused" result, and an image of the result is written to disk.
// NOTE(review): the extraction dropped the hypothesis loading and fusion
// calls, the file-name construction, and the braces; the visible code is kept
// verbatim.
1784 ObjectLearningByPushing::LoadAndFuseObjectSegmentations(std::string sObjectName)
// Index of the first segmentation file to load.
1787 const int nFileIndexStart = 1;
// Fuse an increasing number of segmentations (up to 51 files).
1789 for (
int nMaxFileIndex = 2; nMaxFileIndex <= 51; nMaxFileIndex++)
// Hypotheses loaded for this fusion round; owned here and freed below.
1791 std::vector<CObjectHypothesis*> aObjectHypotheses;
1801 int nFileIndex = nFileIndexStart;
// Collect hypotheses while the file index is within the current range.
1804 nFileIndex <= nMaxFileIndex)
1806 aObjectHypotheses.push_back(pHypothesis);
// Save the fused result under "<name>-fused" with the number of inputs
// (the surrounding save/fuse calls were dropped by the extraction).
1814 pHypothesis, sObjectName +
"-fused", aObjectHypotheses.size());
1816 pHypothesis, sObjectName +
"-fused", aObjectHypotheses.size());
// Write the visualization image of the fused segmentation to disk.
1827 pImg->SaveToFile(sFileName.c_str());
// Release the loaded hypotheses (raw owning pointers).
1830 for (
size_t i = 0; i < aObjectHypotheses.size(); i++)
1832 delete aObjectHypotheses.at(i);
// Selects the hypothesis to operate on and returns its point list via the
// pPoints out-parameter. Confirmed hypotheses are preferred over initial
// hypotheses at local maxima. If bPreferCentralObject is set, the hypothesis
// whose center is closest to a fixed "central" point (given in platform
// coordinates and transformed into the camera frame via the inverse
// head-to-platform transformation) is chosen; otherwise the first hypothesis
// is used.
// NOTE(review): the extraction dropped braces and several lines (declarations
// of hypotheses/mRotInv/nBestIndex/vCenterPlatform, the assignment of pPoints,
// and parts of the log statements); visible code kept verbatim.
1840 ObjectLearningByPushing::SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints,
1841 const bool bPreferCentralObject)
// Prefer confirmed hypotheses; fall back to initial hypotheses at maxima.
1848 if (m_pConfirmedHypotheses->GetSize() > 0)
1850 hypotheses = m_pConfirmedHypotheses;
1852 else if (m_pInitialHypothesesAtLocalMaxima->GetSize() > 0)
1854 hypotheses = m_pInitialHypothesesAtLocalMaxima;
1859 if (bPreferCentralObject)
// Transform the central point from platform into camera coordinates:
// vCenter = R^T * (vCenterPlatform - t), with R,t = head-to-platform.
1866 Vec3d vCenter, vTemp;
1867 Math3d::SubtractVecVec(
1868 vCenterPlatform, m_tHeadToPlatformTransformation.translation, vTemp);
1869 Math3d::Transpose(m_tHeadToPlatformTransformation.rotation, mRotInv);
1870 Math3d::MulMatVec(mRotInv, vTemp, vCenter);
// Verbose diagnostics: dump the central point, the transformation, and
// the transformed point for debugging the frame conversion.
1871 ARMARX_VERBOSE <<
"Central point in platform cs: " << vCenterPlatform.x <<
", "
1872 << vCenterPlatform.y <<
", " << vCenterPlatform.z;
1873 ARMARX_VERBOSE <<
"m_tHeadToPlatformTransformation.translation: "
1874 << m_tHeadToPlatformTransformation.translation.x <<
", "
1875 << m_tHeadToPlatformTransformation.translation.y <<
", "
1876 << m_tHeadToPlatformTransformation.translation.z;
1878 << m_tHeadToPlatformTransformation.rotation.r1 <<
", "
1879 << m_tHeadToPlatformTransformation.rotation.r2 <<
", "
1880 << m_tHeadToPlatformTransformation.rotation.r3 <<
", "
1881 << m_tHeadToPlatformTransformation.rotation.r4 <<
", "
1882 << m_tHeadToPlatformTransformation.rotation.r5 <<
", "
1883 << m_tHeadToPlatformTransformation.rotation.r6 <<
", "
1884 << m_tHeadToPlatformTransformation.rotation.r7 <<
", "
1885 << m_tHeadToPlatformTransformation.rotation.r8 <<
", "
1886 << m_tHeadToPlatformTransformation.rotation.r9;
1887 ARMARX_VERBOSE <<
"Central point in camera cs: " << vCenter.x <<
", " << vCenter.y
1888 <<
", " << vCenter.z;
// Linear scan for the hypothesis center closest to vCenter.
1894 float fMinDist = Math3d::Distance((*hypotheses)[0]->vCenter, vCenter);
1896 for (
int i = 1; i < hypotheses->GetSize(); i++)
1898 if (Math3d::Distance((*hypotheses)[i]->vCenter, vCenter) < fMinDist)
// NOTE(review): the nBestIndex = i assignment was dropped by the
// extraction; the distance is recomputed here for the new minimum.
1903 fMinDist = Math3d::Distance((*hypotheses)[i]->vCenter, vCenter);
1909 pHypothesis = (*hypotheses)[nBestIndex];
// Without the central-object preference, simply take the first hypothesis.
1913 pHypothesis = (*hypotheses)[0];
// Log which hypothesis center was chosen for pushing.
1929 << pHypothesis->
vCenter.x <<
", " << pHypothesis->
vCenter.y <<
", "
1930 << pHypothesis->
vCenter.z <<
") is chosen for pushing";
// Converts an OLP object description text file into a PCL point cloud and
// saves it as a PCD file. The input file stores a point count followed by
// per-point "x y z" and "r g b intensity" tuples; if includeCandidatePoints
// is set, a second block in the same format is read and appended as well.
// Colors are scaled by 255 and modulated by the intensity value.
// NOTE(review): the extraction dropped the declaration of nNumPoints, the
// body of the fopen error branch, fclose, and the braces; also note that
// fscanf with "%lu" requires nNumPoints to be unsigned long -- confirm
// against the dropped declaration. Visible code kept verbatim.
1941 ObjectLearningByPushing::convertFileOLPtoPCL(std::string
filename,
bool includeCandidatePoints)
// Force "C" numeric locale so fscanf parses '.' decimal separators.
1943 setlocale(LC_NUMERIC,
"C");
// Open the OLP text file for reading.
1945 FILE* filePtr = fopen(
filename.c_str(),
"rt");
// Bail out if the file could not be opened (branch body dropped).
1947 if (filePtr == NULL)
1953 pcl::PointCloud<pcl::PointXYZRGBA>::Ptr objectPointCloud(
1954 new pcl::PointCloud<pcl::PointXYZRGBA>());
1958 float r, g, b, intensity;
1959 pcl::PointXYZRGBA point;
// 'dummy' accumulates fscanf return values (fields read) -- the parse
// results are not otherwise checked.
1960 int dummy = fscanf(filePtr,
"%lu", &nNumPoints);
// First block: the object points themselves.
1962 for (
size_t i = 0; i < nNumPoints; i++)
1964 dummy += fscanf(filePtr,
"%e %e %e", &point.x, &point.y, &point.z);
1965 dummy += fscanf(filePtr,
"%e %e %e %e \n", &r, &g, &b, &intensity);
// Scale normalized color [0,1] to 8-bit and modulate by intensity.
1966 point.r = r * 255 * intensity;
1967 point.g = g * 255 * intensity;
1968 point.b = b * 255 * intensity;
1969 objectPointCloud->push_back(point);
// Optional second block: candidate points, same on-disk format.
1972 if (includeCandidatePoints)
1975 dummy += fscanf(filePtr,
"%lu", &nNumPoints);
1977 for (
size_t i = 0; i < nNumPoints; i++)
1979 dummy += fscanf(filePtr,
"%e %e %e", &point.x, &point.y, &point.z);
1980 dummy += fscanf(filePtr,
"%e %e %e %e \n", &r, &g, &b, &intensity);
1981 point.r = r * 255 * intensity;
1982 point.g = g * 255 * intensity;
1983 point.b = b * 255 * intensity;
1984 objectPointCloud->push_back(point);
// Persist the assembled cloud as a PCD file (NOTE(review): written back
// under 'filename' as visible here -- the extension handling, if any,
// was dropped by the extraction).
1992 pcl::io::savePCDFile(
filename, *objectPointCloud);
1996 ObjectLearningByPushing::BoundingBoxInForegroundImage(CByteImage* image,
2002 for (
int i = 0; i < image->height; i++)
2004 if (i < minY || i > maxY)
2006 for (
int j = 0; j < image->width; j++)
2008 if (j < minX || j > maxX)
2010 image->pixels[i * image->width + j] = 0;