#include <Image/ByteImage.h>
#include <Image/ImageProcessor.h>
#include <Calibration/StereoCalibration.h>
#include <Calibration/Calibration.h>
#include <Math/LinearAlgebra.h>
#include <Math/FloatMatrix.h>

#include <VisionX/interface/components/Calibration.h>

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/io/pcd_io.h>

#include <Eigen/Geometry>
listener->resetHypothesesStatus();
currentState = eCreatingInitialHypotheses;

listener->resetHypothesesStatus();
currentState = eValidatingInitialHypotheses;

listener->resetHypothesesStatus();
currentState = eRevalidatingConfirmedHypotheses;
types::PointList hypothesisPoints;
std::vector<CHypothesisPoint*>* pPoints;
SelectPreferredHypothesis(pPoints);

for (size_t i = 0; i < pPoints->size(); i++)
{
    Eigen::Vector3f p(pPoints->at(i)->vPosition.x, pPoints->at(i)->vPosition.y, pPoints->at(i)->vPosition.z);
    hypothesisPoints.push_back(point);
}

return hypothesisPoints;
types::PointList scenePoints;
std::vector<CHypothesisPoint*>* points = m_pAllNewDepthMapPoints;

if (m_pAllNewDepthMapPoints->size() == 0)
{
    points = m_pAllOldDepthMapPoints;
}

for (size_t i = 0; i < points->size(); i++)
{
    Eigen::Vector3f p(points->at(i)->vPosition.x, points->at(i)->vPosition.y, points->at(i)->vPosition.z);
    scenePoints.push_back(point);
}
Vector3BasePtr upVector = new Vector3(upwardsVector.x, upwardsVector.y, upwardsVector.z);

return cameraFrameName;
std::vector<CHypothesisPoint*>* pPoints;

PoseBasePtr result = NULL;

result = new Pose(rot, trans);
UpdateDataFromRobot();
SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);

m_pFeatureCalculation->GetAllFeaturePoints(colorImageLeft, greyImageLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE,
                                           *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);

RecognizeHypotheses(colorImageLeft, objectName);
imageProviderName = getProperty<std::string>("ImageProviderAdapterName").getValue();
usingImageProvider(imageProviderName);

robotStateProxyName = getProperty<std::string>("RobotStateProxyName").getValue();
usingProxy(robotStateProxyName);

cameraFrameName = getProperty<std::string>("CameraFrameName").getValue();

currentState = eNoHypotheses;

offeringTopic("ObjectLearningByPushing");

pointcloudProviderName = getProperty<std::string>("PointCloudProviderAdapterName").getValue();
usingPointCloudProvider(pointcloudProviderName);

offeringTopic(getProperty<std::string>("DebugDrawerTopicName").getValue());
ARMARX_VERBOSE << "entering onConnectPointCloudAndImageProcessor()";

ARMARX_INFO << "Connecting to " << pointcloudProviderName;

pointcloudProviderProxy = getProxy<RGBDPointCloudProviderInterfacePrx>(pointcloudProviderName);
calibration = tools::convert(pointcloudProviderProxy->getStereoCalibration().calibrationLeft);

if (cameraFrameName != pointcloudProviderProxy->getReferenceFrame())
{
    ARMARX_WARNING << "Specified camera frame name: '" << cameraFrameName << "' does not match frame given by provider '"
                   << pointcloudProviderProxy->getReferenceFrame() << "'!";
}
if (pointcloudProviderProxy->getReferenceFrame() == "DepthCameraSim")
{
    ARMARX_WARNING << "Simulation detected. Applying fix for DepthCameraSim misalignment.";
    Mat3d R = tools::convert(pointcloudProviderProxy->getStereoCalibration().calibrationLeft.cameraParam.rotation);

    Eigen::Matrix3f worldToCamera;
    worldToCamera << R.r1, R.r2, R.r3, R.r4, R.r5, R.r6, R.r7, R.r8, R.r9;

    Eigen::Matrix3f cameraToImg;
    cameraToImg << 0, -1, 0, 1, 0, 0, 0, 0, 1;

    worldToCamera = cameraToImg * worldToCamera;

    R.r1 = worldToCamera(0, 0);
    R.r2 = worldToCamera(0, 1);
    R.r3 = worldToCamera(0, 2);
    R.r4 = worldToCamera(1, 0);
    R.r5 = worldToCamera(1, 1);
    R.r6 = worldToCamera(1, 2);
    R.r7 = worldToCamera(2, 0);
    R.r8 = worldToCamera(2, 1);
    R.r9 = worldToCamera(2, 2);

    calibration->SetRotation(R);
}
pointCloudPtr.reset(new pcl::PointCloud<pcl::PointXYZRGBA>());

ARMARX_INFO << "Connecting to " << imageProviderName;

imageProviderProxy = getProxy<ImageProviderInterfacePrx>(imageProviderName);

colorImageLeftOld = new CByteImage(colorImageLeft);
greyImageLeft = new CByteImage(colorImageLeft->width, colorImageLeft->height, CByteImage::eGrayScale);
greyImageRight = new CByteImage(greyImageLeft);
resultImageLeft = new CByteImage(colorImageLeft);
resultImageRight = new CByteImage(colorImageLeft);
tempResultImage = new CByteImage(colorImageLeft);

cameraImages = new CByteImage*[2];
cameraImages[0] = colorImageLeft;
cameraImages[1] = colorImageRight;

resultImages = new CByteImage*[2];
resultImages[0] = resultImageLeft;
resultImages[1] = resultImageRight;
iterationCounter = 0;

ARMARX_INFO << "Connecting to " << robotStateProxyName;
robotStateProxy = getProxy<RobotStateComponentInterfacePrx>(robotStateProxyName);
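// Worker-thread count for OpenMP: a compile-time OMP_NUM_THREADS wins;
// otherwise, when OLP_USE_DBVISION is defined, one core is left free
// (an assumption based on the elided surroundings), else all cores are used.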
#if defined OMP_NUM_THREADS
nNumOMPThreads = OMP_NUM_THREADS;
#elif defined OLP_USE_DBVISION
nNumOMPThreads = omp_get_num_procs() - 1;
#else
nNumOMPThreads = omp_get_num_procs();
#endif

if (nNumOMPThreads < 1)
{
    nNumOMPThreads = 1;
}

omp_set_num_threads(nNumOMPThreads);
m_pAllNewMSERs = new std::vector<CMSERDescriptor3D*>();
m_pAllOldMSERs = new std::vector<CMSERDescriptor3D*>();
m_pCorrespondingMSERs = new std::vector<CMSERDescriptor3D*>();
m_pAllNewDepthMapPoints = new std::vector<CHypothesisPoint*>();
m_pAllOldDepthMapPoints = new std::vector<CHypothesisPoint*>();

Math3d::SetVec(m_tHeadToPlatformTransformation.translation, Math3d::zero_vec);
Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, Math3d::zero_vec);
Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, Math3d::unit_mat);
Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, Math3d::unit_mat);

Math3d::SetVec(upwardsVector, Math3d::zero_vec);
listener = getTopic<ObjectLearningByPushingListenerPrx>("ObjectLearningByPushing");

debugDrawer = getTopic<DebugDrawerInterfacePrx>(getProperty<std::string>("DebugDrawerTopicName").getValue());
debugDrawer->clearLayer(getName());
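// Teardown below (presumably onExit or the destructor): every
// feature/hypothesis container is a raw-pointer collection owned by this
// component, so each element is deleted before its container is freed.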
delete m_pHypothesisVisualization;
delete m_pHypothesisGeneration;

for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
{
    delete (*m_pObjectHypotheses)[i];
}
delete m_pObjectHypotheses;

for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
{
    delete (*m_pConfirmedHypotheses)[i];
}
delete m_pConfirmedHypotheses;
delete m_pInitialHypothesesAtLocalMaxima;

for (int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
{
    delete (*m_pAllNewSIFTPoints)[i];
}
delete m_pAllNewSIFTPoints;

for (int i = 0; i < m_pAllOldSIFTPoints->GetSize(); i++)
{
    delete (*m_pAllOldSIFTPoints)[i];
}
delete m_pAllOldSIFTPoints;

for (size_t i = 0; i < m_pAllNewMSERs->size(); i++)
{
    delete m_pAllNewMSERs->at(i);
}
delete m_pAllNewMSERs;

for (size_t i = 0; i < m_pAllOldMSERs->size(); i++)
{
    delete m_pAllOldMSERs->at(i);
}
delete m_pAllOldMSERs;

for (size_t i = 0; i < m_pCorrespondingMSERs->size(); i++)
{
    delete m_pCorrespondingMSERs->at(i);
}
delete m_pCorrespondingMSERs;

for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
{
    delete m_pAllNewDepthMapPoints->at(i);
}
delete m_pAllNewDepthMapPoints;

for (size_t i = 0; i < m_pAllOldDepthMapPoints->size(); i++)
{
    delete m_pAllOldDepthMapPoints->at(i);
}
delete m_pAllOldDepthMapPoints;

delete m_pGaussBackground;
ARMARX_INFO << "process() called before the component is ready (race condition with ImageProcessor::process)";

std::lock_guard<std::mutex> lock(processingLock);
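// process() runs a small state machine on currentState: each active state
// grabs fresh camera/point cloud data, performs one hypothesis
// creation/validation step, and publishes the visualization images.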
switch (currentState)
{
    case eCreatingInitialHypotheses:
    {
        UpdateDataFromRobot();
        SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);

        CreateInitialObjectHypothesesInternal(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);

        VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, false,
                            resultImageLeft, resultImageRight, bMakeScreenshots);
        provideResultImages(resultImages);

        if (GetNumberOfNonconfirmedHypotheses() > 0)
        {
            ARMARX_INFO << "Found " << GetNumberOfNonconfirmedHypotheses() << " possible objects";

            ReportObjectPositionInformationToObserver();
            listener->reportInitialObjectHypothesesCreated(true);

            currentState = eHasInitialHypotheses;
        }
        else
        {
            listener->reportInitialObjectHypothesesCreated(false);

            currentState = eNoHypotheses;
        }
    }
    break;
    case eValidatingInitialHypotheses:
    {
        ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
        UpdateDataFromRobot();
        SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);

        bool validationSuccessful = ValidateInitialObjectHypothesesInternal(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);

        VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeftOld, colorImageRight, false,
                            resultImageRight, tempResultImage, bMakeScreenshots);
        VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, true,
                            resultImageLeft, tempResultImage, bMakeScreenshots);
        provideResultImages(resultImages);

        if (validationSuccessful)
        {
            ARMARX_INFO << "At least one object hypothesis was confirmed";

            ReportObjectPositionInformationToObserver();
            listener->reportObjectHypothesesValidated(true);

            currentState = eHasConfirmedHypotheses;
        }
        else
        {
            ARMARX_INFO << "No object hypothesis was confirmed";

            listener->reportObjectHypothesesValidated(false);

            currentState = eNoHypotheses;
        }
    }
    break;
    case eRevalidatingConfirmedHypotheses:
    {
        ::ImageProcessor::CopyImage(colorImageLeft, colorImageLeftOld);
        UpdateDataFromRobot();
        SetHeadToPlatformTransformation(tHeadToPlatformTransformation.translation, tHeadToPlatformTransformation.rotation, true);

        bool validationSuccessful = RevalidateConfirmedObjectHypothesesInternal(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, iterationCounter);

        VisualizeHypotheses(greyImageLeft, greyImageRight, colorImageLeft, colorImageRight, true,
                            resultImageLeft, resultImageRight, bMakeScreenshots);
        provideResultImages(resultImages);

        ARMARX_INFO << "The confirmed object hypotheses were revalidated";
        ARMARX_IMPORTANT << "Was revalidation successful: " << validationSuccessful;

        ReportObjectPositionInformationToObserver();
        listener->reportObjectHypothesesValidated(validationSuccessful);

        currentState = eHasConfirmedHypotheses;
    }
    break;

    case eHasInitialHypotheses:
    case eHasConfirmedHypotheses:
        provideResultImages(resultImages);
        break;

    default:
        UpdateDataFromRobot();
        ::ImageProcessor::CopyImage(cameraImages[0], resultImages[0]);
        ::ImageProcessor::CopyImage(cameraImages[0], resultImages[1]);
        provideResultImages(resultImages);
        break;
}
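// Double-buffering of feature sets: the "new" arrays from the last capture
// become the "old" ones, and the other (now stale) buffer is emptied so the
// next capture can fill it.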
void ObjectLearningByPushing::SwapAllPointsArraysToOld()
{
    pTempSIFTPoints = m_pAllOldSIFTPoints;
    m_pAllOldSIFTPoints = m_pAllNewSIFTPoints;
    m_pAllNewSIFTPoints = pTempSIFTPoints;

    for (int i = 0; i < m_pAllNewSIFTPoints->GetSize(); i++)
    {
        delete (*m_pAllNewSIFTPoints)[i];
    }
    m_pAllNewSIFTPoints->Clear();

    std::vector<CMSERDescriptor3D*>* pTempMSERs;
    pTempMSERs = m_pAllOldMSERs;
    m_pAllOldMSERs = m_pAllNewMSERs;
    m_pAllNewMSERs = pTempMSERs;

    for (size_t i = 0; i < m_pAllNewMSERs->size(); i++)
    {
        delete m_pAllNewMSERs->at(i);
    }
    m_pAllNewMSERs->clear();

    std::vector<CHypothesisPoint*>* pTempDepthMapPoints;
    pTempDepthMapPoints = m_pAllOldDepthMapPoints;
    m_pAllOldDepthMapPoints = m_pAllNewDepthMapPoints;
    m_pAllNewDepthMapPoints = pTempDepthMapPoints;

    for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
    {
        delete m_pAllNewDepthMapPoints->at(i);
    }
    m_pAllNewDepthMapPoints->clear();
}
void ObjectLearningByPushing::CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
                                                                    CByteImage* pImageColorLeft, CByteImage* pImageColorRight,
                                                                    int nImageNumber)
{
    for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
    {
        delete (*m_pObjectHypotheses)[i];
    }
    m_pObjectHypotheses->Clear();

    for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
    {
        delete (*m_pConfirmedHypotheses)[i];
    }
    m_pConfirmedHypotheses->Clear();
    m_pInitialHypothesesAtLocalMaxima->Clear();

    m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, 2 * OLP_DEPTH_MAP_PIXEL_DISTANCE,
                                               *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);

    if (m_bMakeIntermediateScreenshots)
    {
        std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
        m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
    }

    m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);

    m_pHypothesisGeneration->FindObjectHypotheses(pImageColorLeft, pImageColorRight, pImageGreyLeft, pImageGreyRight,
                                                  *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, *m_pObjectHypotheses);

#ifdef OLP_FILTER_INITIAL_HYPOTHESES_WITH_MAXIMUMNESS
    std::vector<Vec3d> aMaxima;

    calibration, aMaxima, pMaximumnessImage);

    if (m_bMakeIntermediateScreenshots)
    {
        std::string sScreenshotFile = m_sScreenshotPath + "max0000.bmp";
        pMaximumnessImage->SaveToFile(sScreenshotFile.c_str());
    }

    for (int n = 0; n < m_pObjectHypotheses->GetSize(); n++)
    {
        float fForegroundRatio;
        int nNumForegroundPoints;

        if (fForegroundRatio > 0.4f)
        {
            m_pInitialHypothesesAtLocalMaxima->AddElement(pHypothesis);
        }
    }

    if ((m_pInitialHypothesesAtLocalMaxima->GetSize() == 0) && (m_pObjectHypotheses->GetSize() > 0))
    {
        m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[0]);
    }

    m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft, pImageColorRight, *m_pInitialHypothesesAtLocalMaxima, *m_pAllOldSIFTPoints,
                                                    *m_pAllOldMSERs, *m_pCorrespondingMSERs, false, NULL, NULL, m_bMakeIntermediateScreenshots);

    delete pMaximumnessImage;
#else
    for (int i = 0; i < m_pObjectHypotheses->GetSize(); i++)
    {
        m_pInitialHypothesesAtLocalMaxima->AddElement((*m_pObjectHypotheses)[i]);
    }
#endif

    SwapAllPointsArraysToOld();
bool ObjectLearningByPushing::ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
                                                                      CByteImage* pImageColorLeft, CByteImage* pImageColorRight,
                                                                      int nImageNumber)
{
    for (int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
    {
        delete m_pCorrespondingMSERs->at(i);
    }
    m_pCorrespondingMSERs->clear();

    m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
    BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
    m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);

    if (m_bMakeIntermediateScreenshots)
    {
        std::string sScreenshotFile = m_sScreenshotPath + "bg0000.bmp";
        m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
    }

    m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE,
                                               *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);

    if (m_bMakeIntermediateScreenshots)
    {
        std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
        m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());

        CByteImage* pEdges = new CByteImage(m_pDisparityImage);
        sScreenshotFile = m_sScreenshotPath + "edges0000.bmp";
        pEdges->SaveToFile(sScreenshotFile.c_str());
    }

    ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);

    int nNumValidatedObjects = m_pConfirmedHypotheses->GetSize();

    SwapAllPointsArraysToOld();
    for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
    {
        ARMARX_VERBOSE << "Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " has been revalidated. "
                       << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size() << " confirmed points, "
                       << (*m_pConfirmedHypotheses)[i]->aNewPoints.size() << " new candidates. Position: ("
                       << (*m_pConfirmedHypotheses)[i]->vCenter.x << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.y
                       << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.z << ")";
    }
    for (int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
    {
        for (int j = i; j > 0; j--)
        {
            if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >= (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
            {
                break;
            }

            CObjectHypothesis* pTemp = (*m_pConfirmedHypotheses)[j];
            (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
            (*m_pConfirmedHypotheses)[j - 1] = pTemp;
        }
    }

    SaveHistogramOfConfirmedHypothesis("NewObject", nImageNumber);

    return (nNumValidatedObjects > 0);
}
bool ObjectLearningByPushing::RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
                                                                          CByteImage* pImageColorLeft, CByteImage* pImageColorRight,
                                                                          int nImageNumber)
{
    for (int i = 0; i < (int)m_pCorrespondingMSERs->size(); i++)
    {
        delete m_pCorrespondingMSERs->at(i);
    }
    m_pCorrespondingMSERs->clear();

    m_pGaussBackground->GetBinaryForegroundImageRGB(pImageColorLeft, m_pSegmentedBackgroundImage);
    BoundingBoxInForegroundImage(m_pSegmentedBackgroundImage, 100, 540, 100, 480);
    m_pGaussBackground->LearnBackgroundRGB(&pImageColorLeft, 1);

    if (m_bMakeIntermediateScreenshots)
    {
        std::string sScreenshotFile = m_sScreenshotPath + "bg0000.bmp";
        m_pSegmentedBackgroundImage->SaveToFile(sScreenshotFile.c_str());
    }

    m_pFeatureCalculation->GetAllFeaturePoints(pImageColorLeft, pImageGreyLeft, pointCloudPtr, OLP_DEPTH_MAP_PIXEL_DISTANCE,
                                               *m_pAllNewSIFTPoints, *m_pAllNewMSERs, *m_pAllNewDepthMapPoints, calibration);
    std::vector<Vec3d> all3DPoints;

    for (size_t i = 0; i < pointCloudPtr->size(); i++)
    {
        pcl::PointXYZRGBA point = pointCloudPtr->at(i);

        if (point.x * point.x + point.y * point.y + point.z * point.z > 0)
        {
            Vec3d point3d = {point.x, point.y, point.z};
            all3DPoints.push_back(point3d);
        }
    }
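    // Debug dumps: the blocks below write plain-text point lists ("x y z" per
    // line, preceded by a count) plus the camera image and the up-vector, so a
    // run can be replayed offline. LC_NUMERIC is forced to "C" to keep the
    // decimal separator a '.' regardless of the system locale.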
    setlocale(LC_NUMERIC, "C");

    sFileName.append("all3Dpoints00.dat");
    FILE* pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%zu \n", all3DPoints.size());
    Vec3d center = {0, 0, 0};

    for (size_t i = 0; i < all3DPoints.size(); i++)
    {
        fprintf(pFile, "%e %e %e \n", all3DPoints.at(i).x, all3DPoints.at(i).y, all3DPoints.at(i).z);
        Math3d::AddToVec(center, all3DPoints.at(i));
    }

    sFileName.append("all3Dpoints-centered00.dat");
    pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%zu \n", all3DPoints.size());
    Math3d::MulVecScalar(center, 1.0 / all3DPoints.size(), center);

    for (size_t i = 0; i < all3DPoints.size(); i++)
    {
        fprintf(pFile, "%e %e %e \n", all3DPoints.at(i).x - center.x, all3DPoints.at(i).y - center.y, all3DPoints.at(i).z - center.z);
    }

    setlocale(LC_NUMERIC, "C");

    sFileName.append("allusedpoints00.dat");
    pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%zu \n", m_pAllNewDepthMapPoints->size());
    Math3d::SetVec(center, Math3d::zero_vec);

    for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
    {
        fprintf(pFile, "%e %e %e \n", m_pAllNewDepthMapPoints->at(i)->vPosition.x, m_pAllNewDepthMapPoints->at(i)->vPosition.y,
                m_pAllNewDepthMapPoints->at(i)->vPosition.z);
        Math3d::AddToVec(center, m_pAllNewDepthMapPoints->at(i)->vPosition);
    }

    sFileName.append("allusedpoints-centered00.dat");
    pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%zu \n", m_pAllNewDepthMapPoints->size());
    Math3d::MulVecScalar(center, 1.0 / m_pAllNewDepthMapPoints->size(), center);

    for (size_t i = 0; i < m_pAllNewDepthMapPoints->size(); i++)
    {
        fprintf(pFile, "%e %e %e \n", m_pAllNewDepthMapPoints->at(i)->vPosition.x - center.x,
                m_pAllNewDepthMapPoints->at(i)->vPosition.y - center.y, m_pAllNewDepthMapPoints->at(i)->vPosition.z - center.z);
    }

    sFileName.append("cameraimage00.bmp");
    pImageColorLeft->SaveToFile(sFileName.c_str());

    sFileName.append("upwardsvector00.dat");
    pFile = fopen(sFileName.c_str(), "wt");
    fprintf(pFile, "%e %e %e \n", upwardsVector.x, upwardsVector.y, upwardsVector.z);
    if (m_bMakeIntermediateScreenshots)
    {
        std::string sScreenshotFile = m_sScreenshotPath + "disp0000.bmp";
        m_pDisparityImage->SaveToFile(sScreenshotFile.c_str());
    }

    ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);
    for (int i = 1; i < m_pConfirmedHypotheses->GetSize(); i++)
    {
        for (int j = i; j > 0; j--)
        {
            if ((*m_pConfirmedHypotheses)[j - 1]->aConfirmedPoints.size() >= (*m_pConfirmedHypotheses)[j]->aConfirmedPoints.size())
            {
                break;
            }

            CObjectHypothesis* pTemp = (*m_pConfirmedHypotheses)[j];
            (*m_pConfirmedHypotheses)[j] = (*m_pConfirmedHypotheses)[j - 1];
            (*m_pConfirmedHypotheses)[j - 1] = pTemp;
        }
    }
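    // Keep only hypotheses that actually moved when pushed: anything that
    // stayed still for the last nNumTimesHypothesisMustHaveMoved validations
    // is treated as background and dropped. The last remaining hypothesis is
    // never removed; instead a global "nothing moved" flag is raised.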
    const int nNumTimesHypothesisMustHaveMoved = 1;
    bool nothingMoved = false;

    for (int i = m_pConfirmedHypotheses->GetSize() - 1; i >= 0 && m_pConfirmedHypotheses->GetSize() > 1; i--)
    {
        bool bHasMoved = false;
        const int nNumEntries = (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.size();

        for (int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved; j--)
        {
            bHasMoved |= (*m_pConfirmedHypotheses)[i]->aHypothesisHasMoved.at(j);
        }

        if (!bHasMoved)
        {
            ARMARX_VERBOSE << "Removing hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber
                           << " because it did not move the last " << nNumTimesHypothesisMustHaveMoved << " times.";
            delete (*m_pConfirmedHypotheses)[i];

            for (int k = i; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
            {
                (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
            }

            m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
        }
    }

    if (m_pConfirmedHypotheses->GetSize() == 1)
    {
        bool bHasMoved = false;
        const int nNumEntries = (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.size();

        for (int j = nNumEntries - 1; j >= 0 && j >= nNumEntries - nNumTimesHypothesisMustHaveMoved; j--)
        {
            bHasMoved |= (*m_pConfirmedHypotheses)[0]->aHypothesisHasMoved.at(j);
        }

        if (!bHasMoved)
        {
            nothingMoved = true;
        }
    }
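    // Merge duplicates: hypotheses whose point sets overlap by more than 50%
    // describe the same object, so the later (smaller, given the sort above)
    // one is removed.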
    for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
    {
        for (int j = i + 1; j < m_pConfirmedHypotheses->GetSize(); j++)
        {
            ARMARX_VERBOSE << "Overlap " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " "
                           << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber << ": " << fOverlapRatio;

            if (fOverlapRatio > 0.5)
            {
                ARMARX_VERBOSE << "Removing hypothesis " << (*m_pConfirmedHypotheses)[j]->nHypothesisNumber;
                delete (*m_pConfirmedHypotheses)[j];

                for (int k = j; k < m_pConfirmedHypotheses->GetSize() - 1; k++)
                {
                    (*m_pConfirmedHypotheses)[k] = (*m_pConfirmedHypotheses)[k + 1];
                }

                m_pConfirmedHypotheses->DeleteElement(m_pConfirmedHypotheses->GetSize() - 1);
            }
        }
    }
    SwapAllPointsArraysToOld();

    for (int i = 0; i < m_pConfirmedHypotheses->GetSize(); i++)
    {
        ARMARX_VERBOSE << "Hypothesis " << (*m_pConfirmedHypotheses)[i]->nHypothesisNumber << " has been revalidated. "
                       << (*m_pConfirmedHypotheses)[i]->aConfirmedPoints.size() << " confirmed points, "
                       << (*m_pConfirmedHypotheses)[i]->aNewPoints.size() << " new candidates. Position: ("
                       << (*m_pConfirmedHypotheses)[i]->vCenter.x << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.y
                       << ", " << (*m_pConfirmedHypotheses)[i]->vCenter.z << ")";
    }

    SaveHistogramOfConfirmedHypothesis("NewObject", nImageNumber);

    return !nothingMoved;
}
void ObjectLearningByPushing::UpdateDataFromRobot()
{
    if (!waitForPointClouds(pointcloudProviderName) || !waitForImages(imageProviderName))
    {
        ARMARX_WARNING << "Timeout or error while waiting for point cloud and image data";
    }

    if (getPointClouds(pointcloudProviderName, pointCloudPtr))
    {
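        // For organized clouds, the per-pixel RGB values are copied into the
        // left camera image (RGB24 layout: three bytes per pixel, row-major),
        // so the image and the cloud stem from the same frame.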
        if (pointCloudPtr->isOrganized())
        {
            size_t h = cameraImages[0]->height;
            size_t w = cameraImages[0]->width;

            for (size_t y = 0; y < h; y++)
            {
                for (size_t x = 0; x < w; x++)
                {
                    const pcl::PointXYZRGBA& p = pointCloudPtr->at(x, y);
                    cameraImages[0]->pixels[3 * (y * w + x) + 0] = p.r;
                    cameraImages[0]->pixels[3 * (y * w + x) + 1] = p.g;
                    cameraImages[0]->pixels[3 * (y * w + x) + 2] = p.b;
                }
            }
        }

        timestamp = pointCloudPtr->header.stamp;
    }
    armarx::MetaInfoSizeBasePtr metaInfo;
    int nNumberImages = getImages(imageProviderName, cameraImages, metaInfo);
    timestamp = metaInfo->timeProvided;

    RemoteRobot::synchronizeLocalCloneToTimestamp(localRobot, robotStateProxy, timestamp);

    ::ImageProcessor::ConvertImage(colorImageLeft, greyImageLeft);

    Eigen::Matrix4f cameraNodePose = localRobot->getRobotNode(cameraFrameName)->getPoseInRootFrame();
    tHeadToPlatformTransformation.rotation.r1 = cameraNodePose(0, 0);
    tHeadToPlatformTransformation.rotation.r2 = cameraNodePose(0, 1);
    tHeadToPlatformTransformation.rotation.r3 = cameraNodePose(0, 2);
    tHeadToPlatformTransformation.rotation.r4 = cameraNodePose(1, 0);
    tHeadToPlatformTransformation.rotation.r5 = cameraNodePose(1, 1);
    tHeadToPlatformTransformation.rotation.r6 = cameraNodePose(1, 2);
    tHeadToPlatformTransformation.rotation.r7 = cameraNodePose(2, 0);
    tHeadToPlatformTransformation.rotation.r8 = cameraNodePose(2, 1);
    tHeadToPlatformTransformation.rotation.r9 = cameraNodePose(2, 2);
    tHeadToPlatformTransformation.translation.x = cameraNodePose(0, 3);
    tHeadToPlatformTransformation.translation.y = cameraNodePose(1, 3);
    tHeadToPlatformTransformation.translation.z = cameraNodePose(2, 3);
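    // The "upwards" direction (root-frame z-axis, i.e. opposed to gravity) is
    // expressed in camera coordinates by applying the inverse camera rotation;
    // it later serves as the vertical reference for hypothesis processing.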
    Eigen::Vector3f upInRootCS = {0, 0, 1};
    Eigen::Vector3f upInCamCS = cameraNodePose.block<3, 3>(0, 0).inverse() * upInRootCS;
    Math3d::SetVec(upwardsVector, upInCamCS(0), upInCamCS(1), upInCamCS(2));
}
bool ObjectLearningByPushing::SaveHistogramOfConfirmedHypothesis(std::string sObjectName, int nDescriptorNumber)
{
    if (m_pConfirmedHypotheses->GetSize() > 0)
void ObjectLearningByPushing::RecognizeHypotheses(CByteImage* pImageColorLeft, const std::string objectName)
{
    ::ImageProcessor::CalculateHSVImage(pImageColorLeft, pHSVImage);

    if (!objectName.empty())

    else if (m_pConfirmedHypotheses->GetSize() != 0 && false)
    {
        float fdistance, fProbability;
        CObjectRecognition::FindObjectRGBD((*m_pConfirmedHypotheses)[0], pHSVImage, *m_pAllNewDepthMapPoints, calibration, upwardsVector, vPosition, mOrientation, fdistance, fProbability);
    }
    else
    {
        std::vector<std::string> aNames;
        std::vector<Vec3d> aPositions;
        std::vector<Mat3d> aOrientations;
        std::vector<float> aProbabilities;
void ObjectLearningByPushing::VisualizeHypotheses(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight,
                                                  CByteImage* pImageColorLeft, CByteImage* pImageColorRight,
                                                  bool bShowConfirmedHypotheses, CByteImage* resultImageLeft,
                                                  CByteImage* resultImageRight, bool bMakeScreenshot)
{
    if (bShowConfirmedHypotheses)
    {
        m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft, pImageColorRight, *m_pConfirmedHypotheses,
                                                        *m_pAllOldSIFTPoints, *m_pAllOldMSERs, *m_pCorrespondingMSERs, true,
                                                        resultImageLeft, resultImageRight, bMakeScreenshot);
    }
    else
    {
        m_pHypothesisVisualization->VisualizeHypotheses(pImageColorLeft, pImageColorRight, *m_pObjectHypotheses,
                                                        *m_pAllOldSIFTPoints, *m_pAllOldMSERs, *m_pCorrespondingMSERs, false,
                                                        resultImageLeft, resultImageRight, bMakeScreenshot);
    }
}
Vec3d ObjectLearningByPushing::GetObjectPosition(float& fObjectExtent, bool bPreferCentralObject)
{
    Vec3d vRet = {0, 0, 0};

    std::vector<CHypothesisPoint*>* pPoints;
    CObjectHypothesis* pHypothesis = SelectPreferredHypothesis(pPoints, bPreferCentralObject);

    vRet.x = pHypothesis->vCenter.x;
    vRet.y = pHypothesis->vCenter.y;
    vRet.z = pHypothesis->vCenter.z;
void ObjectLearningByPushing::ApplyHeadMotionTransformation(Mat3d mRotation, Vec3d vTranslation)
{
    if (m_pConfirmedHypotheses->GetSize() > 0)
    {
        for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aNewPoints.size(); i++)
        {
            Math3d::MulMatVec(mRotation, (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition, vTranslation, (*m_pConfirmedHypotheses)[0]->aNewPoints.at(i)->vPosition);
        }

        for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.size(); i++)
        {
            Math3d::MulMatVec(mRotation, (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition, vTranslation, (*m_pConfirmedHypotheses)[0]->aConfirmedPoints.at(i)->vPosition);
        }

        for (size_t i = 0; i < (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.size(); i++)
        {
            Math3d::MulMatVec(mRotation, (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition, vTranslation, (*m_pConfirmedHypotheses)[0]->aDoubtablePoints.at(i)->vPosition);
        }
    }
}
void ObjectLearningByPushing::GetHypothesisBoundingBox(int& nMinX, int& nMaxX, int& nMinY, int& nMaxY)
{
    std::vector<CHypothesisPoint*>* pPoints;
    SelectPreferredHypothesis(pPoints);

    if (m_pConfirmedHypotheses->GetSize() > 0)
    {
        for (size_t i = 0; i < pPoints->size(); i++)
        {
            calibration->CameraToImageCoordinates(pPoints->at(i)->vPosition, vPoint2D, false);

            if (vPoint2D.x < nMinX)
            {
                nMinX = vPoint2D.x;
            }

            if (vPoint2D.x > nMaxX)
            {
                nMaxX = vPoint2D.x;
            }

            if (vPoint2D.y < nMinY)
            {
                nMinY = vPoint2D.y;
            }

            if (vPoint2D.y > nMaxY)
            {
                nMaxY = vPoint2D.y;
            }
        }
    }

    ARMARX_VERBOSE << "ObjectLearningByPushing::GetHypothesisBoundingBox: " << nMinX << " " << nMaxX << " " << nMinY << " " << nMaxY;
void ObjectLearningByPushing::GetHypothesisPrincipalAxesAndBoundingBox(Vec3d& vPrincipalAxis1, Vec3d& vPrincipalAxis2, Vec3d& vPrincipalAxis3,
                                                                       Vec3d& vEigenValues, Vec3d& vMaxExtentFromCenter,
                                                                       Vec2d& vBoundingBoxLU, Vec2d& vBoundingBoxRU, Vec2d& vBoundingBoxLL, Vec2d& vBoundingBoxRL)
{
    std::vector<CHypothesisPoint*>* pPoints;
    SelectPreferredHypothesis(pPoints);

    const int nNumberOfSamples = pPoints->size();

    if (nNumberOfSamples < 3)
    {
        ARMARX_WARNING << "Hypothesis contains " << nNumberOfSamples << " points - aborting!";
        return;
    }
    Vec3d vAvgPoint = {0.0f, 0.0f, 0.0f};

    for (int i = 0; i < nNumberOfSamples; i++)
    {
        vAvgPoint.x += pPoints->at(i)->vPosition.x;
        vAvgPoint.y += pPoints->at(i)->vPosition.y;
        vAvgPoint.z += pPoints->at(i)->vPosition.z;
    }

    vAvgPoint.x /= (float)nNumberOfSamples;
    vAvgPoint.y /= (float)nNumberOfSamples;
    vAvgPoint.z /= (float)nNumberOfSamples;

    CFloatMatrix mSamplePoints(3, nNumberOfSamples);

    for (int i = 0; i < nNumberOfSamples; i++)
    {
        mSamplePoints(0, i) = pPoints->at(i)->vPosition.x - vAvgPoint.x;
        mSamplePoints(1, i) = pPoints->at(i)->vPosition.y - vAvgPoint.y;
        mSamplePoints(2, i) = pPoints->at(i)->vPosition.z - vAvgPoint.z;
    }

    CFloatMatrix mEigenVectors(3, 3);
    CFloatMatrix mEigenValues(1, 3);

    LinearAlgebra::PCA(&mSamplePoints, &mEigenVectors, &mEigenValues);
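    // Assumption (IVT convention): PCA returns the eigenvectors as columns of
    // mEigenVectors, sorted by decreasing eigenvalue, so column 0 is the
    // dominant principal axis of the centered sample matrix.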
    vPrincipalAxis1.x = mEigenVectors(0, 0);
    vPrincipalAxis1.y = mEigenVectors(1, 0);
    vPrincipalAxis1.z = mEigenVectors(2, 0);

    vPrincipalAxis2.x = mEigenVectors(0, 1);
    vPrincipalAxis2.y = mEigenVectors(1, 1);
    vPrincipalAxis2.z = mEigenVectors(2, 1);

    vPrincipalAxis3.x = mEigenVectors(0, 2);
    vPrincipalAxis3.y = mEigenVectors(1, 2);
    vPrincipalAxis3.z = mEigenVectors(2, 2);

    vEigenValues.x = mEigenValues(0, 0);
    vEigenValues.y = mEigenValues(0, 1);
    vEigenValues.z = mEigenValues(0, 2);
    Mat3d mTransformation;
    mTransformation.r1 = mEigenVectors(0, 0);
    mTransformation.r2 = mEigenVectors(1, 0);
    mTransformation.r3 = mEigenVectors(2, 0);
    mTransformation.r4 = mEigenVectors(0, 1);
    mTransformation.r5 = mEigenVectors(1, 1);
    mTransformation.r6 = mEigenVectors(2, 1);
    mTransformation.r7 = mEigenVectors(0, 2);
    mTransformation.r8 = mEigenVectors(1, 2);
    mTransformation.r9 = mEigenVectors(2, 2);

    Vec3d vTransformedPoint;
    float fMaxExtentX = 0, fMaxExtentY = 0, fMaxExtentZ = 0;

    for (int i = 0; i < nNumberOfSamples; i++)
    {
        Vec3d vTempPoint = {mSamplePoints(0, i), mSamplePoints(1, i), mSamplePoints(2, i)};
        Math3d::MulMatVec(mTransformation, vTempPoint, vTransformedPoint);

        if (fabsf(vTransformedPoint.x) > fMaxExtentX)
        {
            fMaxExtentX = fabsf(vTransformedPoint.x);
        }

        if (fabsf(vTransformedPoint.y) > fMaxExtentY)
        {
            fMaxExtentY = fabsf(vTransformedPoint.y);
        }

        if (fabsf(vTransformedPoint.z) > fMaxExtentZ)
        {
            fMaxExtentZ = fabsf(vTransformedPoint.z);
        }
    }

    vMaxExtentFromCenter.x = fMaxExtentX;
    vMaxExtentFromCenter.y = fMaxExtentY;
    vMaxExtentFromCenter.z = fMaxExtentZ;
    Vec3d vTranslationPrincipalAxis1, vTranslationPrincipalAxis2;
    Math3d::MulVecScalar(vPrincipalAxis1, 0.5f * (2.5f * pow(vEigenValues.x, 0.3333f) + vMaxExtentFromCenter.x), vTranslationPrincipalAxis1);
    Math3d::MulVecScalar(vPrincipalAxis2, 0.5f * (2.5f * pow(vEigenValues.y, 0.3333f) + vMaxExtentFromCenter.y), vTranslationPrincipalAxis2);

    Vec3d vCorner1, vCorner2, vCorner3, vCorner4;

    Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner1);
    Math3d::SubtractFromVec(vCorner1, vTranslationPrincipalAxis2);

    Math3d::SubtractVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner2);
    Math3d::AddToVec(vCorner2, vTranslationPrincipalAxis2);

    Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner3);
    Math3d::SubtractFromVec(vCorner3, vTranslationPrincipalAxis2);

    Math3d::AddVecVec(vAvgPoint, vTranslationPrincipalAxis1, vCorner4);
    Math3d::AddToVec(vCorner4, vTranslationPrincipalAxis2);

    calibration->CameraToImageCoordinates(vCorner1, vBoundingBoxLU, false);
    calibration->CameraToImageCoordinates(vCorner2, vBoundingBoxRU, false);
    calibration->CameraToImageCoordinates(vCorner3, vBoundingBoxLL, false);
    calibration->CameraToImageCoordinates(vCorner4, vBoundingBoxRL, false);
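    // The four projected corners are then ordered by a fixed swap network:
    // the two points with smaller y become the "upper" pair, and within each
    // pair the smaller x becomes the "left" corner, yielding LU/RU/LL/RL.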
    Vec2d vTemp;

    if (vBoundingBoxLU.y > vBoundingBoxLL.y)
    {
        Math2d::SetVec(vTemp, vBoundingBoxLU);
        Math2d::SetVec(vBoundingBoxLU, vBoundingBoxLL);
        Math2d::SetVec(vBoundingBoxLL, vTemp);
    }

    if (vBoundingBoxLU.y > vBoundingBoxRL.y)
    {
        Math2d::SetVec(vTemp, vBoundingBoxLU);
        Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRL);
        Math2d::SetVec(vBoundingBoxRL, vTemp);
    }

    if (vBoundingBoxRU.y > vBoundingBoxLL.y)
    {
        Math2d::SetVec(vTemp, vBoundingBoxRU);
        Math2d::SetVec(vBoundingBoxRU, vBoundingBoxLL);
        Math2d::SetVec(vBoundingBoxLL, vTemp);
    }

    if (vBoundingBoxRU.y > vBoundingBoxRL.y)
    {
        Math2d::SetVec(vTemp, vBoundingBoxRU);
        Math2d::SetVec(vBoundingBoxRU, vBoundingBoxRL);
        Math2d::SetVec(vBoundingBoxRL, vTemp);
    }

    if (vBoundingBoxLU.x > vBoundingBoxRU.x)
    {
        Math2d::SetVec(vTemp, vBoundingBoxLU);
        Math2d::SetVec(vBoundingBoxLU, vBoundingBoxRU);
        Math2d::SetVec(vBoundingBoxRU, vTemp);
    }

    if (vBoundingBoxLL.x > vBoundingBoxRL.x)
    {
        Math2d::SetVec(vTemp, vBoundingBoxLL);
        Math2d::SetVec(vBoundingBoxLL, vBoundingBoxRL);
        Math2d::SetVec(vBoundingBoxRL, vTemp);
    }
}
void ObjectLearningByPushing::ReportObjectPositionInformationToObserver()
{
    Vec3d vHypothesisPosition, vPrincipalAxis1, vPrincipalAxis2, vPrincipalAxis3, vEigenValues, vMaxExtents;
    Vec2d vBoundingBoxLeftUpper, vBoundingBoxRightUpper, vBoundingBoxLeftLower, vBoundingBoxRightLower;
    float fHypothesisExtent;
    vHypothesisPosition = GetObjectPosition(fHypothesisExtent);
    GetHypothesisPrincipalAxesAndBoundingBox(vPrincipalAxis1, vPrincipalAxis2, vPrincipalAxis3, vEigenValues, vMaxExtents,
                                             vBoundingBoxLeftUpper, vBoundingBoxRightUpper, vBoundingBoxLeftLower, vBoundingBoxRightLower);

    Eigen::Vector3f vec(vHypothesisPosition.x, vHypothesisPosition.y, vHypothesisPosition.z);

    principalAxis1->x = vPrincipalAxis1.x;
    principalAxis1->y = vPrincipalAxis1.y;
    principalAxis1->z = vPrincipalAxis1.z;

    principalAxis2->x = vPrincipalAxis2.x;
    principalAxis2->y = vPrincipalAxis2.y;
    principalAxis2->z = vPrincipalAxis2.z;

    principalAxis3->x = vPrincipalAxis3.x;
    principalAxis3->y = vPrincipalAxis3.y;
    principalAxis3->z = vPrincipalAxis3.z;

    eigenvalues->x = vEigenValues.x;
    eigenvalues->y = vEigenValues.y;
    eigenvalues->z = vEigenValues.z;

    debugDrawer->setSphereVisu(getName(), "GlobalObj", globalObjPose, DrawColor {0, 0, 1, 1}, 50.0);

    listener->reportObjectHypothesisPosition(hypothesisPosition, fHypothesisExtent, principalAxis1, principalAxis2, principalAxis3, eigenvalues);
}
void ObjectLearningByPushing::SetHeadToPlatformTransformation(Vec3d vTranslation, Mat3d mRotation, bool bResetOldTransformation)
{
    if (bResetOldTransformation)
    {
        Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, vTranslation);
        Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, mRotation);
    }
    else
    {
        Math3d::SetVec(m_tHeadToPlatformTransformationOld.translation, m_tHeadToPlatformTransformation.translation);
        Math3d::SetMat(m_tHeadToPlatformTransformationOld.rotation, m_tHeadToPlatformTransformation.rotation);
    }

    Math3d::SetVec(m_tHeadToPlatformTransformation.translation, vTranslation);
    Math3d::SetMat(m_tHeadToPlatformTransformation.rotation, mRotation);
}
void ObjectLearningByPushing::LoadAndFuseObjectSegmentations(std::string sObjectName)
{
    const int nFileIndexStart = 1;

    for (int nMaxFileIndex = 2; nMaxFileIndex <= 51; nMaxFileIndex++)
    {
        std::vector<CObjectHypothesis*> aObjectHypotheses;

        int nFileIndex = nFileIndexStart;

        aObjectHypotheses.push_back(pHypothesis);

        pImg->SaveToFile(sFileName.c_str());

        for (size_t i = 0; i < aObjectHypotheses.size(); i++)
        {
            delete aObjectHypotheses.at(i);
        }
    }
}
CObjectHypothesis* ObjectLearningByPushing::SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints, const bool bPreferCentralObject)
{
    if (m_pConfirmedHypotheses->GetSize() > 0)
    {
        hypotheses = m_pConfirmedHypotheses;
    }
    else if (m_pInitialHypothesesAtLocalMaxima->GetSize() > 0)
    {
        hypotheses = m_pInitialHypothesesAtLocalMaxima;
    }

    if (bPreferCentralObject)
    {
        Vec3d vCenter, vTemp;
        Math3d::SubtractVecVec(vCenterPlatform, m_tHeadToPlatformTransformation.translation, vTemp);
        Math3d::Transpose(m_tHeadToPlatformTransformation.rotation, mRotInv);
        Math3d::MulMatVec(mRotInv, vTemp, vCenter);

        ARMARX_VERBOSE << "Central point in platform cs: " << vCenterPlatform.x << ", " << vCenterPlatform.y << ", " << vCenterPlatform.z;
        ARMARX_VERBOSE << "m_tHeadToPlatformTransformation.translation: " << m_tHeadToPlatformTransformation.translation.x << ", "
                       << m_tHeadToPlatformTransformation.translation.y << ", " << m_tHeadToPlatformTransformation.translation.z;
        ARMARX_VERBOSE << "m_tHeadToPlatformTransformation.rotation: " << m_tHeadToPlatformTransformation.rotation.r1 << ", "
                       << m_tHeadToPlatformTransformation.rotation.r2 << ", " << m_tHeadToPlatformTransformation.rotation.r3 << ", "
                       << m_tHeadToPlatformTransformation.rotation.r4 << ", " << m_tHeadToPlatformTransformation.rotation.r5 << ", "
                       << m_tHeadToPlatformTransformation.rotation.r6 << ", " << m_tHeadToPlatformTransformation.rotation.r7 << ", "
                       << m_tHeadToPlatformTransformation.rotation.r8 << ", " << m_tHeadToPlatformTransformation.rotation.r9;
        ARMARX_VERBOSE << "Central point in camera cs: " << vCenter.x << ", " << vCenter.y << ", " << vCenter.z;

        float fMinDist = Math3d::Distance((*hypotheses)[0]->vCenter, vCenter);

        for (int i = 1; i < hypotheses->GetSize(); i++)
        {
            if (Math3d::Distance((*hypotheses)[i]->vCenter, vCenter) < fMinDist)
            {
                fMinDist = Math3d::Distance((*hypotheses)[i]->vCenter, vCenter);
                nBestIndex = i;
            }
        }

        pHypothesis = (*hypotheses)[nBestIndex];
    }
    else
    {
        pHypothesis = (*hypotheses)[0];
    }

    << pHypothesis->vCenter.z << ") is chosen for pushing";
void ObjectLearningByPushing::convertFileOLPtoPCL(std::string filename, bool includeCandidatePoints)
{
    setlocale(LC_NUMERIC, "C");

    FILE* filePtr = fopen(filename.c_str(), "rt");

    if (filePtr == NULL)
    {
        return;
    }

    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr objectPointCloud(new pcl::PointCloud<pcl::PointXYZRGBA>());

    size_t nNumPoints;
    float r, g, b, intensity;
    pcl::PointXYZRGBA point;
    int dummy = fscanf(filePtr, "%zu", &nNumPoints);

    for (size_t i = 0; i < nNumPoints; i++)
    {
        dummy += fscanf(filePtr, "%e %e %e", &point.x, &point.y, &point.z);
        dummy += fscanf(filePtr, "%e %e %e %e \n", &r, &g, &b, &intensity);
        point.r = r * 255 * intensity;
        point.g = g * 255 * intensity;
        point.b = b * 255 * intensity;
        objectPointCloud->push_back(point);
    }

    if (includeCandidatePoints)
    {
        dummy += fscanf(filePtr, "%zu", &nNumPoints);

        for (size_t i = 0; i < nNumPoints; i++)
        {
            dummy += fscanf(filePtr, "%e %e %e", &point.x, &point.y, &point.z);
            dummy += fscanf(filePtr, "%e %e %e %e \n", &r, &g, &b, &intensity);
            point.r = r * 255 * intensity;
            point.g = g * 255 * intensity;
            point.b = b * 255 * intensity;
            objectPointCloud->push_back(point);
        }
    }

    pcl::io::savePCDFile(filename, *objectPointCloud);
}
void ObjectLearningByPushing::BoundingBoxInForegroundImage(CByteImage* image, int minX, int maxX, int minY, int maxY)
{
    for (int i = 0; i < image->height; i++)
    {
        if (i < minY || i > maxY)
        {
            for (int j = 0; j < image->width; j++)
            {
                if (j < minX || j > maxX)
                {
                    image->pixels[i * image->width + j] = 0;
                }
            }
        }
    }
}