#include <RobotAPI/interface/visualization/DebugDrawerInterface.h>

#include <MemoryX/interface/observers/ObjectMemoryObserverInterface.h>

#include <VisionX/interface/components/ObjectLearningByPushing.h>
#include <VisionX/interface/components/PointCloudAndImageAndCalibrationProviderInterface.h>
#include <VisionX/interface/components/RGBDImageProvider.h>

#include <Math/Math2d.h>
#include <Math/Math3d.h>
class CStereoCalibration;
class CommunicationWithRobotArmarX;
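// Optional ArmarX properties: Ice adapter names of the image and point cloud providers,
// the robot state proxy, the primary camera frame, and the DebugDrawer topic.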
defineOptionalProperty<std::string>("ImageProviderAdapterName", "Armar3ImageProvider", "Ice Adapter name of the image provider");
defineOptionalProperty<std::string>("PointCloudProviderAdapterName", "Ice Adapter name of the point cloud provider");
defineOptionalProperty<std::string>("RobotStateProxyName", "RobotStateComponent", "Ice Adapter name of the robot state proxy");
defineOptionalProperty<std::string>("Name of the robot state frame of the primary camera");
defineOptionalProperty<std::string>("DebugDrawerTopicName", "DebugDrawerUpdates", "name of DebugDrawer topic");
virtual public visionx::ObjectLearningByPushingInterface

return "ObjectLearningByPushing";

visionx::types::PointList
visionx::types::PointList
armarx::Vector3BasePtr
const ::Ice::Current& c = Ice::emptyCurrent) override;
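// Internal creation, validation, and revalidation of object hypotheses on the
// current grey and color stereo image pairs.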
void CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
                                           CByteImage* pImageGreyRight,
                                           CByteImage* pImageColorLeft,
                                           CByteImage* pImageColorRight,

bool ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
                                             CByteImage* pImageGreyRight,
                                             CByteImage* pImageColorLeft,
                                             CByteImage* pImageColorRight,

bool RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft,
                                                 CByteImage* pImageGreyRight,
                                                 CByteImage* pImageColorLeft,
                                                 CByteImage* pImageColorRight,
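// Saving descriptors (histograms) of confirmed hypotheses under an object name and
// recognizing hypotheses by name later.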
bool SaveHistogramOfConfirmedHypothesis(std::string sObjectName, int nDescriptorNumber = 0);

void RecognizeHypotheses(CByteImage* pImageColorLeft, const std::string objectName = "");
void VisualizeHypotheses(CByteImage* pImageGreyLeft,
                         CByteImage* pImageGreyRight,
                         CByteImage* pImageColorLeft,
                         CByteImage* pImageColorRight,
                         bool bShowConfirmedHypotheses,
                         CByteImage* pResultImageLeft = NULL,
                         CByteImage* pResultImageRight = NULL,
                         bool bMakeScreenshot = false);
void RefreshVisualization(bool bConfirmedHypotheses);
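// Robot state helpers: fetching current data from the robot and maintaining the
// head-to-platform transformation.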
void UpdateDataFromRobot();

void ApplyHeadMotionTransformation(Mat3d mRotation, Vec3d vTranslation);

void SetHeadToPlatformTransformation(Vec3d vTranslation, bool bResetOldTransformation = false);
int GetNumberOfNonconfirmedHypotheses()
{
    return m_pObjectHypotheses->GetSize();
}
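// Geometric queries on the selected hypothesis: position and extent, image-space
// bounding box, and principal axes.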
Vec3d GetObjectPosition(float& fObjectExtent, bool bPreferCentralObject = true);

void GetHypothesisBoundingBox(int& nMinX, int& nMaxX, int& nMinY, int& nMaxY);

void GetHypothesisPrincipalAxesAndBoundingBox(Vec3d& vPrincipalAxis1,
                                              Vec3d& vPrincipalAxis2,
                                              Vec3d& vPrincipalAxis3,
                                              Vec3d& vMaxExtentFromCenter,
                                              Vec2d& vBoundingBoxLU,
                                              Vec2d& vBoundingBoxRU,
                                              Vec2d& vBoundingBoxLL,
                                              Vec2d& vBoundingBoxRL);

void ReportObjectPositionInformationToObserver();

void LoadAndFuseObjectSegmentations(std::string sObjectName);

void SwapAllPointsArraysToOld();
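// Further helpers: hypothesis selection, conversion of OLP files to PCL point clouds,
// and bounding-box extraction in the foreground image.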
CObjectHypothesis* SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints,
                                             const bool bPreferCentralObject = true);

void convertFileOLPtoPCL(std::string filename, bool includeCandidatePoints = false);
void BoundingBoxInForegroundImage(CByteImage* image, int minX, int maxX, int minY, int maxY);
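// Names, Ice proxies, and connection state for the image and point cloud providers,
// the result listener topic, and the camera calibration.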
std::string imageProviderName, pointcloudProviderName, robotStateProxyName, cameraFrameName;

ImageProviderInterfacePrx imageProviderProxy;
RGBDPointCloudProviderInterfacePrx pointcloudProviderProxy;
bool connected = false;

ObjectLearningByPushingListenerPrx listener;

CCalibration* calibration;

bool m_bMakeIntermediateScreenshots;
std::string m_sScreenshotPath;
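// Image buffers for the stereo camera images, visualization results, and the current point cloud.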
CByteImage *colorImageLeft, *colorImageRight, *greyImageLeft, *greyImageRight,
           *colorImageLeftOld, *resultImageLeft, *resultImageRight, *tempResultImage;
CByteImage **cameraImages, **resultImages;

pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointCloudPtr;

Transformation3d tHeadToPlatformTransformation;

int iterationCounter;
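// Containers for object hypotheses, MSER descriptors, and depth map points of the
// current and previous frame.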
*m_pInitialHypothesesAtLocalMaxima;

std::vector<CMSERDescriptor3D*>* m_pAllNewMSERs;
std::vector<CMSERDescriptor3D*>* m_pAllOldMSERs;
std::vector<CMSERDescriptor3D*>* m_pCorrespondingMSERs;
std::vector<CHypothesisPoint*>* m_pAllNewDepthMapPoints;
std::vector<CHypothesisPoint*>* m_pAllOldDepthMapPoints;

Transformation3d m_tHeadToPlatformTransformation, m_tHeadToPlatformTransformationOld;

CByteImage* m_pSegmentedBackgroundImage;
CByteImage* m_pDisparityImage;

std::mutex processingLock;