ObjectLearningByPushing.h
Go to the documentation of this file.
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 
25 #pragma once
26 
27 #include <map>
28 #include <string>
29 #include <set>
30 
31 
32 // Core
36 
37 #include <RobotAPI/interface/visualization/DebugDrawerInterface.h>
38 
40 
41 // MemoryX
42 #include <MemoryX/interface/observers/ObjectMemoryObserverInterface.h>
43 
44 // VisionX
46 #include <VisionX/interface/components/ObjectLearningByPushing.h>
47 #include <VisionX/interface/components/PointCloudAndImageAndCalibrationProviderInterface.h>
48 #include <VisionX/interface/components/RGBDImageProvider.h>
49 
50 
51 // IVT
52 #include <Math/Math3d.h>
53 #include <Math/Math2d.h>
54 
55 
57 #include "ObjectHypothesis.h"
59 
60 #include <mutex>
61 
62 
63 // forward declarations
65 class CStereoCalibration;
66 class CCalibration;
68 class CByteImage;
69 class CGaussBackground;
70 class CommunicationWithRobotArmarX;
71 class CDataToRobot;
72 class CDataFromRobot;
73 
74 
75 
76 namespace visionx
77 {
78 
81  {
82  public:
85  {
86  defineOptionalProperty<std::string>("ImageProviderAdapterName", "Armar3ImageProvider", "Ice Adapter name of the image provider");
87  defineOptionalProperty<std::string>("PointCloudProviderAdapterName", "PointCloudProvider", "Ice Adapter name of the point cloud provider");
88  defineOptionalProperty<std::string>("RobotStateProxyName", "RobotStateComponent", "Ice Adapter name of the robot state proxy");
89  defineOptionalProperty<std::string>("CameraFrameName", "EyeLeftCamera", "Name of the robot state frame of the primary camera");
90 
91  defineOptionalProperty<std::string>("DebugDrawerTopicName", "DebugDrawerUpdates", "name of DebugDrawer topic");
92  }
93  };
94 
95 
 96  /**
 97  * ObjectLearningByPushing is the vision part of the approach for interactive object segmentation. It is used by the corresponding
 98  * object-learning statechart. Initially, object hypotheses are created based on the stereocamera images. One of these hypotheses is
 99  * then pushed. The resulting motion is used to segment the one or more objects that moved from the rest of the scene. The segmented objects
 100  * are represented in the form of RGBD point clouds.
 101  *
 102  * \componentproperties
 103  * \prop VisionX.ObjectLearningByPushing.ImageProviderAdapterName: Name of the
 104  * image provider that delivers the camera images.
 105  * \prop VisionX.ObjectLearningByPushing.RobotStateProxyName: Name of the robot state
 106  * proxy used to obtain the current robot state.
 107  * \prop VisionX.ObjectLearningByPushing.CameraFrameName: Name of the robot model frame of the primary camera
 108  *
 109  */
110 
113  virtual public visionx::ObjectLearningByPushingInterface
114  {
115  public:
116 
117  /**
118  * @see Component::getDefaultName()
119  */
120  std::string getDefaultName() const override
121  {
122  return "ObjectLearningByPushing";
123  }
124 
125 
126  /**
127  * Creates the initial object hypotheses.
128  */
129  void CreateInitialObjectHypotheses(const ::Ice::Current& c = Ice::emptyCurrent) override;
130 
131  /**
132  * Validates the initial object hypotheses after the first push.
133  */
134  void ValidateInitialObjectHypotheses(const ::Ice::Current& c = Ice::emptyCurrent) override;
135 
136  /**
137  * Re-validates the confirmed object hypotheses after the second and later pushes.
138  */
139  void RevalidateConfirmedObjectHypotheses(const ::Ice::Current& c = Ice::emptyCurrent) override;
140 
141  /**
142  * Returns the confirmed points constituting the object hypothesis. If several confirmed objects are available,
143  * the one containing the biggest number of confirmed points is returned.
144  */
145  visionx::types::PointList getObjectHypothesisPoints(const ::Ice::Current& c = Ice::emptyCurrent) override;
146 
147 
148  visionx::types::PointList getScenePoints(const ::Ice::Current& c = Ice::emptyCurrent) override;
149 
150 
151  armarx::Vector3BasePtr getUpwardsVector(const ::Ice::Current& c = Ice::emptyCurrent) override;
152 
153 
154  std::string getReferenceFrameName(const ::Ice::Current& c = Ice::emptyCurrent) override;
155 
156 
157  /**
158  * Returns the last transformation that the preferred object hypothesis underwent.
159  */
160  armarx::PoseBasePtr getLastObjectTransformation(const ::Ice::Current& c = Ice::emptyCurrent) override;
161 
 162  /**
 163  * Recognizes the object with the given name in the current camera images.
 164  */
165  void recognizeObject(const std::string& objectName, const ::Ice::Current& c = Ice::emptyCurrent) override;
166 
167 
169  {
176  eQuit = 42
177  };
178 
179  protected:
180  // inherited from PointCloudAndImageProcessor
181  void onInitPointCloudAndImageProcessor() override;
182  void onConnectPointCloudAndImageProcessor() override;
183  void onExitPointCloudAndImageProcessor() override;
184 
185  void process() override;
186 
187 
188  /**
189  * @see PropertyUser::createPropertyDefinitions()
190  */
192  {
194  }
195 
196  private:
197 
198  void CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight, CByteImage* pImageColorLeft, CByteImage* pImageColorRight, int nImageNumber);
199 
200  bool ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight, CByteImage* pImageColorLeft, CByteImage* pImageColorRight, int nImageNumber);
201 
202  bool RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight, CByteImage* pImageColorLeft, CByteImage* pImageColorRight, int nImageNumber);
203 
204 
205 
206  bool SaveHistogramOfConfirmedHypothesis(std::string sObjectName, int nDescriptorNumber = 0);
207  void RecognizeHypotheses(CByteImage* pImageColorLeft, const std::string objectName = "");
208 
209 
210  void VisualizeHypotheses(CByteImage* pImageGreyLeft, CByteImage* pImageGreyRight, CByteImage* pImageColorLeft, CByteImage* pImageColorRight, bool bShowConfirmedHypotheses,
211  CByteImage* pResultImageLeft = NULL, CByteImage* pResultImageRight = NULL, bool bMakeScreenshot = false);
212  void RefreshVisualization(bool bConfirmedHypotheses)
213  {
214  m_pHypothesisVisualization->RefreshVisualization(bConfirmedHypotheses);
215  }
216 
217 
218  void UpdateDataFromRobot();
219 
220  void ApplyHeadMotionTransformation(Mat3d mRotation, Vec3d vTranslation);
221 
222  void SetHeadToPlatformTransformation(Vec3d vTranslation, Mat3d mRotation, bool bResetOldTransformation = false);
223 
224 
225  int GetNumberOfNonconfirmedHypotheses()
226  {
227  return m_pObjectHypotheses->GetSize();
228  }
229 
230 
231  Vec3d GetObjectPosition(float& fObjectExtent, bool bPreferCentralObject = true);
232 
233  void GetHypothesisBoundingBox(int& nMinX, int& nMaxX, int& nMinY, int& nMaxY);
234 
235  void GetHypothesisPrincipalAxesAndBoundingBox(Vec3d& vPrincipalAxis1, Vec3d& vPrincipalAxis2, Vec3d& vPrincipalAxis3,
236  Vec3d& vEigenValues, Vec3d& vMaxExtentFromCenter,
237  Vec2d& vBoundingBoxLU, Vec2d& vBoundingBoxRU, Vec2d& vBoundingBoxLL, Vec2d& vBoundingBoxRL);
238 
239  void ReportObjectPositionInformationToObserver();
240 
241 
242  void LoadAndFuseObjectSegmentations(std::string sObjectName);
243 
244 
245  void SwapAllPointsArraysToOld();
246 
247 
248  CObjectHypothesis* SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints, const bool bPreferCentralObject = true);
249 
250 
251  void convertFileOLPtoPCL(std::string filename, bool includeCandidatePoints = false);
252 
253  void BoundingBoxInForegroundImage(CByteImage* image, int minX, int maxX, int minY, int maxY);
254 
255 
256 
257  // VisionX framework
258  std::string imageProviderName, pointcloudProviderName, robotStateProxyName, cameraFrameName;
260  ImageProviderInterfacePrx imageProviderProxy;
261  // CapturingPointCloudAndImageAndCalibrationProviderInterfacePrx pointcloudProviderProxy;
262  RGBDPointCloudProviderInterfacePrx pointcloudProviderProxy;
263  bool connected = false;
264 
265 
266  ObjectLearningByPushingListenerPrx listener;
267 
268 
269  OLPControlMode currentState;
270 
271  CFeatureCalculation* m_pFeatureCalculation;
272  CHypothesisGeneration* m_pHypothesisGeneration;
273  CHypothesisVisualization* m_pHypothesisVisualization;
274 
275  CCalibration* calibration;
276 
277  bool m_bMakeIntermediateScreenshots;
278  std::string m_sScreenshotPath;
279 
280  CByteImage* colorImageLeft, *colorImageRight, *greyImageLeft, *greyImageRight, *colorImageLeftOld, *resultImageLeft, *resultImageRight, *tempResultImage;
281  CByteImage** cameraImages, ** resultImages;
282 
283  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointCloudPtr;
284 
285  Transformation3d tHeadToPlatformTransformation;
286 
287  int iterationCounter;
288 
289  CObjectHypothesisArray* m_pObjectHypotheses, *m_pConfirmedHypotheses, *m_pInitialHypothesesAtLocalMaxima;
290 
291  CSIFTFeatureArray* m_pAllNewSIFTPoints;
292  CSIFTFeatureArray* m_pAllOldSIFTPoints;
293  std::vector<CMSERDescriptor3D*>* m_pAllNewMSERs;
294  std::vector<CMSERDescriptor3D*>* m_pAllOldMSERs;
295  std::vector<CMSERDescriptor3D*>* m_pCorrespondingMSERs;
296  std::vector<CHypothesisPoint*>* m_pAllNewDepthMapPoints;
297  std::vector<CHypothesisPoint*>* m_pAllOldDepthMapPoints;
298 
299  CGaussBackground* m_pGaussBackground;
300 
301  Transformation3d m_tHeadToPlatformTransformation, m_tHeadToPlatformTransformationOld;
302  Vec3d upwardsVector;
303 
304  // debug
305  CByteImage* m_pSegmentedBackgroundImage;
306  CByteImage* m_pDisparityImage;
307 
308  std::mutex processingLock;
309 
310 
311 
313  VirtualRobot::RobotPtr localRobot;
314 
315  };
316 
317 }
318 
RemoteRobot.h
PointCloudAndImageProcessor.h
LinkedPose.h
visionx::ObjectLearningByPushing::getReferenceFrameName
std::string getReferenceFrameName(const ::Ice::Current &c=Ice::emptyCurrent) override
Definition: ObjectLearningByPushing.cpp:151
visionx::ObjectLearningByPushing::eHasInitialHypotheses
@ eHasInitialHypotheses
Definition: ObjectLearningByPushing.h:172
visionx
ArmarX headers.
Definition: OpenPoseStressTest.h:38
ObjectHypothesis.h
visionx::ObjectLearningByPushing::eCreatingInitialHypotheses
@ eCreatingInitialHypotheses
Definition: ObjectLearningByPushing.h:171
CSIFTFeatureArray
CDynamicArrayTemplate< CSIFTFeatureEntry * > CSIFTFeatureArray
Definition: ObjectHypothesis.h:168
visionx::ObjectLearningByPushing::onInitPointCloudAndImageProcessor
void onInitPointCloudAndImageProcessor() override
Setup the vision component.
Definition: ObjectLearningByPushing.cpp:191
visionx::ObjectLearningByPushing::recognizeObject
void recognizeObject(const std::string &objectName, const ::Ice::Current &c=Ice::emptyCurrent) override
Recognizes the object with the given name in the current camera images.
Definition: ObjectLearningByPushing.cpp:177
visionx::ObjectLearningByPushing::ValidateInitialObjectHypotheses
void ValidateInitialObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Validates the initial object hypotheses after the first push.
Definition: ObjectLearningByPushing.cpp:84
armarx::PropertyDefinitionContainer::prefix
std::string prefix
Prefix of the properties such as namespace, domain, component name, etc.
Definition: PropertyDefinitionContainer.h:333
GfxTL::Vec2d
VectorXD< 2, double > Vec2d
Definition: VectorXD.h:694
c
constexpr T c
Definition: UnscentedKalmanFilterTest.cpp:43
visionx::ObjectLearningByPushingPropertyDefinitions::ObjectLearningByPushingPropertyDefinitions
ObjectLearningByPushingPropertyDefinitions(std::string prefix)
Definition: ObjectLearningByPushing.h:83
Observer.h
visionx::ObjectLearningByPushing::eNoHypotheses
@ eNoHypotheses
Definition: ObjectLearningByPushing.h:170
visionx::ObjectLearningByPushing::eQuit
@ eQuit
Definition: ObjectLearningByPushing.h:176
ObjectLearningByPushingDefinitions.h
CHypothesisVisualization
Definition: HypothesisVisualization.h:39
CGaussBackground
Definition: GaussBackground.h:51
visionx::ObjectLearningByPushing::onExitPointCloudAndImageProcessor
void onExitPointCloudAndImageProcessor() override
Exit the ImageProcessor component.
Definition: ObjectLearningByPushing.cpp:370
CFeatureCalculation
Definition: FeatureCalculation.h:43
visionx::ObjectLearningByPushing::RevalidateConfirmedObjectHypotheses
void RevalidateConfirmedObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Re-validates the confirmed object hypotheses after the second and later pushes.
Definition: ObjectLearningByPushing.cpp:92
GfxTL::Vec3d
VectorXD< 3, double > Vec3d
Definition: VectorXD.h:695
visionx::ObjectLearningByPushing::getLastObjectTransformation
armarx::PoseBasePtr getLastObjectTransformation(const ::Ice::Current &c=Ice::emptyCurrent) override
Returns the last transformation that the preferred object hypothesis underwent.
Definition: ObjectLearningByPushing.cpp:159
visionx::ObjectLearningByPushing::createPropertyDefinitions
armarx::PropertyDefinitionsPtr createPropertyDefinitions() override
Definition: ObjectLearningByPushing.h:191
visionx::ObjectLearningByPushing::getScenePoints
visionx::types::PointList getScenePoints(const ::Ice::Current &c=Ice::emptyCurrent) override
Definition: ObjectLearningByPushing.cpp:122
visionx::ObjectLearningByPushing::getDefaultName
std::string getDefaultName() const override
Definition: ObjectLearningByPushing.h:120
filename
std::string filename
Definition: VisualizationRobot.cpp:84
visionx::ObjectLearningByPushing::getObjectHypothesisPoints
visionx::types::PointList getObjectHypothesisPoints(const ::Ice::Current &c=Ice::emptyCurrent) override
Returns the confirmed points constituting the object hypothesis.
Definition: ObjectLearningByPushing.cpp:100
CHypothesisVisualization::RefreshVisualization
void RefreshVisualization(bool bConfirmedHypotheses)
Definition: HypothesisVisualization.cpp:388
visionx::ObjectLearningByPushing
ObjectLearningByPushing is the vision part of the approach for interactive object segmentation.
Definition: ObjectLearningByPushing.h:111
HypothesisVisualization.h
CObjectHypothesis
Definition: ObjectHypothesis.h:249
visionx::ObjectLearningByPushing::eRevalidatingConfirmedHypotheses
@ eRevalidatingConfirmedHypotheses
Definition: ObjectLearningByPushing.h:175
armarx::Component::getConfigIdentifier
std::string getConfigIdentifier()
Retrieve config identifier for this component as set in constructor.
Definition: Component.cpp:74
CObjectHypothesisArray
CDynamicArrayTemplate< CObjectHypothesis * > CObjectHypothesisArray
Definition: ObjectHypothesis.h:359
armarx::ComponentPropertyDefinitions
Default component property definition container.
Definition: Component.h:70
IceUtil::Handle< class PropertyDefinitionContainer >
visionx::ObjectLearningByPushing::process
void process() override
Process the vision component.
Definition: ObjectLearningByPushing.cpp:449
visionx::ObjectLearningByPushingPropertyDefinitions
Definition: ObjectLearningByPushing.h:79
IceInternal::ProxyHandle<::IceProxy::armarx::RobotStateComponentInterface >
armarx::ComponentPropertyDefinitions::ComponentPropertyDefinitions
ComponentPropertyDefinitions(std::string prefix, bool hasObjectNameParameter=true)
Definition: Component.cpp:37
visionx::ObjectLearningByPushing::eHasConfirmedHypotheses
@ eHasConfirmedHypotheses
Definition: ObjectLearningByPushing.h:174
visionx::ObjectLearningByPushing::eValidatingInitialHypotheses
@ eValidatingInitialHypotheses
Definition: ObjectLearningByPushing.h:173
visionx::ObjectLearningByPushing::CreateInitialObjectHypotheses
void CreateInitialObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Creates the initial object hypotheses.
Definition: ObjectLearningByPushing.cpp:76
armarx::PropertyDefinitionsPtr
IceUtil::Handle< class PropertyDefinitionContainer > PropertyDefinitionsPtr
PropertyDefinitions smart pointer type.
Definition: forward_declarations.h:34
CHypothesisGeneration
Definition: HypothesisGeneration.h:50
visionx::ObjectLearningByPushing::getUpwardsVector
armarx::Vector3BasePtr getUpwardsVector(const ::Ice::Current &c=Ice::emptyCurrent) override
Definition: ObjectLearningByPushing.cpp:143
visionx::ObjectLearningByPushing::OLPControlMode
OLPControlMode
Definition: ObjectLearningByPushing.h:168
ChannelRef.h
VirtualRobot::RobotPtr
std::shared_ptr< class Robot > RobotPtr
Definition: Bus.h:18
visionx::PointCloudAndImageProcessor
The PointCloudAndImageProcessor class provides an interface for access to PointCloudProviders and Ima...
Definition: PointCloudAndImageProcessor.h:97
visionx::ObjectLearningByPushing::onConnectPointCloudAndImageProcessor
void onConnectPointCloudAndImageProcessor() override
Implement this method in your PointCloudAndImageProcessor in order execute parts when the component i...
Definition: ObjectLearningByPushing.cpp:221