ObjectLearningByPushing.h
Source listing of this file (Doxygen-generated page).
1/*
2 * This file is part of ArmarX.
3 *
4 * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5 *
6 * ArmarX is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ArmarX is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * @package
19 * @author
20 * @date
21 * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22 * GNU General Public License
23 */
24
25#pragma once
26
27#include <map>
28#include <set>
29#include <string>
30
31
32// Core
35
36#include <RobotAPI/interface/visualization/DebugDrawerInterface.h>
39
40// MemoryX
41#include <MemoryX/interface/observers/ObjectMemoryObserverInterface.h>
42
43// VisionX
45#include <VisionX/interface/components/ObjectLearningByPushing.h>
46#include <VisionX/interface/components/PointCloudAndImageAndCalibrationProviderInterface.h>
47#include <VisionX/interface/components/RGBDImageProvider.h>
48
49
50// IVT
51#include <mutex>
52
54#include "ObjectHypothesis.h"
56#include <Math/Math2d.h>
57#include <Math/Math3d.h>
58
59
60// forward declarations
62class CStereoCalibration;
63class CCalibration;
65class CByteImage;
67class CommunicationWithRobotArmarX;
68class CDataToRobot;
69class CDataFromRobot;
70
71namespace visionx
72{
73
// Property definition container for the ObjectLearningByPushing component.
// NOTE(review): this is a Doxygen listing fragment — the class name and the
// constructor signature (listing lines 74 and 77-78) were dropped by the
// extraction; presumably this is the property-definitions class whose ctor
// forwards a prefix to the base container — TODO confirm against the header.
75 {
76 public:
79 {
// Ice adapter name of the image provider delivering camera images.
80 defineOptionalProperty<std::string>("ImageProviderAdapterName",
81 "Armar3ImageProvider",
82 "Ice Adapter name of the image provider");
// Ice adapter name of the point cloud provider.
83 defineOptionalProperty<std::string>("PointCloudProviderAdapterName",
84 "PointCloudProvider",
85 "Ice Adapter name of the point cloud provider");
// Ice adapter name of the robot state proxy used to query robot state.
86 defineOptionalProperty<std::string>("RobotStateProxyName",
87 "RobotStateComponent",
88 "Ice Adapter name of the robot state proxy");
// NOTE(review): the opening defineOptionalProperty<std::string>( call for
// this property (listing line 89) is missing from this extract.
90 "CameraFrameName",
91 "EyeLeftCamera",
92 "Name of the robot state frame of the primary camera");
93
// NOTE(review): the opening defineOptionalProperty<std::string>( call for
// this property (listing line 94) is missing from this extract.
95 "DebugDrawerTopicName", "DebugDrawerUpdates", "name of DebugDrawer topic");
96 }
97 };
98
99 /**
100 * ObjectLearningByPushing is the vision part of the approach for interactive object segmentation. It is used by the corresponding
101 * object-learning statechart. Initially, object hypotheses are created based on the stereo camera images. One of these hypotheses is
102 * then pushed. The resulting motion is used to segment the one or more objects that moved from the rest of the scene. The segmented objects
103 * are represented in the form of RGBD point clouds.
104 *
105 * \componentproperties
106 * \prop VisionX.ObjectLearningByPushing.ImageProviderAdapterName: Name of the
107 * image provider that delivers the camera images.
108 * \prop VisionX.ObjectLearningByPushing.RobotStateProxyName: Name of the robot state
109 * proxy used to obtain the current robot state.
110 * \prop VisionX.ObjectLearningByPushing.CameraFrameName: Name of the robot model frame of the primary camera
111 *
112 */
113
116 virtual public visionx::ObjectLearningByPushingInterface
117 {
118 public:
119 /**
120 * @see Component::getDefaultName()
121 */
122 std::string
123 getDefaultName() const override
124 {
125 return "ObjectLearningByPushing";
126 }
127
128 /**
129 * Creates the initial object hypotheses.
130 */
131 void CreateInitialObjectHypotheses(const ::Ice::Current& c = Ice::emptyCurrent) override;
132
133 /**
134 * Validates the initial object hypotheses after the first push.
135 */
136 void ValidateInitialObjectHypotheses(const ::Ice::Current& c = Ice::emptyCurrent) override;
137
138 /**
139 * Re-validates the confirmed object hypotheses after the second and later pushes.
140 */
141 void
142 RevalidateConfirmedObjectHypotheses(const ::Ice::Current& c = Ice::emptyCurrent) override;
143
144 /**
145 * Returns the confirmed points constituting the object hypothesis. If several confirmed objects are available,
146 * the one containing the biggest number of confirmed points is returned.
147 */
148 visionx::types::PointList
149 getObjectHypothesisPoints(const ::Ice::Current& c = Ice::emptyCurrent) override;
150
151
// NOTE(review): undocumented in the original — presumably returns the point
// list of the whole current scene (cf. getObjectHypothesisPoints) — TODO confirm.
152 visionx::types::PointList
153 getScenePoints(const ::Ice::Current& c = Ice::emptyCurrent) override;
154
155
// Returns the stored upwards vector (see member upwardsVector below).
156 armarx::Vector3BasePtr
157 getUpwardsVector(const ::Ice::Current& c = Ice::emptyCurrent) override;
158
159
// Returns the name of the reference frame in which results are expressed;
// presumably the configured camera frame (property "CameraFrameName") — TODO confirm.
160 std::string getReferenceFrameName(const ::Ice::Current& c = Ice::emptyCurrent) override;
161
162
163 /**
164 * Returns the last transformation that the preferred object hypothesis underwent.
165 */
166 armarx::PoseBasePtr
167 getLastObjectTransformation(const ::Ice::Current& c = Ice::emptyCurrent) override;
168
169 /**
170 * Triggers recognition of the object with the given name.
171 * (The original comment was a copy-paste of getLastObjectTransformation's.)
172 */
172 void recognizeObject(const std::string& objectName,
173 const ::Ice::Current& c = Ice::emptyCurrent) override;
174
185
186 protected:
187 // inherited from PointCloudAndImageProcessor
// Component lifecycle hooks: setup / teardown / per-frame processing.
188 void onInitPointCloudAndImageProcessor() override;
// NOTE(review): the onConnectPointCloudAndImageProcessor() declaration
// (listing line 189) was dropped by the extraction; the member index at the
// bottom of this page shows the override exists.
190 void onExitPointCloudAndImageProcessor() override;
191
// Per-frame processing entry point of the vision component.
192 void process() override;
193
194 /**
195 * @see PropertyUser::createPropertyDefinitions()
196 */
// NOTE(review): the createPropertyDefinitions() declaration (listing lines
// 197-202) is missing from this extract; the member index at the bottom of
// this page shows the override exists.
203
204 private:
// Internal worker for CreateInitialObjectHypotheses(), operating on the
// captured grey/color stereo image pair of the given frame number.
205 void CreateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
206 CByteImage* pImageGreyRight,
207 CByteImage* pImageColorLeft,
208 CByteImage* pImageColorRight,
209 int nImageNumber);
210
// Internal worker for ValidateInitialObjectHypotheses(); return value
// presumably signals whether a hypothesis was confirmed — TODO confirm.
211 bool ValidateInitialObjectHypothesesInternal(CByteImage* pImageGreyLeft,
212 CByteImage* pImageGreyRight,
213 CByteImage* pImageColorLeft,
214 CByteImage* pImageColorRight,
215 int nImageNumber);
216
// Internal worker for RevalidateConfirmedObjectHypotheses().
217 bool RevalidateConfirmedObjectHypothesesInternal(CByteImage* pImageGreyLeft,
218 CByteImage* pImageGreyRight,
219 CByteImage* pImageColorLeft,
220 CByteImage* pImageColorRight,
221 int nImageNumber);
222
223
// Persists a descriptor histogram of a confirmed hypothesis under the object name.
224 bool SaveHistogramOfConfirmedHypothesis(std::string sObjectName, int nDescriptorNumber = 0);
// Runs recognition on the left color image; empty objectName presumably
// means "try all known objects" — TODO confirm.
225 void RecognizeHypotheses(CByteImage* pImageColorLeft, const std::string objectName = "");
226
227
// Renders hypotheses into the result images; optionally only confirmed ones,
// and optionally saves a screenshot.
228 void VisualizeHypotheses(CByteImage* pImageGreyLeft,
229 CByteImage* pImageGreyRight,
230 CByteImage* pImageColorLeft,
231 CByteImage* pImageColorRight,
232 bool bShowConfirmedHypotheses,
233 CByteImage* pResultImageLeft = NULL,
234 CByteImage* pResultImageRight = NULL,
235 bool bMakeScreenshot = false);
236
// Delegates a visualization refresh to the hypothesis visualization helper.
237 void
238 RefreshVisualization(bool bConfirmedHypotheses)
239 {
240 m_pHypothesisVisualization->RefreshVisualization(bConfirmedHypotheses);
241 }
242
// Pulls the current robot data (pose/state) from the robot state proxy.
243 void UpdateDataFromRobot();
244
// Applies a head-motion rotation/translation to the internal data.
245 void ApplyHeadMotionTransformation(Mat3d mRotation, Vec3d vTranslation);
246
// Sets the head-to-platform transformation; optionally resets the stored
// previous transformation.
247 void SetHeadToPlatformTransformation(Vec3d vTranslation,
248 Mat3d mRotation,
249 bool bResetOldTransformation = false);
250
// Number of not-yet-confirmed hypotheses currently held.
251 int
252 GetNumberOfNonconfirmedHypotheses()
253 {
254 return m_pObjectHypotheses->GetSize();
255 }
256
// Returns the position of the (preferably central) object and its extent.
257 Vec3d GetObjectPosition(float& fObjectExtent, bool bPreferCentralObject = true);
258
// 2D image-space bounding box of the current hypothesis.
259 void GetHypothesisBoundingBox(int& nMinX, int& nMaxX, int& nMinY, int& nMaxY);
260
// Principal axes, eigenvalues, extents, and the four bounding-box corners
// (LU/RU/LL/RL) of the current hypothesis.
261 void GetHypothesisPrincipalAxesAndBoundingBox(Vec3d& vPrincipalAxis1,
262 Vec3d& vPrincipalAxis2,
263 Vec3d& vPrincipalAxis3,
264 Vec3d& vEigenValues,
265 Vec3d& vMaxExtentFromCenter,
266 Vec2d& vBoundingBoxLU,
267 Vec2d& vBoundingBoxRU,
268 Vec2d& vBoundingBoxLL,
269 Vec2d& vBoundingBoxRL);
270
// Publishes object position information to the observer.
271 void ReportObjectPositionInformationToObserver();
272
273
// Loads previously stored segmentations of the named object and fuses them.
274 void LoadAndFuseObjectSegmentations(std::string sObjectName);
275
276
// Moves all "new" point/feature arrays into their "old" counterparts
// (see the m_pAllNew*/m_pAllOld* members below).
277 void SwapAllPointsArraysToOld();
278
279
// Selects the preferred hypothesis (central one if requested) and exposes
// its points via the out-parameter.
280 CObjectHypothesis* SelectPreferredHypothesis(std::vector<CHypothesisPoint*>*& pPoints,
281 const bool bPreferCentralObject = true);
282
283
// Converts a file from the OLP format to PCL format; optionally includes
// candidate points.
284 void convertFileOLPtoPCL(std::string filename, bool includeCandidatePoints = false);
285
// Draws/applies the given bounding box in the foreground image.
286 void
287 BoundingBoxInForegroundImage(CByteImage* image, int minX, int maxX, int minY, int maxY);
288
289
290 // VisionX framework
// Provider/proxy names resolved from the component properties.
291 std::string imageProviderName, pointcloudProviderName, robotStateProxyName, cameraFrameName;
293 ImageProviderInterfacePrx imageProviderProxy;
294 // CapturingPointCloudAndImageAndCalibrationProviderInterfacePrx pointcloudProviderProxy;
295 RGBDPointCloudProviderInterfacePrx pointcloudProviderProxy;
// True once the providers/proxies are connected.
296 bool connected = false;
297
298
// Ice topic listener notified about object-learning events.
299 ObjectLearningByPushingListenerPrx listener;
300
301
// Current mode of the object-learning state machine.
302 OLPControlMode currentState;
303
// Helper objects for feature calculation, hypothesis generation and
// visualization. NOTE(review): raw owning pointers — lifetime presumably
// managed in onInit/onExit; TODO confirm.
304 CFeatureCalculation* m_pFeatureCalculation;
305 CHypothesisGeneration* m_pHypothesisGeneration;
306 CHypothesisVisualization* m_pHypothesisVisualization;
307
// Camera calibration of the primary camera (IVT type).
308 CCalibration* calibration;
309
// Screenshot debugging configuration.
310 bool m_bMakeIntermediateScreenshots;
311 std::string m_sScreenshotPath;
312
// Working images (IVT CByteImage): stereo grey/color inputs, previous left
// color frame, and result/temporary buffers.
313 CByteImage *colorImageLeft, *colorImageRight, *greyImageLeft, *greyImageRight,
314 *colorImageLeftOld, *resultImageLeft, *resultImageRight, *tempResultImage;
315 CByteImage **cameraImages, **resultImages;
316
// Latest RGBA point cloud received from the point cloud provider.
317 pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointCloudPtr;
318
319 Transformation3d tHeadToPlatformTransformation;
320
// Counts processed iterations (pushes/frames).
321 int iterationCounter;
322
// Hypothesis containers: unconfirmed, confirmed, and initial hypotheses at
// local maxima.
323 CObjectHypothesisArray *m_pObjectHypotheses, *m_pConfirmedHypotheses,
324 *m_pInitialHypothesesAtLocalMaxima;
325
// Feature sets of the current ("new") and previous ("old") frame; swapped
// by SwapAllPointsArraysToOld().
326 CSIFTFeatureArray* m_pAllNewSIFTPoints;
327 CSIFTFeatureArray* m_pAllOldSIFTPoints;
328 std::vector<CMSERDescriptor3D*>* m_pAllNewMSERs;
329 std::vector<CMSERDescriptor3D*>* m_pAllOldMSERs;
330 std::vector<CMSERDescriptor3D*>* m_pCorrespondingMSERs;
331 std::vector<CHypothesisPoint*>* m_pAllNewDepthMapPoints;
332 std::vector<CHypothesisPoint*>* m_pAllOldDepthMapPoints;
333
// Gaussian background model used for motion/foreground segmentation.
334 CGaussBackground* m_pGaussBackground;
335
// Current and previous head-to-platform transformations.
336 Transformation3d m_tHeadToPlatformTransformation, m_tHeadToPlatformTransformationOld;
337 Vec3d upwardsVector;
338
339 // debug
340 CByteImage* m_pSegmentedBackgroundImage;
341 CByteImage* m_pDisparityImage;
342
// Serializes the hypothesis-processing entry points.
343 std::mutex processingLock;
344
345
// Local copy of the robot model.
347 VirtualRobot::RobotPtr localRobot;
348 };
349
350} // namespace visionx
CDynamicArrayTemplate< CSIFTFeatureEntry * > CSIFTFeatureArray
CDynamicArrayTemplate< CObjectHypothesis * > CObjectHypothesisArray
constexpr T c
void RefreshVisualization(bool bConfirmedHypotheses)
Default component property definition container.
Definition Component.h:70
ComponentPropertyDefinitions(std::string prefix, bool hasObjectNameParameter=true)
Definition Component.cpp:46
std::string getConfigIdentifier()
Retrieve config identifier for this component as set in constructor.
Definition Component.cpp:90
std::string prefix
Prefix of the properties such as namespace, domain, component name, etc.
PropertyDefinition< PropertyType > & defineOptionalProperty(const std::string &name, PropertyType defaultValue, const std::string &description="", PropertyDefinitionBase::PropertyConstness constness=PropertyDefinitionBase::eConstant)
ObjectLearningByPushing is the vision part of the approach for interactive object segmentation.
armarx::Vector3BasePtr getUpwardsVector(const ::Ice::Current &c=Ice::emptyCurrent) override
void onConnectPointCloudAndImageProcessor() override
Implement this method in your PointCloudAndImageProcessor in order execute parts when the component i...
armarx::PropertyDefinitionsPtr createPropertyDefinitions() override
void process() override
Process the vision component.
visionx::types::PointList getObjectHypothesisPoints(const ::Ice::Current &c=Ice::emptyCurrent) override
Returns the confirmed points constituting the object hypothesis.
visionx::types::PointList getScenePoints(const ::Ice::Current &c=Ice::emptyCurrent) override
void CreateInitialObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Creates the initial object hypotheses.
void recognizeObject(const std::string &objectName, const ::Ice::Current &c=Ice::emptyCurrent) override
Triggers recognition of the object with the given name.
armarx::PoseBasePtr getLastObjectTransformation(const ::Ice::Current &c=Ice::emptyCurrent) override
Returns the last transformation that the preferred object hypothesis underwent.
void ValidateInitialObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Validates the initial object hypotheses after the first push.
void onInitPointCloudAndImageProcessor() override
Setup the vision component.
void RevalidateConfirmedObjectHypotheses(const ::Ice::Current &c=Ice::emptyCurrent) override
Re-validates the confirmed object hypotheses after the second and later pushes.
std::string getReferenceFrameName(const ::Ice::Current &c=Ice::emptyCurrent) override
void onExitPointCloudAndImageProcessor() override
Exit the ImageProcessor component.
std::string getDefaultName() const override
The PointCloudAndImageProcessor class provides an interface for access to PointCloudProviders and Ima...
VectorXD< 3, double > Vec3d
Definition VectorXD.h:737
std::shared_ptr< class Robot > RobotPtr
Definition Bus.h:19
IceUtil::Handle< class PropertyDefinitionContainer > PropertyDefinitionsPtr
PropertyDefinitions smart pointer type.
::IceInternal::ProxyHandle<::IceProxy::armarx::RobotStateComponentInterface > RobotStateComponentInterfacePrx
::IceInternal::ProxyHandle<::IceProxy::armarx::DebugDrawerInterface > DebugDrawerInterfacePrx
ArmarX headers.