CoFusionProcessor.cpp
Go to the documentation of this file.
1/*
2 * This file is part of ArmarX.
3 *
4 * ArmarX is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * ArmarX is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * @package VisionX::ArmarXObjects::CoFusionProcessor
17 * @author Rainer Kartmann ( rainer dot kartmann at student dot kit dot edu )
18 * @date 2018
19 * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
20 * GNU General Public License
21 */
22
23#include "CoFusionProcessor.h"
24
25#include <unistd.h>
26
27#include <Eigen/Geometry>
28
29#include <pcl/filters/approximate_voxel_grid.h>
30
31#include <VisionX/interface/components/PointCloudAndImageAndCalibrationProviderInterface.h>
32#include <VisionX/interface/components/RGBDImageProvider.h>
34
35#include <Core/Model/Model.h>
36#include <GL/glew.h>
37#include <Image/IplImageAdaptor.h>
38#include <pangolin/pangolin.h>
39
40
41using namespace armarx;
42
43void
45{
46 imageProviderName = getProperty<std::string>("ImageProviderName").getValue();
47 usingImageProvider(imageProviderName);
48 //usingProxy(getProperty<std::string>("RobotStateComponentName").getValue());
49}
50
51void
53{
54 visionx::ImageProviderInfo imageProviderInfo = getImageProvider(imageProviderName);
55 imageProvider = getProxy<visionx::ImageProviderInterfacePrx>(imageProviderName);
56
57 int numImages = 2;
58 if (imageProviderInfo.numberImages != numImages)
59 {
60 ARMARX_ERROR << "Number of images provided by " << imageProviderName << " ("
61 << imageProviderInfo.numberImages
62 << ") does not match the expected number of images (" << numImages << ")";
63 }
64
65 {
66 std::lock_guard<std::mutex> lock(imagesMutex);
67
68 imageRgb.reset(visionx::tools::createByteImage(imageProviderInfo));
69 imageDepth.reset(visionx::tools::createByteImage(imageProviderInfo));
70 images[0] = imageRgb.get();
71 images[1] = imageDepth.get();
72 }
73}
74
75void
77{
78 std::lock_guard<std::mutex> lock(imagesMutex);
79
80 images[0] = nullptr;
81 images[1] = nullptr;
82 imageRgb.reset(nullptr);
83 imageDepth.reset(nullptr);
84
85 imageProvider = nullptr;
86}
87
// NOTE(review): the extraction destroyed these three definitions almost
// completely -- only the return types and trailing line numbers survive.
// From their position (between onDisconnectImageProcessor() and
// onInitComponent()) and the documented member list they are presumably the
// empty overrides onExitImageProcessor(), onInitCapturingPointCloudProvider()
// and onExitCapturingPointCloudProvider() -- restore from the original file.
void
92
void
98
void
103
104void
106{
107 ImageProcessor::onInitComponent();
108 CapturingPointCloudProvider::onInitComponent();
109
110 framesBetweenUpdates = getProperty<int>("FramesBetweenUpdates").getValue();
111 downsamplingLeafSize = getProperty<float>("DownsamplingLeafSize").getValue();
112
113 framesSinceLastUpdate = framesBetweenUpdates; // trigger first update right away
114
115 resultPointCloud.reset(new pcl::PointCloud<OutPointT>());
116
117
118 // COFUSION PARAMS
119
120 cofusionParams.timeDelta = getProperty<int>("cofusion.TimeDelta").getValue();
121 cofusionParams.countThresh = getProperty<int>("cofusion.CountThresh").getValue();
122 cofusionParams.errThresh = getProperty<float>("cofusion.ErrThresh").getValue();
123 cofusionParams.covThresh = getProperty<float>("cofusion.CovThresh").getValue();
124 cofusionParams.closeLoops = getProperty<bool>("cofusion.CloseLoops").getValue();
125 cofusionParams.iclnuim = getProperty<bool>("cofusion.Iclnuim").getValue();
126 cofusionParams.reloc = getProperty<bool>("cofusion.Reloc").getValue();
127 cofusionParams.photoThresh = getProperty<float>("cofusion.PhotoThresh").getValue();
128 cofusionParams.initConfidenceGlobal =
129 getProperty<float>("cofusion.InitConfidenceGlobal").getValue();
130 cofusionParams.initConfidenceObject =
131 getProperty<float>("cofusion.InitConfidenceObject").getValue();
132 cofusionParams.depthCut = getProperty<float>("cofusion.DepthCut").getValue();
133 cofusionParams.icpThresh = getProperty<float>("cofusion.IcpThresh").getValue();
134 cofusionParams.fastOdom = getProperty<bool>("cofusion.FastOdom").getValue();
135 cofusionParams.fernThresh = getProperty<float>("cofusion.FernThresh").getValue();
136 cofusionParams.so3 = getProperty<bool>("cofusion.So3").getValue();
137 cofusionParams.frameToFrameRGB = getProperty<bool>("cofusion.FrameToFrameRGB").getValue();
138 cofusionParams.modelSpawnOffset = getProperty<unsigned>("cofusion.ModelSpawnOffset").getValue();
139 cofusionParams.matchingType =
140 getProperty<Model::MatchingType>("cofusion.MatchingType").getValue();
141 cofusionParams.exportDirectory =
142 getProperty<std::string>("cofusion.ExportDirectory").getValue();
143 cofusionParams.exportSegmentationResults =
144 getProperty<bool>("cofusion.ExportSegmentationResults").getValue();
145
146#if USE_MASKFUSION == 1
147 cofusionParams.usePrecomputedMasksOnly =
148 getProperty<bool>("cofusion.UsePrecomputedMasksOnly").getValue();
149 cofusionParams.frameQueueSize = getProperty<int>("cofusion.FrameQueueSize").getValue();
150#endif
151}
152
153void
155{
156
157
158 ImageProcessor::onConnectComponent();
159 CapturingPointCloudProvider::onConnectComponent();
160
161 //robotStateComponent = getProxy<RobotStateComponentInterfacePrx>(getProperty<std::string>("RobotStateComponentName").getValue());
162
163
164 visionx::StereoCalibrationProviderInterfacePrx calibrationProvider =
165 visionx::StereoCalibrationProviderInterfacePrx::checkedCast(imageProvider);
166
167 if (calibrationProvider)
168 {
169 ARMARX_LOG << "reading intrinsics from calibration provider";
170 visionx::StereoCalibration calibration = calibrationProvider->getStereoCalibration();
171
172 cofusionParams.fx = calibration.calibrationRight.cameraParam.focalLength[0];
173 cofusionParams.fy = calibration.calibrationRight.cameraParam.focalLength[1];
174 cofusionParams.cx = calibration.calibrationRight.cameraParam.principalPoint[0];
175 cofusionParams.cy = calibration.calibrationRight.cameraParam.principalPoint[1];
176
177 referenceFrameName = calibrationProvider->getReferenceFrame();
178 }
179 else
180 {
181 cofusionParams.fx = 525.0;
182 cofusionParams.fy = 525.0;
183 cofusionParams.cx = 320;
184 cofusionParams.cy = 240;
185 referenceFrameName = "unkown";
186 ARMARX_WARNING << "unable to get camera intrinsics. using dummy values";
187 }
188
189
190 visionx::ImageFormatInfo imageFormat = imageProvider->getImageFormat();
191
192 cofusionParams.width = imageFormat.dimension.width;
193 cofusionParams.height = imageFormat.dimension.height;
194
195
196 if (getProperty<bool>("ShowTimeStats").getValue())
197 {
198 if (stopwatchReportTask)
199 {
200 stopwatchReportTask->stop();
201 }
202 stopwatch.reset();
203 stopwatchReportTask = new PeriodicTask<CoFusionProcessor>(
204 this, &CoFusionProcessor::reportStopwatchStats, 10000);
205 stopwatchReportTask->start();
206 }
207
208 providePointCloud<OutPointT>(resultPointCloud);
209}
210
211void
213{
214 ImageProcessor::onDisconnectComponent();
215 CapturingPointCloudProvider::onDisconnectComponent();
216}
217
218void
220{
221 ImageProcessor::onExitComponent();
222 CapturingPointCloudProvider::onExitComponent();
223}
224
225void
226CoFusionProcessor::onNewModel(std::shared_ptr<Model> model)
227{
228
229 ARMARX_INFO << "new model";
230}
231
232void
233CoFusionProcessor::onInactiveModel(std::shared_ptr<Model> model)
234{
235 ARMARX_INFO << "inactive model";
236}
237
238void
240{
241 if (!waitForImages(imageProviderName))
242 {
243 ARMARX_WARNING << "Timeout or error in waitForImages(" << imageProviderName << ")";
244 return;
245 }
246
247 ARMARX_INFO << deactivateSpam() << "Processing input image ...";
248
249 std::lock_guard<std::mutex> lock(imagesMutex);
250
251 armarx::MetaInfoSizeBasePtr metaInfo;
252 // retrieve images
253 int numImages = getImages(imageProviderName, images, metaInfo);
254 if (numImages != 2)
255 {
256 ARMARX_WARNING << "Did not receive correct number of images! (Expected 2, but was "
257 << numImages << ".) Aborting!";
258 return;
259 }
260
261 if (!cofusion)
262 {
263 initializeCoFusion();
264 }
265
266 // create FrameData (CoFusion)
267 stopwatch.start("ImagePreprocessing");
268
269 FrameData frame = constructFrameData();
270
271 stopwatch.stop("ImagePreprocessing");
272
273 // FramedPoseBasePtr globalPose = robotStateComponent->getRobotSnapshotAtTimestamp(metaInfo->timeProvided)->getRobotNode(referenceFrameName)->getGlobalPose();
274
275 // do this for each incoming frame
276 stopwatch.start("CoFusion::ProcessFrame");
277
278#if USE_MASKFUSION == 1
279 std::shared_ptr<FrameData> framePtr = std::make_shared<FrameData>(frame);
280
281 cofusion->processFrame(framePtr);
282#else
283 cofusion->processFrame(frame);
284#endif
285
286 stopwatch.stop("CoFusion::ProcessFrame");
287
288
289 framesSinceLastUpdate++;
290 if (framesSinceLastUpdate >= framesBetweenUpdates)
291 {
292 updateSurfelMaps();
293 framesSinceLastUpdate = 0;
294
295 ARMARX_VERBOSE << "Stopwatch stats: \n" << stopwatch;
296 }
297}
298
299void
300CoFusionProcessor::initializeCoFusion()
301{
302 ARMARX_INFO << "Initializing CoFusion (resolution: " << cofusionParams.width << "x"
303 << cofusionParams.height << ")";
304
305 pangolin::Params windowParams;
306 windowParams.Set("SAMPLE_BUFFERS", 0);
307 windowParams.Set("SAMPLES", 0);
308
309 pangolin::CreateWindowAndBind(
310 "Main", cofusionParams.width, cofusionParams.height, windowParams);
311
312
313 GLenum glewInitError = glewInit();
314 if (glewInitError)
315 {
316 ARMARX_ERROR << "glewInit() failed with error: " << glewInitError;
317 }
318
319 int prog = glCreateProgram();
320 ARMARX_INFO << "Created gl program (" << prog << ")";
321
322
323 ARMARX_INFO << "Constructing CoFusion";
324 cofusion = cofusionParams.makeCoFusion();
325
326 ARMARX_INFO << "registering callbacks";
327 cofusion->addNewModelListener([this](std::shared_ptr<Model> m) { onNewModel(m); });
328 cofusion->addInactiveModelListener([this](std::shared_ptr<Model> m) { onInactiveModel(m); });
329
330#if USE_MASKFUSION == 1
331 cofusion->preallocateModels(getProperty<int>("cofusion.PreallocatedModelsCount").getValue());
332#endif
333}
334
335FrameData
336CoFusionProcessor::constructFrameData()
337{
338 bool copyData = false;
339 cv::Mat matRgb = cv::cvarrToMat(IplImageAdaptor::Adapt(imageRgb.get()), copyData);
340 cv::Mat matDepth = cv::cvarrToMat(IplImageAdaptor::Adapt(imageDepth.get()), copyData);
341
342
343 FrameData frame;
344
345 // color image is easy
346 frame.rgb = matRgb;
347
348 // depth image must be converted
349 int rows = matDepth.rows;
350 int cols = matDepth.cols;
351
352
353 // type is 32 bit flaot
354 cv::Mat matDepthCofusion(rows, cols, CV_32FC1);
355
356 for (int x = 0; x < rows; ++x)
357 {
358 for (int y = 0; y < cols; ++y)
359 {
360 // extract float from source image
361
362 /* Constructing code from from DepthImageProviderDynamicSimulation (ArmarXSimulation):
363 *
364 * float* depthBufferEndOfRow = depthBuffer.data() + width - 1;
365 * // ...
366 *
367 * std::int32_t value = boost::algorithm::clamp(0.f, depthBufferEndOfRow[-x], static_cast<float>(0xffffff));
368 * dImageRow[x * 3] = value & 0xff;
369 * dImageRow[x * 3 + 1] = (value >> 8) & 0xff;
370 * dImageRow[x * 3 + 2] = (value >> 16) & 0xff;
371 */
372
373 /*
374 * My reasoning:
375 * int32_t 'value' above should be casted semantically correctly from float to int
376 * (i.e. should contain the correct/rounded value, not the bit pattern).
377 * Therefore, we extract the values into a int32_t first, then cast it back to float.
378 */
379
380 cv::Vec3b depthPixel = matDepth.at<cv::Vec3b>(x, y);
381
382 int32_t value = 0;
383 value += depthPixel[0];
384 value += depthPixel[1] << 8;
385 value += depthPixel[2] << 16;
386
387 /*
388 // For use with row pointer
389 value += rowPtr[y * 3];
390 value += rowPtr[y * 3 + 1] << 8;
391 value += rowPtr[y * 3 + 2] << 16;
392 */
393
394 float depth = value;
395
396 // map millimeters to meters
397 depth /= 1000;
398
399 // set in new matrix
400 matDepthCofusion.at<float>(x, y) = depth;
401 }
402 }
403
404 frame.depth = matDepthCofusion; // in meters
405
406 return frame;
407}
408
409void
410CoFusionProcessor::updateSurfelMaps()
411{
412 std::list<std::shared_ptr<Model>>& models = cofusion->getModels();
413
414 if (models.empty())
415 {
416 ARMARX_WARNING << "Got no models from CoFusion.";
417 return;
418 }
419
420 std::lock_guard<std::mutex> lock(surfelMapMutex);
421
422 ARMARX_INFO << "Downloading surfel maps of " << models.size() << " models ...";
423 surfelMaps.clear();
424 for (std::shared_ptr<Model> const& model : models)
425 {
426 unsigned int id = model->getID();
427
428 ARMARX_VERBOSE << "Downloading surfel map of model " << id;
429 // SurfelMap contains unique_ptr's and therefore must be inserted directly
430
431#if 1
432 stopwatch.start("Model::downloadMap()");
433 surfelMaps[id] = model->downloadMap();
434
435
436 Model::SurfelMap& surfelMap = surfelMaps[id];
437 ARMARX_VERBOSE << "SurfelMap contains " << surfelMap.numPoints << " points "
438 << "(container size: " << surfelMap.data->size() << ")";
439
440 stopwatch.stop("Model::downloadMap()");
441#endif
442 }
443
444 newFrameAvailable = true;
445}
446
447void
449{
450}
451
452void
453CoFusionProcessor::startCaptureForNumFrames(Ice::Int numFrames, const Ice::Current& c)
454{
456 << "CoFusionProcessor does currently not support startCaptureForNumFrames(numFrames)";
457}
458
459void
463
464bool
466{
467 if (newFrameAvailable)
468 {
469 newFrameAvailable = false;
470 resultPointCloud.reset();
471 reconstructResultPointCloud();
472 providePointCloud<OutPointT>(resultPointCloud);
473 }
474 else
475 {
476 usleep(1000);
477 }
478 return true;
479}
480
481void
482CoFusionProcessor::reconstructResultPointCloud()
483{
484 std::lock_guard<std::mutex> lock(surfelMapMutex);
485
486 resultPointCloud.reset(new pcl::PointCloud<OutPointT>());
487
488 // Create point cloud
489 ARMARX_INFO << "Reconstructing point cloud from " << surfelMaps.size() << " SurfelMaps.";
490 stopwatch.start("PointCloud construction");
491
492 for (auto& it : surfelMaps)
493 {
494 unsigned int id = it.first;
495 Model::SurfelMap& surfelMap = it.second;
496
497
498 ARMARX_INFO << "SurfelMap of model " << id << " contains " << surfelMap.numPoints
499 << " points "
500 << "(container size: " << surfelMap.data->size() << ")";
501
502 /* From Model.h @ struct SurfelMap:
503 * "Returns a vector of 4-float tuples: position0, color0, normal0, ..., positionN, colorN, normalN
504 * Where
505 * position is (x,y,z,conf),
506 * color is (color encoded as a 24-bit integer, <unused>, initTime, timestamp),
507 * and normal is (x,y,z,radius)"
508 */
509
510 // try to reduce number of reallocations
511 resultPointCloud->reserve(resultPointCloud->size() + surfelMap.numPoints);
512
513 for (unsigned int i = 0; i < surfelMap.numPoints; ++i)
514 {
515 Eigen::Vector4f& position4f = (*surfelMap.data)[i * 3 + 0];
516 position4f.head<3>() *= 1000; // meters to millimeters
517
518 Eigen::Vector4f& color4f = (*surfelMap.data)[i * 3 + 1];
519 //Eigen::Vector4f& normal4f = (*surfelMap.data)[i * 3 + 2];
520
521 uint32_t color24b = static_cast<uint32_t>(color4f(0));
522 uchar r = (color24b >> 16) & 0xFF;
523 uchar g = (color24b >> 8) & 0xFF;
524 uchar b = (color24b >> 0) & 0xFF;
525
526 //Eigen::Vector3f normal = normal4f.head(3);
527 //float radius = normal4f(3);
528
529
530 //OutPointT point;
531 pcl::PointXYZRGBL point;
532 point.x = position4f(0);
533 point.y = position4f(1);
534 point.z = position4f(2);
535 point.r = r;
536 point.g = g;
537 point.b = b;
538 point.label = id; // model's id as label
539 resultPointCloud->push_back(point);
540 }
541
542 // todo connect to prior memory and add object
543 }
544
545 if (downsamplingLeafSize > 0.f)
546 {
547 pcl::PointCloud<OutPointT>::Ptr filtered(new pcl::PointCloud<OutPointT>());
548 std::size_t sizeBefore = resultPointCloud->size();
549
550 pcl::ApproximateVoxelGrid<OutPointT> grid;
551 grid.setLeafSize(downsamplingLeafSize, downsamplingLeafSize, downsamplingLeafSize);
552 grid.setInputCloud(resultPointCloud);
553 grid.filter(*filtered);
554 resultPointCloud.swap(filtered);
555
556 std::size_t sizeAfter = resultPointCloud->size();
557
558 ARMARX_INFO << "Downsampled to " << sizeAfter << " points (before: " << sizeBefore << ")";
559 }
560
561 stopwatch.stop("PointCloud construction");
562}
563
564visionx::MetaPointCloudFormatPtr
566{
567 visionx::MetaPointCloudFormatPtr info = new visionx::MetaPointCloudFormat();
568 info->capacity = 50 * 640 * 480 * sizeof(visionx::ColoredLabeledPoint3D);
569 info->size = info->capacity;
570 info->type = visionx::eColoredLabeledPoints;
571 return info;
572}
573
574void
575CoFusionProcessor::reportStopwatchStats()
576{
577 ARMARX_INFO << "Stopwatch stats:\n " << stopwatch;
578}
579
SpamFilterDataPtr deactivateSpam(SpamFilterDataPtr const &spamFilter, float deactivationDurationSec, const std::string &identifier, bool deactivate)
Definition Logging.cpp:75
constexpr T c
std::unique_ptr< CoFusion > makeCoFusion() const
virtual void onInitComponent() override
Pure virtual hook for the subclass.
virtual void onConnectImageProcessor() override
Allocates imageRgb and imageDepth and sets images[].
virtual bool doCapture() override
Main capturing function.
virtual void onExitCapturingPointCloudProvider() override
This is called when the Component::onExitComponent() setup is called.
virtual void onDisconnectComponent() override
Hook for subclass.
virtual void onExitImageProcessor() override
Exit the ImageProcessor component.
void onNewModel(std::shared_ptr< Model > model)
virtual void onStartCapture(float framesPerSecond) override
This is called when the point cloud provider capturing has been started.
virtual visionx::MetaPointCloudFormatPtr getDefaultPointCloudFormat() override
default point cloud format used to initialize shared memory
armarx::PropertyDefinitionsPtr createPropertyDefinitions() override
virtual void process() override
Performs the image processing.
virtual void onInitCapturingPointCloudProvider() override
This is called when the Component::onInitComponent() is called.
virtual void onInitImageProcessor() override
Setup the vision component.
virtual void onConnectComponent() override
Pure virtual hook for the subclass.
virtual void onStopCapture() override
This is called when the point cloud provider capturing has been stopped.
virtual void onDisconnectImageProcessor() override
Frees imageRgb and imageDepth.
virtual void onExitComponent() override
Hook for subclass.
virtual void startCaptureForNumFrames(Ice::Int numFrames, const Ice::Current &c=Ice::emptyCurrent) override
void onInactiveModel(std::shared_ptr< Model > model)
std::string getConfigIdentifier()
Retrieve config identifier for this component as set in constructor.
Definition Component.cpp:90
Property< PropertyType > getProperty(const std::string &name)
Ice::ObjectPrx getProxy(long timeoutMs=0, bool waitForScheduler=true) const
Returns the proxy of this object (optionally it waits for the proxy)
The periodic task executes one thread method repeatedly using the time period specified in the constr...
Property< PropertyType > getProperty(const std::string &name)
Property creation and retrieval.
void setPointCloudSyncMode(ImageSyncMode pointCloudSyncMode)
Sets the point cloud synchronization mode.
void usingImageProvider(std::string name)
Registers a delayed topic subscription and a delayed provider proxy retrieval which all will be avail...
bool waitForImages(int milliseconds=1000)
Wait for new images.
ImageProviderInfo getImageProvider(std::string name, ImageType destinationImageType=eRgb, bool waitForProxy=false)
Select an ImageProvider.
int getImages(CByteImage **ppImages)
Poll images from provider.
int numberImages
Number of images.
void providePointCloud(PointCloudPtrT pointCloudPtr)
offer the new point cloud.
#define ARMARX_INFO
The normal logging level.
Definition Logging.h:181
#define ARMARX_ERROR
The logging level for unexpected behaviour, that must be fixed.
Definition Logging.h:196
#define ARMARX_WARNING
The logging level for unexpected behaviour, but not a serious problem.
Definition Logging.h:193
#define ARMARX_LOG
Definition Logging.h:165
#define ARMARX_VERBOSE
The logging level for verbose information.
Definition Logging.h:187
T getValue(nlohmann::json &userConfig, nlohmann::json &defaultConfig, const std::string &entryName)
Definition utils.h:80
This file offers overloads of toIce() and fromIce() functions for STL container types.
IceUtil::Handle< class PropertyDefinitionContainer > PropertyDefinitionsPtr
PropertyDefinitions smart pointer type.
std::shared_ptr< Value > value()
Definition cxxopts.hpp:855
CByteImage * createByteImage(const ImageFormatInfo &imageFormat, const ImageType imageType)
Creates a ByteImage for the destination type specified in the given imageProviderInfo.