CoFusionProcessor.cpp
/*
 * This file is part of ArmarX.
 *
 * ArmarX is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ArmarX is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * @package    VisionX::ArmarXObjects::CoFusionProcessor
 * @author     Rainer Kartmann ( rainer dot kartmann at student dot kit dot edu )
 * @date       2018
 * @copyright  http://www.gnu.org/licenses/gpl-2.0.txt
 *             GNU General Public License
 */

#include "CoFusionProcessor.h"

#include <unistd.h>

#include <Eigen/Geometry>

#include <pcl/filters/approximate_voxel_grid.h>

#include <VisionX/interface/components/PointCloudAndImageAndCalibrationProviderInterface.h>
#include <VisionX/interface/components/RGBDImageProvider.h>
#include <VisionX/tools/ImageUtil.h>

#include <Core/Model/Model.h>
#include <GL/glew.h>
#include <Image/IplImageAdaptor.h>
#include <pangolin/pangolin.h>

using namespace armarx;

void
CoFusionProcessor::onInitImageProcessor()
{
    imageProviderName = getProperty<std::string>("ImageProviderName").getValue();
    usingImageProvider(imageProviderName);
    //usingProxy(getProperty<std::string>("RobotStateComponentName").getValue());
}

void
CoFusionProcessor::onConnectImageProcessor()
{
    visionx::ImageProviderInfo imageProviderInfo = getImageProvider(imageProviderName);
    imageProvider = getProxy<visionx::ImageProviderInterfacePrx>(imageProviderName);

    int numImages = 2;
    if (imageProviderInfo.numberImages != numImages)
    {
        ARMARX_ERROR << "Number of images provided by " << imageProviderName << " ("
                     << imageProviderInfo.numberImages
                     << ") does not match the expected number of images (" << numImages << ")";
    }

    {
        std::lock_guard<std::mutex> lock(imagesMutex);

        imageRgb.reset(visionx::tools::createByteImage(imageProviderInfo));
        imageDepth.reset(visionx::tools::createByteImage(imageProviderInfo));
        images[0] = imageRgb.get();
        images[1] = imageDepth.get();
    }
}

void
CoFusionProcessor::onDisconnectImageProcessor()
{
    std::lock_guard<std::mutex> lock(imagesMutex);

    images[0] = nullptr;
    images[1] = nullptr;
    imageRgb.reset(nullptr);
    imageDepth.reset(nullptr);

    imageProvider = nullptr;
}

void
CoFusionProcessor::onExitImageProcessor()
{
}

void
CoFusionProcessor::onInitCapturingPointCloudProvider()
{
    setPointCloudSyncMode(getProperty<visionx::ImageSyncMode>("PointCloudSyncMode").getValue());
}

void
CoFusionProcessor::onExitCapturingPointCloudProvider()
{
}

void
CoFusionProcessor::onInitComponent()
{
    ImageProcessor::onInitComponent();
    CapturingPointCloudProvider::onInitComponent();

    framesBetweenUpdates = getProperty<int>("FramesBetweenUpdates").getValue();
    downsamplingLeafSize = getProperty<float>("DownsamplingLeafSize").getValue();

    framesSinceLastUpdate = framesBetweenUpdates; // trigger first update right away

    resultPointCloud.reset(new pcl::PointCloud<OutPointT>());


    // COFUSION PARAMS

    cofusionParams.timeDelta = getProperty<int>("cofusion.TimeDelta").getValue();
    cofusionParams.countThresh = getProperty<int>("cofusion.CountThresh").getValue();
    cofusionParams.errThresh = getProperty<float>("cofusion.ErrThresh").getValue();
    cofusionParams.covThresh = getProperty<float>("cofusion.CovThresh").getValue();
    cofusionParams.closeLoops = getProperty<bool>("cofusion.CloseLoops").getValue();
    cofusionParams.iclnuim = getProperty<bool>("cofusion.Iclnuim").getValue();
    cofusionParams.reloc = getProperty<bool>("cofusion.Reloc").getValue();
    cofusionParams.photoThresh = getProperty<float>("cofusion.PhotoThresh").getValue();
    cofusionParams.initConfidenceGlobal =
        getProperty<float>("cofusion.InitConfidenceGlobal").getValue();
    cofusionParams.initConfidenceObject =
        getProperty<float>("cofusion.InitConfidenceObject").getValue();
    cofusionParams.depthCut = getProperty<float>("cofusion.DepthCut").getValue();
    cofusionParams.icpThresh = getProperty<float>("cofusion.IcpThresh").getValue();
    cofusionParams.fastOdom = getProperty<bool>("cofusion.FastOdom").getValue();
    cofusionParams.fernThresh = getProperty<float>("cofusion.FernThresh").getValue();
    cofusionParams.so3 = getProperty<bool>("cofusion.So3").getValue();
    cofusionParams.frameToFrameRGB = getProperty<bool>("cofusion.FrameToFrameRGB").getValue();
    cofusionParams.modelSpawnOffset = getProperty<unsigned>("cofusion.ModelSpawnOffset").getValue();
    cofusionParams.matchingType =
        getProperty<Model::MatchingType>("cofusion.MatchingType").getValue();
    cofusionParams.exportDirectory =
        getProperty<std::string>("cofusion.ExportDirectory").getValue();
    cofusionParams.exportSegmentationResults =
        getProperty<bool>("cofusion.ExportSegmentationResults").getValue();

#if USE_MASKFUSION == 1
    cofusionParams.usePrecomputedMasksOnly =
        getProperty<bool>("cofusion.UsePrecomputedMasksOnly").getValue();
    cofusionParams.frameQueueSize = getProperty<int>("cofusion.FrameQueueSize").getValue();
#endif
}

void
CoFusionProcessor::onConnectComponent()
{
    ImageProcessor::onConnectComponent();
    CapturingPointCloudProvider::onConnectComponent();

    //robotStateComponent = getProxy<RobotStateComponentInterfacePrx>(getProperty<std::string>("RobotStateComponentName").getValue());

    visionx::StereoCalibrationProviderInterfacePrx calibrationProvider =
        visionx::StereoCalibrationProviderInterfacePrx::checkedCast(imageProvider);

    if (calibrationProvider)
    {
        ARMARX_LOG << "reading intrinsics from calibration provider";
        visionx::StereoCalibration calibration = calibrationProvider->getStereoCalibration();

        cofusionParams.fx = calibration.calibrationRight.cameraParam.focalLength[0];
        cofusionParams.fy = calibration.calibrationRight.cameraParam.focalLength[1];
        cofusionParams.cx = calibration.calibrationRight.cameraParam.principalPoint[0];
        cofusionParams.cy = calibration.calibrationRight.cameraParam.principalPoint[1];

        referenceFrameName = calibrationProvider->getReferenceFrame();
    }
    else
    {
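        // Fallback: commonly used VGA RGB-D defaults (~525 px focal length, principal
        // point at the 640x480 image center). These are assumptions, not calibrated values.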
        cofusionParams.fx = 525.0;
        cofusionParams.fy = 525.0;
        cofusionParams.cx = 320;
        cofusionParams.cy = 240;
        referenceFrameName = "unknown";
        ARMARX_WARNING << "unable to get camera intrinsics. using dummy values";
    }


    visionx::ImageFormatInfo imageFormat = imageProvider->getImageFormat();

    cofusionParams.width = imageFormat.dimension.width;
    cofusionParams.height = imageFormat.dimension.height;


    if (getProperty<bool>("ShowTimeStats").getValue())
    {
        if (stopwatchReportTask)
        {
            stopwatchReportTask->stop();
        }
        stopwatch.reset();
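        // Periodically report stopwatch statistics; the interval is presumably given
        // in milliseconds, i.e. roughly every 10 seconds.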
        stopwatchReportTask = new PeriodicTask<CoFusionProcessor>(
            this, &CoFusionProcessor::reportStopwatchStats, 10000);
        stopwatchReportTask->start();
    }

    providePointCloud<OutPointT>(resultPointCloud);
}

void
CoFusionProcessor::onDisconnectComponent()
{
    ImageProcessor::onDisconnectComponent();
    CapturingPointCloudProvider::onDisconnectComponent();
}

void
CoFusionProcessor::onExitComponent()
{
    ImageProcessor::onExitComponent();
    CapturingPointCloudProvider::onExitComponent();
}

void
CoFusionProcessor::onNewModel(std::shared_ptr<Model> model)
{
    ARMARX_INFO << "new model";
}

void
CoFusionProcessor::onInactiveModel(std::shared_ptr<Model> model)
{
    ARMARX_INFO << "inactive model";
}

void
CoFusionProcessor::process()
{
    if (!waitForImages(imageProviderName))
    {
        ARMARX_WARNING << "Timeout or error in waitForImages(" << imageProviderName << ")";
        return;
    }

    ARMARX_INFO << deactivateSpam() << "Processing input image ...";

    std::lock_guard<std::mutex> lock(imagesMutex);

    armarx::MetaInfoSizeBasePtr metaInfo;
    // retrieve images
    int numImages = getImages(imageProviderName, images, metaInfo);
    if (numImages != 2)
    {
        ARMARX_WARNING << "Did not receive correct number of images! (Expected 2, but was "
                       << numImages << ".) Aborting!";
        return;
    }

    if (!cofusion)
    {
        initializeCoFusion();
    }

    // create FrameData (CoFusion)
    stopwatch.start("ImagePreprocessing");

    FrameData frame = constructFrameData();

    stopwatch.stop("ImagePreprocessing");

    // FramedPoseBasePtr globalPose = robotStateComponent->getRobotSnapshotAtTimestamp(metaInfo->timeProvided)->getRobotNode(referenceFrameName)->getGlobalPose();

    // do this for each incoming frame
    stopwatch.start("CoFusion::ProcessFrame");

#if USE_MASKFUSION == 1
    std::shared_ptr<FrameData> framePtr = std::make_shared<FrameData>(frame);

    cofusion->processFrame(framePtr);
#else
    cofusion->processFrame(frame);
#endif

    stopwatch.stop("CoFusion::ProcessFrame");

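    // Downloading the surfel maps is comparatively expensive, so it is only done
    // every framesBetweenUpdates processed frames.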
    framesSinceLastUpdate++;
    if (framesSinceLastUpdate >= framesBetweenUpdates)
    {
        updateSurfelMaps();
        framesSinceLastUpdate = 0;

        ARMARX_VERBOSE << "Stopwatch stats: \n" << stopwatch;
    }
}

void
CoFusionProcessor::initializeCoFusion()
{
    ARMARX_INFO << "Initializing CoFusion (resolution: " << cofusionParams.width << "x"
                << cofusionParams.height << ")";

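    // CoFusion renders and fuses its surfel models with OpenGL, so a valid GL context
    // must exist before it is constructed; the Pangolin window created below provides
    // that context, and glewInit() then loads the GL entry points.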
    pangolin::Params windowParams;
    windowParams.Set("SAMPLE_BUFFERS", 0);
    windowParams.Set("SAMPLES", 0);

    pangolin::CreateWindowAndBind(
        "Main", cofusionParams.width, cofusionParams.height, windowParams);


    GLenum glewInitError = glewInit();
    if (glewInitError)
    {
        ARMARX_ERROR << "glewInit() failed with error: " << glewInitError;
    }

    int prog = glCreateProgram();
    ARMARX_INFO << "Created gl program (" << prog << ")";


    ARMARX_INFO << "Constructing CoFusion";
    cofusion = cofusionParams.makeCoFusion();

    ARMARX_INFO << "registering callbacks";
    cofusion->addNewModelListener([this](std::shared_ptr<Model> m) { onNewModel(m); });
    cofusion->addInactiveModelListener([this](std::shared_ptr<Model> m) { onInactiveModel(m); });

#if USE_MASKFUSION == 1
    cofusion->preallocateModels(getProperty<int>("cofusion.PreallocatedModelsCount").getValue());
#endif
}

FrameData
CoFusionProcessor::constructFrameData()
{
    bool copyData = false;
    cv::Mat matRgb = cv::cvarrToMat(IplImageAdaptor::Adapt(imageRgb.get()), copyData);
    cv::Mat matDepth = cv::cvarrToMat(IplImageAdaptor::Adapt(imageDepth.get()), copyData);


    FrameData frame;

    // color image is easy
    frame.rgb = matRgb;

    // depth image must be converted
    int rows = matDepth.rows;
    int cols = matDepth.cols;


    // type is 32 bit float
    cv::Mat matDepthCofusion(rows, cols, CV_32FC1);

    for (int x = 0; x < rows; ++x)
    {
        for (int y = 0; y < cols; ++y)
        {
            // extract float from source image

            /* Constructing code from DepthImageProviderDynamicSimulation (ArmarXSimulation):
             *
             * float* depthBufferEndOfRow = depthBuffer.data() + width - 1;
             * // ...
             *
             * std::int32_t value = boost::algorithm::clamp(0.f, depthBufferEndOfRow[-x], static_cast<float>(0xffffff));
             * dImageRow[x * 3] = value & 0xff;
             * dImageRow[x * 3 + 1] = (value >> 8) & 0xff;
             * dImageRow[x * 3 + 2] = (value >> 16) & 0xff;
             */

            /*
             * My reasoning:
             * int32_t 'value' above should be cast semantically correctly from float to int
             * (i.e. it should contain the correct/rounded value, not the bit pattern).
             * Therefore, we extract the values into an int32_t first, then cast it back to float.
             */
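
            /*
             * Worked example (illustrative, not from the original source):
             * a depth of 1234 mm is stored little-endian in the three bytes
             * {0xD2, 0x04, 0x00}; the code below reassembles it as
             * 0xD2 + (0x04 << 8) + (0x00 << 16) = 1234 and scales it to 1.234 m.
             */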

            cv::Vec3b depthPixel = matDepth.at<cv::Vec3b>(x, y);

            int32_t value = 0;
            value += depthPixel[0];
            value += depthPixel[1] << 8;
            value += depthPixel[2] << 16;

            /*
            // For use with row pointer
            value += rowPtr[y * 3];
            value += rowPtr[y * 3 + 1] << 8;
            value += rowPtr[y * 3 + 2] << 16;
            */

            float depth = value;

            // map millimeters to meters
            depth /= 1000;

            // set in new matrix
            matDepthCofusion.at<float>(x, y) = depth;
        }
    }

    frame.depth = matDepthCofusion; // in meters

    return frame;
}

void
CoFusionProcessor::updateSurfelMaps()
{
    std::list<std::shared_ptr<Model>>& models = cofusion->getModels();

    if (models.empty())
    {
        ARMARX_WARNING << "Got no models from CoFusion.";
        return;
    }

    std::lock_guard<std::mutex> lock(surfelMapMutex);

    ARMARX_INFO << "Downloading surfel maps of " << models.size() << " models ...";
    surfelMaps.clear();
    for (std::shared_ptr<Model> const& model : models)
    {
        unsigned int id = model->getID();

        ARMARX_VERBOSE << "Downloading surfel map of model " << id;
        // SurfelMap contains unique_ptr's and therefore must be inserted directly

#if 1
        stopwatch.start("Model::downloadMap()");
        surfelMaps[id] = model->downloadMap();

        Model::SurfelMap& surfelMap = surfelMaps[id];
        ARMARX_VERBOSE << "SurfelMap contains " << surfelMap.numPoints << " points "
                       << "(container size: " << surfelMap.data->size() << ")";

        stopwatch.stop("Model::downloadMap()");
#endif
    }

    newFrameAvailable = true;
}

void
CoFusionProcessor::onStartCapture(float framesPerSecond)
{
}

void
CoFusionProcessor::startCaptureForNumFrames(Ice::Int numFrames, const Ice::Current& c)
{
    ARMARX_WARNING
        << "CoFusionProcessor does currently not support startCaptureForNumFrames(numFrames)";
}

void
CoFusionProcessor::onStopCapture()
{
}

bool
CoFusionProcessor::doCapture()
{
    if (newFrameAvailable)
    {
        newFrameAvailable = false;
        resultPointCloud.reset();
        reconstructResultPointCloud();
        providePointCloud<OutPointT>(resultPointCloud);
    }
    else
    {
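        // no new frame available: sleep for 1 ms to avoid busy-waiting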
        usleep(1000);
    }
    return true;
}

void
CoFusionProcessor::reconstructResultPointCloud()
{
    std::lock_guard<std::mutex> lock(surfelMapMutex);

    resultPointCloud.reset(new pcl::PointCloud<OutPointT>());

    // Create point cloud
    ARMARX_INFO << "Reconstructing point cloud from " << surfelMaps.size() << " SurfelMaps.";
    stopwatch.start("PointCloud construction");

    for (auto& it : surfelMaps)
    {
        unsigned int id = it.first;
        Model::SurfelMap& surfelMap = it.second;


        ARMARX_INFO << "SurfelMap of model " << id << " contains " << surfelMap.numPoints
                    << " points "
                    << "(container size: " << surfelMap.data->size() << ")";

        /* From Model.h @ struct SurfelMap:
         * "Returns a vector of 4-float tuples: position0, color0, normal0, ..., positionN, colorN, normalN
         * Where
         * position is (x,y,z,conf),
         * color is (color encoded as a 24-bit integer, <unused>, initTime, timestamp),
         * and normal is (x,y,z,radius)"
         */
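
        // Hence surfel i occupies three consecutive Vector4f entries:
        // data[3*i] (position), data[3*i + 1] (color), data[3*i + 2] (normal).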

        // try to reduce number of reallocations
        resultPointCloud->reserve(resultPointCloud->size() + surfelMap.numPoints);

        for (unsigned int i = 0; i < surfelMap.numPoints; ++i)
        {
            Eigen::Vector4f& position4f = (*surfelMap.data)[i * 3 + 0];
            position4f.head<3>() *= 1000; // meters to millimeters

            Eigen::Vector4f& color4f = (*surfelMap.data)[i * 3 + 1];
            //Eigen::Vector4f& normal4f = (*surfelMap.data)[i * 3 + 2];

            uint32_t color24b = static_cast<uint32_t>(color4f(0));
            uchar r = (color24b >> 16) & 0xFF;
            uchar g = (color24b >> 8) & 0xFF;
            uchar b = (color24b >> 0) & 0xFF;
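            // e.g. color24b == 0x336699 unpacks to r = 0x33, g = 0x66, b = 0x99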

            //Eigen::Vector3f normal = normal4f.head(3);
            //float radius = normal4f(3);


            //OutPointT point;
            pcl::PointXYZRGBL point;
            point.x = position4f(0);
            point.y = position4f(1);
            point.z = position4f(2);
            point.r = r;
            point.g = g;
            point.b = b;
            point.label = id; // model's id as label
            resultPointCloud->push_back(point);
        }

        // todo connect to prior memory and add object
    }

    if (downsamplingLeafSize > 0.f)
    {
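        // The voxel grid leaf size is in the same units as the cloud's coordinates,
        // i.e. millimeters here (after the meters-to-millimeters scaling above).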
        pcl::PointCloud<OutPointT>::Ptr filtered(new pcl::PointCloud<OutPointT>());
        std::size_t sizeBefore = resultPointCloud->size();

        pcl::ApproximateVoxelGrid<OutPointT> grid;
        grid.setLeafSize(downsamplingLeafSize, downsamplingLeafSize, downsamplingLeafSize);
        grid.setInputCloud(resultPointCloud);
        grid.filter(*filtered);
        resultPointCloud.swap(filtered);

        std::size_t sizeAfter = resultPointCloud->size();

        ARMARX_INFO << "Downsampled to " << sizeAfter << " points (before: " << sizeBefore << ")";
    }

    stopwatch.stop("PointCloud construction");
}

visionx::MetaPointCloudFormatPtr
CoFusionProcessor::getDefaultPointCloudFormat()
{
    visionx::MetaPointCloudFormatPtr info = new visionx::MetaPointCloudFormat();
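    // Reserve shared memory for 50 x VGA (640 x 480) colored, labeled points,
    // presumably headroom for up to 50 models' worth of surfels.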
    info->capacity = 50 * 640 * 480 * sizeof(visionx::ColoredLabeledPoint3D);
    info->size = info->capacity;
    info->type = visionx::eColoredLabeledPoints;
    return info;
}

void
CoFusionProcessor::reportStopwatchStats()
{
    ARMARX_INFO << "Stopwatch stats:\n " << stopwatch;
}

armarx::PropertyDefinitionsPtr
CoFusionProcessor::createPropertyDefinitions()
{
    return armarx::PropertyDefinitionsPtr(
        new CoFusionProcessorPropertyDefinitions(getConfigIdentifier()));
}