CoFusionProcessor.cpp
/*
 * This file is part of ArmarX.
 *
 * ArmarX is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ArmarX is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * @package    VisionX::ArmarXObjects::CoFusionProcessor
 * @author     Rainer Kartmann ( rainer dot kartmann at student dot kit dot edu )
 * @date       2018
 * @copyright  http://www.gnu.org/licenses/gpl-2.0.txt
 *             GNU General Public License
 */

#include "CoFusionProcessor.h"

#include <pcl/filters/approximate_voxel_grid.h>

#include <GL/glew.h>
#include <pangolin/pangolin.h>

#include <Image/IplImageAdaptor.h>

#include <VisionX/tools/ImageUtil.h>

#include <VisionX/interface/components/PointCloudAndImageAndCalibrationProviderInterface.h>
#include <VisionX/interface/components/RGBDImageProvider.h>
#include <unistd.h>
#include <Eigen/Geometry>

#include <Core/Model/Model.h>


using namespace armarx;

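// Registers the image provider configured via the "ImageProviderName" property
// so that its images can be fetched once the component is connected.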
void CoFusionProcessor::onInitImageProcessor()
{
    imageProviderName = getProperty<std::string>("ImageProviderName").getValue();
    usingImageProvider(imageProviderName);
    //usingProxy(getProperty<std::string>("RobotStateComponentName").getValue());
}


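// Queries the image provider, checks that it delivers the expected RGB + depth
// image pair, and allocates the imageRgb/imageDepth buffers used by process().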
void CoFusionProcessor::onConnectImageProcessor()
{
    visionx::ImageProviderInfo imageProviderInfo = getImageProvider(imageProviderName);
    imageProvider = getProxy<visionx::ImageProviderInterfacePrx>(imageProviderName);

    int numImages = 2;
    if (imageProviderInfo.numberImages != numImages)
    {
        ARMARX_ERROR << "Number of images provided by " << imageProviderName << " (" << imageProviderInfo.numberImages
                     << ") does not match the expected number of images (" << numImages << ")";
    }

    {
        std::lock_guard<std::mutex> lock(imagesMutex);

        imageRgb.reset(visionx::tools::createByteImage(imageProviderInfo));
        imageDepth.reset(visionx::tools::createByteImage(imageProviderInfo));
        images[0] = imageRgb.get();
        images[1] = imageDepth.get();
    }
}

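// Releases the image buffers and the provider proxy under the images mutex.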
void CoFusionProcessor::onDisconnectImageProcessor()
{
    std::lock_guard<std::mutex> lock(imagesMutex);

    images[0] = nullptr;
    images[1] = nullptr;
    imageRgb.reset(nullptr);
    imageDepth.reset(nullptr);

    imageProvider = nullptr;
}

void CoFusionProcessor::onExitImageProcessor()
{
}


void CoFusionProcessor::onInitCapturingPointCloudProvider()
{
    setPointCloudSyncMode(getProperty<visionx::ImageSyncMode>("PointCloudSyncMode").getValue());
}

void CoFusionProcessor::onExitCapturingPointCloudProvider()
{
}


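// Reads the update/downsampling settings and all CoFusion parameters from the
// component properties; the CoFusion instance itself is created lazily in process().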
void CoFusionProcessor::onInitComponent()
{
    ImageProcessor::onInitComponent();
    CapturingPointCloudProvider::onInitComponent();

    framesBetweenUpdates = getProperty<int>("FramesBetweenUpdates").getValue();
    downsamplingLeafSize = getProperty<float>("DownsamplingLeafSize").getValue();

    framesSinceLastUpdate = framesBetweenUpdates; // trigger first update right away

    resultPointCloud.reset(new pcl::PointCloud<OutPointT>());


    // CoFusion parameters

    cofusionParams.timeDelta = getProperty<int>("cofusion.TimeDelta").getValue();
    cofusionParams.countThresh = getProperty<int>("cofusion.CountThresh").getValue();
    cofusionParams.errThresh = getProperty<float>("cofusion.ErrThresh").getValue();
    cofusionParams.covThresh = getProperty<float>("cofusion.CovThresh").getValue();
    cofusionParams.closeLoops = getProperty<bool>("cofusion.CloseLoops").getValue();
    cofusionParams.iclnuim = getProperty<bool>("cofusion.Iclnuim").getValue();
    cofusionParams.reloc = getProperty<bool>("cofusion.Reloc").getValue();
    cofusionParams.photoThresh = getProperty<float>("cofusion.PhotoThresh").getValue();
    cofusionParams.initConfidenceGlobal = getProperty<float>("cofusion.InitConfidenceGlobal").getValue();
    cofusionParams.initConfidenceObject = getProperty<float>("cofusion.InitConfidenceObject").getValue();
    cofusionParams.depthCut = getProperty<float>("cofusion.DepthCut").getValue();
    cofusionParams.icpThresh = getProperty<float>("cofusion.IcpThresh").getValue();
    cofusionParams.fastOdom = getProperty<bool>("cofusion.FastOdom").getValue();
    cofusionParams.fernThresh = getProperty<float>("cofusion.FernThresh").getValue();
    cofusionParams.so3 = getProperty<bool>("cofusion.So3").getValue();
    cofusionParams.frameToFrameRGB = getProperty<bool>("cofusion.FrameToFrameRGB").getValue();
    cofusionParams.modelSpawnOffset = getProperty<unsigned>("cofusion.ModelSpawnOffset").getValue();
    cofusionParams.matchingType = getProperty<Model::MatchingType>("cofusion.MatchingType").getValue();
    cofusionParams.exportDirectory = getProperty<std::string>("cofusion.ExportDirectory").getValue();
    cofusionParams.exportSegmentationResults = getProperty<bool>("cofusion.ExportSegmentationResults").getValue();

#if USE_MASKFUSION == 1
    cofusionParams.usePrecomputedMasksOnly = getProperty<bool>("cofusion.UsePrecomputedMasksOnly").getValue();
    cofusionParams.frameQueueSize = getProperty<int>("cofusion.FrameQueueSize").getValue();
#endif
}

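// Retrieves the camera intrinsics from the calibration provider if available
// (falling back to dummy values otherwise), reads the image format, and
// optionally starts the periodic stopwatch report task.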
void CoFusionProcessor::onConnectComponent()
{
    ImageProcessor::onConnectComponent();
    CapturingPointCloudProvider::onConnectComponent();

    //robotStateComponent = getProxy<RobotStateComponentInterfacePrx>(getProperty<std::string>("RobotStateComponentName").getValue());


    visionx::StereoCalibrationProviderInterfacePrx calibrationProvider = visionx::StereoCalibrationProviderInterfacePrx::checkedCast(imageProvider);

    if (calibrationProvider)
    {
        ARMARX_LOG << "Reading intrinsics from calibration provider";
        visionx::StereoCalibration calibration = calibrationProvider->getStereoCalibration();

        cofusionParams.fx = calibration.calibrationRight.cameraParam.focalLength[0];
        cofusionParams.fy = calibration.calibrationRight.cameraParam.focalLength[1];
        cofusionParams.cx = calibration.calibrationRight.cameraParam.principalPoint[0];
        cofusionParams.cy = calibration.calibrationRight.cameraParam.principalPoint[1];

        referenceFrameName = calibrationProvider->getReferenceFrame();
    }
    else
    {
        cofusionParams.fx = 525.0;
        cofusionParams.fy = 525.0;
        cofusionParams.cx = 320;
        cofusionParams.cy = 240;
        referenceFrameName = "unknown";
        ARMARX_WARNING << "Unable to get camera intrinsics. Using dummy values.";
    }


    visionx::ImageFormatInfo imageFormat = imageProvider->getImageFormat();

    cofusionParams.width = imageFormat.dimension.width;
    cofusionParams.height = imageFormat.dimension.height;


    if (getProperty<bool>("ShowTimeStats").getValue())
    {
        if (stopwatchReportTask)
        {
            stopwatchReportTask->stop();
        }
        stopwatch.reset();
        stopwatchReportTask = new PeriodicTask<CoFusionProcessor>(this, &CoFusionProcessor::reportStopwatchStats, 10000);
        stopwatchReportTask->start();
    }

    providePointCloud<OutPointT>(resultPointCloud);
}

void CoFusionProcessor::onDisconnectComponent()
{
    ImageProcessor::onDisconnectComponent();
    CapturingPointCloudProvider::onDisconnectComponent();
}

void CoFusionProcessor::onExitComponent()
{
    ImageProcessor::onExitComponent();
    CapturingPointCloudProvider::onExitComponent();
}

void CoFusionProcessor::onNewModel(std::shared_ptr<Model> model)
{
    ARMARX_INFO << "new model";
}

void CoFusionProcessor::onInactiveModel(std::shared_ptr<Model> model)
{
    ARMARX_INFO << "inactive model";
}


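// Image processing loop: fetch the RGB and depth images, lazily initialize
// CoFusion on the first frame, convert the images into a FrameData, feed it to
// CoFusion, and periodically download the models' surfel maps.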
void CoFusionProcessor::process()
{
    if (!waitForImages(imageProviderName))
    {
        ARMARX_WARNING << "Timeout or error in waitForImages(" << imageProviderName << ")";
        return;
    }

    ARMARX_INFO << deactivateSpam() << "Processing input image ...";

    std::lock_guard<std::mutex> lock(imagesMutex);

    armarx::MetaInfoSizeBasePtr metaInfo;
    // retrieve images
    int numImages = getImages(imageProviderName, images, metaInfo);
    if (numImages != 2)
    {
        ARMARX_WARNING << "Did not receive the correct number of images! (Expected 2, but got "
                       << numImages << ".) Aborting!";
        return;
    }

    if (!cofusion)
    {
        initializeCoFusion();
    }

    // create FrameData (CoFusion)
    stopwatch.start("ImagePreprocessing");

    FrameData frame = constructFrameData();

    stopwatch.stop("ImagePreprocessing");

    // FramedPoseBasePtr globalPose = robotStateComponent->getRobotSnapshotAtTimestamp(metaInfo->timeProvided)->getRobotNode(referenceFrameName)->getGlobalPose();

    // do this for each incoming frame
    stopwatch.start("CoFusion::ProcessFrame");

#if USE_MASKFUSION == 1
    std::shared_ptr<FrameData> framePtr = std::make_shared<FrameData>(frame);

    cofusion->processFrame(framePtr);
#else
    cofusion->processFrame(frame);
#endif

    stopwatch.stop("CoFusion::ProcessFrame");


    framesSinceLastUpdate++;
    if (framesSinceLastUpdate >= framesBetweenUpdates)
    {
        updateSurfelMaps();
        framesSinceLastUpdate = 0;

        ARMARX_VERBOSE << "Stopwatch stats: \n" << stopwatch;
    }
}


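// The Pangolin window is created here primarily to obtain an OpenGL context and
// to initialize GLEW before CoFusion allocates its GPU resources.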
void CoFusionProcessor::initializeCoFusion()
{
    ARMARX_INFO << "Initializing CoFusion (resolution: " << cofusionParams.width << "x" << cofusionParams.height << ")";

    pangolin::Params windowParams;
    windowParams.Set("SAMPLE_BUFFERS", 0);
    windowParams.Set("SAMPLES", 0);

    pangolin::CreateWindowAndBind("Main", cofusionParams.width, cofusionParams.height, windowParams);


    GLenum glewInitError = glewInit();
    if (glewInitError)
    {
        ARMARX_ERROR << "glewInit() failed with error: " << glewInitError;
    }

    int prog = glCreateProgram();
    ARMARX_INFO << "Created gl program (" << prog << ")";


    ARMARX_INFO << "Constructing CoFusion";
    cofusion = cofusionParams.makeCoFusion();

    ARMARX_INFO << "registering callbacks";
    cofusion->addNewModelListener([this](std::shared_ptr<Model> m)
    {
        onNewModel(m);
    });
    cofusion->addInactiveModelListener([this](std::shared_ptr<Model> m)
    {
        onInactiveModel(m);
    });

#if USE_MASKFUSION == 1
    cofusion->preallocateModels(getProperty<int>("cofusion.PreallocatedModelsCount").getValue());
#endif
}


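// Converts the provider images into CoFusion's FrameData: the RGB image is passed
// through unchanged, while the depth image (millimeters packed into 24 bits across
// three byte channels) is decoded into a single-channel float matrix in meters.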
FrameData CoFusionProcessor::constructFrameData()
{
    bool copyData = false;
    cv::Mat matRgb = cv::cvarrToMat(IplImageAdaptor::Adapt(imageRgb.get()), copyData);
    cv::Mat matDepth = cv::cvarrToMat(IplImageAdaptor::Adapt(imageDepth.get()), copyData);


    FrameData frame;

    // The color image can be passed through as-is.
    frame.rgb = matRgb;

    // The depth image must be converted.
    int rows = matDepth.rows;
    int cols = matDepth.cols;


    // Target type is 32-bit float.
    cv::Mat matDepthCofusion(rows, cols, CV_32FC1);

    for (int x = 0; x < rows; ++x)
    {
        for (int y = 0; y < cols; ++y)
        {
            // extract float from source image

            /* Constructing code from DepthImageProviderDynamicSimulation (ArmarXSimulation):
             *
             * float* depthBufferEndOfRow = depthBuffer.data() + width - 1;
             * // ...
             *
             * std::int32_t value = boost::algorithm::clamp(0.f, depthBufferEndOfRow[-x], static_cast<float>(0xffffff));
             * dImageRow[x * 3] = value & 0xff;
             * dImageRow[x * 3 + 1] = (value >> 8) & 0xff;
             * dImageRow[x * 3 + 2] = (value >> 16) & 0xff;
             */

            /*
             * My reasoning:
             * The int32_t 'value' above should be cast semantically correctly from float to int
             * (i.e. it should contain the correct/rounded value, not the bit pattern).
             * Therefore, we extract the value into an int32_t first, then cast it back to float.
             */

            cv::Vec3b depthPixel = matDepth.at<cv::Vec3b>(x, y);

            int32_t value = 0;
            value += depthPixel[0];
            value += depthPixel[1] << 8;
            value += depthPixel[2] << 16;

            /*
            // For use with a row pointer:
            value += rowPtr[y * 3];
            value += rowPtr[y * 3 + 1] << 8;
            value += rowPtr[y * 3 + 2] << 16;
            */

            float depth = value;

            // map millimeters to meters
            depth /= 1000;

            // set in new matrix
            matDepthCofusion.at<float>(x, y) = depth;
        }
    }

    frame.depth = matDepthCofusion; // in meters

    return frame;
}



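// Downloads the surfel map of every model currently tracked by CoFusion into
// surfelMaps (keyed by model ID) and flags that a new point cloud can be built.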
void CoFusionProcessor::updateSurfelMaps()
{
    std::list<std::shared_ptr<Model>>& models = cofusion->getModels();

    if (models.empty())
    {
        ARMARX_WARNING << "Got no models from CoFusion.";
        return;
    }

    std::lock_guard<std::mutex> lock(surfelMapMutex);

    ARMARX_INFO << "Downloading surfel maps of " << models.size() << " models ...";
    surfelMaps.clear();
    for (std::shared_ptr<Model> const& model : models)
    {
        unsigned int id = model->getID();

        ARMARX_VERBOSE << "Downloading surfel map of model " << id;
        // SurfelMap contains unique_ptr's and therefore must be inserted directly.

#if 1
        stopwatch.start("Model::downloadMap()");
        surfelMaps[id] = model->downloadMap();

        Model::SurfelMap& surfelMap = surfelMaps[id];
        ARMARX_VERBOSE << "SurfelMap contains " << surfelMap.numPoints << " points "
                       << "(container size: " << surfelMap.data->size() << ")";

        stopwatch.stop("Model::downloadMap()");
#endif
    }

    newFrameAvailable = true;
}

void CoFusionProcessor::onStartCapture(float framesPerSecond)
{
}

void CoFusionProcessor::startCaptureForNumFrames(Ice::Int numFrames, const Ice::Current& c)
{
    ARMARX_WARNING << "CoFusionProcessor currently does not support startCaptureForNumFrames(numFrames)";
}

void CoFusionProcessor::onStopCapture()
{
}

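// Point cloud provider loop: whenever updateSurfelMaps() has produced new data,
// rebuild the result point cloud and publish it; otherwise sleep briefly.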
bool CoFusionProcessor::doCapture()
{
    if (newFrameAvailable)
    {
        newFrameAvailable = false;
        resultPointCloud.reset();
        reconstructResultPointCloud();
        providePointCloud<OutPointT>(resultPointCloud);
    }
    else
    {
        usleep(1000);
    }
    return true;
}

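// Assembles the published point cloud from the downloaded surfel maps: positions
// are scaled from meters to millimeters, the packed 24-bit color is split into
// RGB, the model ID becomes the point label, and the result is optionally
// downsampled with an approximate voxel grid.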
void CoFusionProcessor::reconstructResultPointCloud()
{
    std::lock_guard<std::mutex> lock(surfelMapMutex);

    resultPointCloud.reset(new pcl::PointCloud<OutPointT>());

    // Create point cloud
    ARMARX_INFO << "Reconstructing point cloud from " << surfelMaps.size() << " SurfelMaps.";
    stopwatch.start("PointCloud construction");

    for (auto& it : surfelMaps)
    {
        unsigned int id = it.first;
        Model::SurfelMap& surfelMap = it.second;


        ARMARX_INFO << "SurfelMap of model " << id
                    << " contains " << surfelMap.numPoints << " points "
                    << "(container size: " << surfelMap.data->size() << ")";

        /* From Model.h @ struct SurfelMap:
         * "Returns a vector of 4-float tuples: position0, color0, normal0, ..., positionN, colorN, normalN
         * Where
         * position is (x,y,z,conf),
         * color is (color encoded as a 24-bit integer, <unused>, initTime, timestamp),
         * and normal is (x,y,z,radius)"
         */

        // Try to reduce the number of reallocations.
        resultPointCloud->reserve(resultPointCloud->size() + surfelMap.numPoints);

        for (unsigned int i = 0; i < surfelMap.numPoints; ++i)
        {
            Eigen::Vector4f& position4f = (*surfelMap.data)[i * 3 + 0];
            position4f.head<3>() *= 1000; // meters to millimeters

            Eigen::Vector4f& color4f = (*surfelMap.data)[i * 3 + 1];
            //Eigen::Vector4f& normal4f = (*surfelMap.data)[i * 3 + 2];

            uint32_t color24b = static_cast<uint32_t>(color4f(0));
            uchar r = (color24b >> 16) & 0xFF;
            uchar g = (color24b >> 8) & 0xFF;
            uchar b = (color24b >> 0) & 0xFF;

            //Eigen::Vector3f normal = normal4f.head(3);
            //float radius = normal4f(3);


            //OutPointT point;
            pcl::PointXYZRGBL point;
            point.x = position4f(0);
            point.y = position4f(1);
            point.z = position4f(2);
            point.r = r;
            point.g = g;
            point.b = b;
            point.label = id; // model's ID as label
            resultPointCloud->push_back(point);
        }

        // TODO: connect to prior memory and add object
    }

    if (downsamplingLeafSize > 0.f)
    {
        pcl::PointCloud<OutPointT>::Ptr filtered(new pcl::PointCloud<OutPointT>());
        std::size_t sizeBefore = resultPointCloud->size();

        pcl::ApproximateVoxelGrid<OutPointT> grid;
        grid.setLeafSize(downsamplingLeafSize, downsamplingLeafSize, downsamplingLeafSize);
        grid.setInputCloud(resultPointCloud);
        grid.filter(*filtered);
        resultPointCloud.swap(filtered);

        std::size_t sizeAfter = resultPointCloud->size();

        ARMARX_INFO << "Downsampled to " << sizeAfter << " points (before: " << sizeBefore << ")";
    }

    stopwatch.stop("PointCloud construction");
}

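// The shared-memory capacity below is sized generously (roughly 50 VGA-resolution
// point sets of ColoredLabeledPoint3D), presumably to leave room for multiple models.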
visionx::MetaPointCloudFormatPtr CoFusionProcessor::getDefaultPointCloudFormat()
{
    visionx::MetaPointCloudFormatPtr info = new visionx::MetaPointCloudFormat();
    info->capacity = 50 * 640 * 480 * sizeof(visionx::ColoredLabeledPoint3D);
    info->size = info->capacity;
    info->type = visionx::eColoredLabeledPoints;
    return info;
}

void CoFusionProcessor::reportStopwatchStats()
{
    ARMARX_INFO << "Stopwatch stats:\n " << stopwatch;
}


armarx::PropertyDefinitionsPtr CoFusionProcessor::createPropertyDefinitions()
{
    return armarx::PropertyDefinitionsPtr(
               new CoFusionProcessorPropertyDefinitions(getConfigIdentifier()));
}