FeatureCalculation.cpp
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 #include "FeatureCalculation.h"
25 
26 #include "MSERCalculation.h"
27 #include "OLPTools.h"
28 
29 
30 #include <opencv2/calib3d.hpp>
31 
32 // IVT
33 #include <Calibration/Calibration.h>
34 #include <Calibration/Rectification.h>
35 #include <Calibration/StereoCalibration.h>
36 #include <Features/SIFTFeatures/SIFTFeatureCalculator.h>
37 #include <Features/SIFTFeatures/SIFTFeatureEntry.h>
38 #include <Image/ByteImage.h>
39 #include <Image/ImageProcessor.h>
40 #include <Image/IplImageAdaptor.h>
41 #include <Image/StereoMatcher.h>
42 #include <Math/Math3d.h>
43 #include <Threading/Threading.h>
44 
45 
46 // OpenCV
47 #include <opencv2/opencv.hpp>
48 
49 // OpenMP
50 #include <omp.h>
51 
52 // system
53 #include <sys/time.h>
54 
56 
57 CFeatureCalculation::CFeatureCalculation()
58 {
59  // create SIFTFeatureCalculator
60  m_pSIFTFeatureCalculator = new CSIFTFeatureCalculator();
61  // create HarrisSIFTFeatureCalculator
62  //m_pHarrisSIFTFeatureCalculator = new CHarrisSIFTFeatureCalculator(0.0002f, 3, 1000); // 0.01f, 3, 500
63  //CDynamicArray* pResultList = new CDynamicArray(100);
64 
65  m_pInterestPoints = new Vec2d[m_nMaxNumInterestPoints];
66 }
67 
68 CFeatureCalculation::~CFeatureCalculation()
69 {
70  //delete m_pSIFTFeatureCalculator;
71  delete[] m_pInterestPoints;
72 }
73 
74 void
75 CFeatureCalculation::GetAllFeaturePoints(const CByteImage* pImageLeftColor,
76  const CByteImage* pImageRightColor,
77  const CByteImage* pImageLeftGrey,
78  const CByteImage* pImageRightGrey,
79  const int nDisparityPointDistance,
80  CStereoCalibration* pStereoCalibration,
81  CSIFTFeatureArray& aAllSIFTPoints,
82  std::vector<CMSERDescriptor3D*>& aAllMSERs,
83  std::vector<CHypothesisPoint*>& aPointsFromDepthImage,
84  CByteImage* pDisparityImage,
85  std::vector<Vec3d>* pAll3DPoints)
86 {
87  timeval tStart, tEnd;
88  gettimeofday(&tStart, 0);
89  omp_set_nested(true);
90 
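 // The three extraction stages below are independent, so they run as parallel OMP
 // sections: (1) Harris interest points with stereo matching and SIFT descriptors,
 // (2) MSER regions, (3) dense 3D points from a disparity map. Nested parallelism
 // is enabled because section (1) starts its own parallel loop.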
91 #pragma omp parallel sections
92  {
93 
94  //**************************************************************************************************************
95  // calculate Harris interest points and find stereo correspondences
96  //**************************************************************************************************************
97 
98 
99 #pragma omp section
100  {
101  const int nNumFeatures =
102  ImageProcessor::CalculateHarrisInterestPoints(pImageLeftGrey,
103  m_pInterestPoints,
104  m_nMaxNumInterestPoints,
105  OLP_HARRIS_POINT_QUALITY,
106  OLP_HARRIS_POINT_DISTANCE); // 0.001
107  //ARMARX_VERBOSE_S << " --- Number of Features: " << nNumFeatures << " ---";
108 
109  // search corresponding features in right image
110  // create StereoMatcher
111  CStereoMatcher* stereoMatcher = new CStereoMatcher();
112  stereoMatcher->InitCameraParameters(pStereoCalibration, false);
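 // Restrict the disparity search to plausible depths: the estimate for
 // 4 * OLP_MAX_OBJECT_DISTANCE gives the smallest accepted disparity (far bound),
 // the estimate for 0.2 * OLP_MAX_OBJECT_DISTANCE the largest (near bound).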
113  const int nDispMin = stereoMatcher->GetDisparityEstimate(4 * OLP_MAX_OBJECT_DISTANCE);
114  const int nDispMax =
115  stereoMatcher->GetDisparityEstimate(0.2f * OLP_MAX_OBJECT_DISTANCE);
116 
117 #pragma omp parallel
118  {
119  CDynamicArray afSIFTDescriptors(4);
120 
121 #pragma omp for schedule(static, 32)
122 
123  for (int i = 0; i < nNumFeatures; i++)
124  {
125  if (m_pInterestPoints[i].x > OLP_MIN_X_VALUE_SIFT_POINTS)
126  {
127  Vec2d vCorrespondingPointRight;
128  Vec3d vPoint3D;
129 
130  // img l, img r, px, py, size of correlation window, min disparity, max disparity,
131  // corresponding point 2d, 3d point, correlation threshold, images are undistorted
132  int nMatchingResult = stereoMatcher->Match(pImageLeftGrey,
133  pImageRightGrey,
134  (int)m_pInterestPoints[i].x,
135  (int)m_pInterestPoints[i].y,
136  10,
137  nDispMin,
138  nDispMax,
139  vCorrespondingPointRight,
140  vPoint3D,
141  0.7f,
142  true);
143 
144  if (nMatchingResult >= 0)
145  {
146  if (vPoint3D.z < OLP_MAX_OBJECT_DISTANCE)
147  {
148  m_pSIFTFeatureCalculator->CreateSIFTDescriptors(
149  pImageLeftGrey,
150  &afSIFTDescriptors,
151  m_pInterestPoints[i].x,
152  m_pInterestPoints[i].y,
153  1.0f,
154  false,
155  false);
156 
157  if (afSIFTDescriptors.GetSize() > 0)
158  {
159 #pragma omp critical
160  {
161  aAllSIFTPoints.AddElement(
162  (CSIFTFeatureEntry*)((CSIFTFeatureEntry*)
163  afSIFTDescriptors[0])
164  ->Clone());
165  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d =
166  vPoint3D;
167  }
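 // afSIFTDescriptors still owns the descriptor: keep a raw pointer, prevent
 // Clear() from freeing it (bDelete = false), then delete it manually once the
 // cloned copy has been stored in aAllSIFTPoints above.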
168  CSIFTFeatureEntry* pFeature =
169  (CSIFTFeatureEntry*)afSIFTDescriptors[0];
170  afSIFTDescriptors[0]->bDelete = false;
171  afSIFTDescriptors.Clear();
172  delete pFeature;
173  }
174  }
175  }
176  }
177  }
178  }
179 
180  delete stereoMatcher;
181  }
182 
183 
184  //**************************************************************************************************************
185  // find MSER regions
186  //**************************************************************************************************************
187 
188 
189 #pragma omp section
190  {
191 #ifdef OLP_USE_MSERS
192  CMSERCalculation::FindMSERs3D(pImageLeftColor,
193  pImageRightColor,
194  pStereoCalibration,
195  OLP_TOLERANCE_MODIFICATOR,
196  aAllMSERs);
197 #endif
198 
199  //gettimeofday(&tEnd, 0);
200  //tTimeDiff = (1000*tEnd.tv_sec+tEnd.tv_usec/1000) - (1000*tStart.tv_sec+tStart.tv_usec/1000);
201  //ARMARX_VERBOSE_S << "Time for stereo correspondences: " << tTimeDiff << " ms";
202 
203 
204  //ARMARX_VERBOSE_S << "z distances:";
205  //for (int i=0; i<vFeaturePoints3d.GetSize(); i++)
206  //    ARMARX_VERBOSE_S << (int)vFeaturePoints3d[i].z << " ";
207  }
208 
209 
210  //**************************************************************************************************************
211  // get 3-d points from a disparity map
212  //**************************************************************************************************************
213 
214 #pragma omp section
215  {
216 #ifdef OLP_USE_DEPTH_MAP
217 
218  //timeval tStart, tEnd;
219  //gettimeofday(&tStart, 0);
220 
221  GetPointsFromDisparity(pImageLeftColor,
222  pImageRightColor,
223  pImageLeftGrey,
224  pImageRightGrey,
225  pStereoCalibration,
226  nDisparityPointDistance,
227  aPointsFromDepthImage,
228  pDisparityImage,
229  pAll3DPoints);
230 
231  //ARMARX_VERBOSE_S << "Got " << (int)aPointsFromDisparity.size() << " points from the disparity map";
232 
233  //gettimeofday(&tEnd, 0);
234  //long tTimeDiff = (1000*tEnd.tv_sec+tEnd.tv_usec/1000) - (1000*tStart.tv_sec+tStart.tv_usec/1000);
235  //ARMARX_VERBOSE_S << "Time for disparity: " << tTimeDiff << " ms";
236 
237 #endif
238  }
239  }
240 
241  gettimeofday(&tEnd, 0);
242  long tTimeDiff =
243  (1000 * tEnd.tv_sec + tEnd.tv_usec / 1000) - (1000 * tStart.tv_sec + tStart.tv_usec / 1000);
244  ARMARX_VERBOSE_S << "Time for GetAllFeaturePoints(): " << tTimeDiff << " ms";
245 }
246 
247 
248 #ifdef OLP_USE_DEPTH_MAP
249 
250 void
251 CFeatureCalculation::GetAllFeaturePoints(const CByteImage* pImageLeftColor,
252  const CByteImage* pImageLeftGrey,
253  const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointcloud,
254  const int nDisparityPointDistance,
255  CSIFTFeatureArray& aAllSIFTPoints,
256  std::vector<CMSERDescriptor3D*>& aAllMSERs,
257  std::vector<CHypothesisPoint*>& aPointsFromDepthImage,
258  const CCalibration* calibration)
259 {
260  timeval tStart, tEnd;
261  gettimeofday(&tStart, 0);
262  omp_set_nested(true);
263 
264 #pragma omp parallel sections
265  {
266 
267  //**************************************************************************************************************
268  // calculate Harris interest points and find stereo correspondences
269  //**************************************************************************************************************
270 
271 
272 #pragma omp section
273  {
274  const int nNumFeatures =
275  ImageProcessor::CalculateHarrisInterestPoints(pImageLeftGrey,
276  m_pInterestPoints,
277  m_nMaxNumInterestPoints,
278  OLP_HARRIS_POINT_QUALITY,
279  OLP_HARRIS_POINT_DISTANCE); // 0.001
280  //ARMARX_VERBOSE_S << " --- Number of Features: " << nNumFeatures << " ---";
281 
282 #pragma omp parallel
283  {
284  CDynamicArray afSIFTDescriptors(4);
285 
286 #pragma omp for schedule(static, 32)
287  for (int i = 0; i < nNumFeatures; i++)
288  {
289  if (m_pInterestPoints[i].x > OLP_MIN_X_VALUE_SIFT_POINTS)
290  {
291  Vec3d point3D = GetCorresponding3DPoint(m_pInterestPoints[i], pointcloud);
292 
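 // Only keep interest points with a valid depth (z > 0) that lie within the
 // maximum object distance; the comparatively expensive SIFT descriptor is
 // computed only for those.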
293  if (point3D.z > 0)
294  {
295  if (point3D.z < OLP_MAX_OBJECT_DISTANCE)
296  {
297  m_pSIFTFeatureCalculator->CreateSIFTDescriptors(
298  pImageLeftGrey,
299  &afSIFTDescriptors,
300  m_pInterestPoints[i].x,
301  m_pInterestPoints[i].y,
302  1.0f,
303  false,
304  false);
305 
306  if (afSIFTDescriptors.GetSize() > 0)
307  {
308 #pragma omp critical
309  {
310  aAllSIFTPoints.AddElement(
311  (CSIFTFeatureEntry*)((CSIFTFeatureEntry*)
312  afSIFTDescriptors[0])
313  ->Clone());
314  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d.x =
315  point3D.x;
316  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d.y =
317  point3D.y;
318  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d.z =
319  point3D.z;
320  }
321  CSIFTFeatureEntry* pFeature =
322  (CSIFTFeatureEntry*)afSIFTDescriptors[0];
323  afSIFTDescriptors[0]->bDelete = false;
324  afSIFTDescriptors.Clear();
325  delete pFeature;
326  }
327  }
328  }
329  }
330  }
331  }
332  }
333 
334 
335  //**************************************************************************************************************
336  // find MSER regions
337  //**************************************************************************************************************
338 
339 
340 #pragma omp section
341  {
342 #ifdef OLP_USE_MSERS
343 
344  CByteImage* pHSVImageLeft = new CByteImage(pImageLeftColor);
345  ImageProcessor::CalculateHSVImage(pImageLeftColor, pHSVImageLeft);
346 
347  std::vector<CMSERDescriptor*> aMSERDescriptors2D;
348  CMSERCalculation::FindMSERs2D(pImageLeftColor, pHSVImageLeft, aMSERDescriptors2D);
349 
350  for (size_t i = 0; i < aMSERDescriptors2D.size(); i++)
351  {
352  CMSERDescriptor* descriptor2D = aMSERDescriptors2D.at(i);
353  CMSERDescriptor3D* descriptor3D = new CMSERDescriptor3D();
354  descriptor3D->pRegionLeft = descriptor2D;
355 
356  const Vec2d mean2D = descriptor2D->vMean;
357  descriptor3D->vPosition = GetCorresponding3DPoint(mean2D, pointcloud);
358 
359  Vec2d vSigmaPoint2D, vTemp;
360 
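 // Project four sigma points (mean +/- sqrt(eigenvalue) along each eigenvector
 // of the 2D region) into 3D via the organized point cloud, so the descriptor
 // captures the spatial extent of the region, not just its centroid.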
361  Math2d::MulVecScalar(
362  descriptor2D->vEigenvector1, -sqrtf(descriptor2D->fEigenvalue1), vTemp);
363  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
364  descriptor3D->vSigmaPoint1a = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
365 
366  Math2d::MulVecScalar(
367  descriptor2D->vEigenvector1, sqrtf(descriptor2D->fEigenvalue1), vTemp);
368  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
369  descriptor3D->vSigmaPoint1b = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
370 
371  Math2d::MulVecScalar(
372  descriptor2D->vEigenvector2, -sqrtf(descriptor2D->fEigenvalue2), vTemp);
373  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
374  descriptor3D->vSigmaPoint2a = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
375 
376  Math2d::MulVecScalar(
377  descriptor2D->vEigenvector2, sqrtf(descriptor2D->fEigenvalue2), vTemp);
378  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
379  descriptor3D->vSigmaPoint2b = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
380 
381  aAllMSERs.push_back(descriptor3D);
382  }
383 
384 #endif
385  }
386 
387 
388  //**************************************************************************************************************
389  // get 3D points from the depth map
390  //**************************************************************************************************************
391 
392 #pragma omp section
393  {
394  const float fIntensityFactor = 1.0f / 255.0f;
395  const size_t width = pointcloud->width;
396  const size_t height = pointcloud->height;
397 
398  CByteImage* leftImageGaussFiltered = new CByteImage(pImageLeftColor);
399  ::ImageProcessor::GaussianSmooth3x3(pImageLeftColor, leftImageGaussFiltered);
400 
401  for (size_t i = 0; i < height; i += nDisparityPointDistance)
402  {
403  for (size_t j = 0; j < width; j += nDisparityPointDistance)
404  {
405  const pcl::PointXYZRGBA point3D = pointcloud->at(i * width + j);
406 
407  if (point3D.z > 0)
408  {
409  if (point3D.z < OLP_MAX_OBJECT_DISTANCE)
410  {
411  // create point descriptor
412  CHypothesisPoint* pNewPoint = new CHypothesisPoint();
413  pNewPoint->ePointType = CHypothesisPoint::eDepthMapPoint;
414  Vec3d vPoint3D = {point3D.x, point3D.y, point3D.z};
415  Math3d::SetVec(pNewPoint->vPosition, vPoint3D);
416  Math3d::SetVec(pNewPoint->vOldPosition, vPoint3D);
417  pNewPoint->pMSERDescriptor = NULL;
418  pNewPoint->pFeatureDescriptors = new CSIFTFeatureArray();
419  pNewPoint->fMembershipProbability = 0;
420 
421  Vec2d vPoint2D;
422  calibration->CameraToImageCoordinates(vPoint3D, vPoint2D, false);
423  int u = (int)(vPoint2D.x + 0.5f);
424  int v = (int)(vPoint2D.y + 0.5f);
425  u = (u < 0) ? 0
426  : ((u > leftImageGaussFiltered->width - 1)
427  ? leftImageGaussFiltered->width - 1
428  : u);
429  v = (v < 0) ? 0
430  : ((v > leftImageGaussFiltered->height - 1)
431  ? leftImageGaussFiltered->height - 1
432  : v);
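 // Blend the color of the Gauss-filtered camera image with the point cloud
 // color; with weight = 1.0f only the (smoothed) image color is used.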
433  float r, g, b;
434  const float weight = 1.0f;
435  r = weight * leftImageGaussFiltered
436  ->pixels[3 * (v * leftImageGaussFiltered->width + u)] +
437  (1.0f - weight) * point3D.r;
438  g = weight *
439  leftImageGaussFiltered
440  ->pixels[3 * (v * leftImageGaussFiltered->width + u) + 1] +
441  (1.0f - weight) * point3D.g;
442  b = weight *
443  leftImageGaussFiltered
444  ->pixels[3 * (v * leftImageGaussFiltered->width + u) + 2] +
445  (1.0f - weight) * point3D.b;
446 
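 // Store the mean intensity (scaled to [0, 1]) separately and normalize each
 // color channel by a value proportional to the intensity, which makes the
 // color part of the descriptor largely independent of overall brightness.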
447  const float fIntensity = (r + g + b) / 3.0f;
448  const float normalizationFactor = 1.0f / (fIntensity + 3.0f);
449  pNewPoint->fIntensity = fIntensity * fIntensityFactor;
450  pNewPoint->fColorR = normalizationFactor * (r + 1);
451  pNewPoint->fColorG = normalizationFactor * (g + 1);
452  pNewPoint->fColorB = normalizationFactor * (b + 1);
453 
454  aPointsFromDepthImage.push_back(pNewPoint);
455  }
456  }
457  }
458  }
459 
460  delete leftImageGaussFiltered;
461  }
462  }
463 
464  gettimeofday(&tEnd, 0);
465  long tTimeDiff =
466  (1000 * tEnd.tv_sec + tEnd.tv_usec / 1000) - (1000 * tStart.tv_sec + tStart.tv_usec / 1000);
467  ARMARX_VERBOSE_S << "Time for GetAllFeaturePoints(): " << tTimeDiff << " ms";
468 }
469 
470 #endif
471 
472 static void
473 doStereoSGBM(int minDisparity,
474  int numDisparities,
475  int nSADWindowSize,
476  int P1,
477  int P2,
478  int disp12MaxDiff,
479  cv::Mat leftInput,
480  cv::Mat rightInput,
481  cv::Mat output)
482 {
483  int preFilterCap = 0;
484  int uniquenessRatio = 0;
485  int speckleWindowSize = 0;
486  int speckleRange = 0;
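 // OpenCV 2.x exposes StereoSGBM as a function object, e.g.
 //     cv::StereoSGBM sgbm(minDisparity, numDisparities, blockSize, ...);
 //     sgbm(left, right, disparity);
 // while OpenCV 3.x and later use the create() factory together with compute(),
 // as in the two branches below.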
487 #if CV_MAJOR_VERSION == 2
488  bool fullDP = true;
489  cv::StereoSGBM stereoSGBM(minDisparity,
490  numDisparities,
491  nSADWindowSize,
492  P1,
493  P2,
494  disp12MaxDiff,
495  preFilterCap,
496  uniquenessRatio,
497  speckleWindowSize,
498  speckleRange,
499  fullDP);
500  stereoSGBM(leftInput, rightInput, output);
501 #else
502  cv::Ptr<cv::StereoSGBM> stereoSGBM = cv::StereoSGBM::create(minDisparity,
503  numDisparities,
504  nSADWindowSize,
505  P1,
506  P2,
507  disp12MaxDiff,
508  preFilterCap,
509  uniquenessRatio,
510  speckleWindowSize,
511  speckleRange,
512  cv::StereoSGBM::MODE_SGBM);
513  stereoSGBM->compute(leftInput, rightInput, output);
514 #endif
515 }
516 
517 void
518 CFeatureCalculation::GetPointsFromDisparity(const CByteImage* pImageLeftColor,
519  const CByteImage* pImageRightColor,
520  const CByteImage* pImageLeftGrey,
521  const CByteImage* pImageRightGrey,
522  CStereoCalibration* pStereoCalibration,
523  const int nDisparityPointDistance,
524  std::vector<CHypothesisPoint*>& aPointsFromDisparity,
525  CByteImage* pDisparityImage,
526  std::vector<Vec3d>* pAll3DPoints)
527 {
528  // rectify images
529  CByteImage *pRectifiedImageLeftGrey, *pRectifiedImageRightGrey,
530  *pRectifiedImageLeftGreyHalfsize, *pRectifiedImageRightGreyHalfsize,
531  *pRectifiedImageLeftGreyQuartersize, *pRectifiedImageRightGreyQuartersize;
532  IplImage *pRectifiedIplImageLeft, *pRectifiedIplImageRight, *pRectifiedIplImageLeftHalfsize,
533  *pRectifiedIplImageRightHalfsize, *pRectifiedIplImageLeftQuartersize,
534  *pRectifiedIplImageRightQuartersize;
535  CByteImage* pRectifiedImageLeftColorGaussFiltered;
536  cv::Mat mDispImg, mDispImgHalfsize, mDispImgQuartersize;
537  bool bGreyImagesReady = false;
538  bool bGreyImagesHalfsizeReady = false;
539  bool bGreyImagesQuartersizeReady = false;
540 
541 #pragma omp parallel sections
542  {
543 #pragma omp section
544  {
545  const CByteImage* ppOriginalImages[2];
546  ppOriginalImages[0] = pImageLeftGrey;
547  ppOriginalImages[1] = pImageRightGrey;
548  pRectifiedImageLeftGrey =
549  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
550  pRectifiedImageRightGrey =
551  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
552  CByteImage* ppRectifiedImages[2];
553  ppRectifiedImages[0] = pRectifiedImageLeftGrey;
554  ppRectifiedImages[1] = pRectifiedImageRightGrey;
555 
556  CRectification* pRectification = new CRectification(true, false);
557  pRectification->Init(pStereoCalibration);
558  pRectification->Rectify(ppOriginalImages, ppRectifiedImages);
559 
560  pRectifiedIplImageLeft = IplImageAdaptor::Adapt(pRectifiedImageLeftGrey);
561  pRectifiedIplImageRight = IplImageAdaptor::Adapt(pRectifiedImageRightGrey);
562  bGreyImagesReady = true;
563 
564  // half size
565  pRectifiedImageLeftGreyHalfsize =
566  new CByteImage(OLP_IMG_WIDTH / 2, OLP_IMG_HEIGHT / 2, CByteImage::eGrayScale);
567  pRectifiedImageRightGreyHalfsize =
568  new CByteImage(OLP_IMG_WIDTH / 2, OLP_IMG_HEIGHT / 2, CByteImage::eGrayScale);
569  ImageProcessor::Resize(pRectifiedImageLeftGrey, pRectifiedImageLeftGreyHalfsize);
570  ImageProcessor::Resize(pRectifiedImageRightGrey, pRectifiedImageRightGreyHalfsize);
571 
572  pRectifiedIplImageLeftHalfsize =
573  IplImageAdaptor::Adapt(pRectifiedImageLeftGreyHalfsize);
574  pRectifiedIplImageRightHalfsize =
575  IplImageAdaptor::Adapt(pRectifiedImageRightGreyHalfsize);
576  bGreyImagesHalfsizeReady = true;
577 
578  // quarter size
579  pRectifiedImageLeftGreyQuartersize =
580  new CByteImage(OLP_IMG_WIDTH / 4, OLP_IMG_HEIGHT / 4, CByteImage::eGrayScale);
581  pRectifiedImageRightGreyQuartersize =
582  new CByteImage(OLP_IMG_WIDTH / 4, OLP_IMG_HEIGHT / 4, CByteImage::eGrayScale);
583  ImageProcessor::Resize(pRectifiedImageLeftGrey, pRectifiedImageLeftGreyQuartersize);
584  ImageProcessor::Resize(pRectifiedImageRightGrey, pRectifiedImageRightGreyQuartersize);
585 
586  pRectifiedIplImageLeftQuartersize =
587  IplImageAdaptor::Adapt(pRectifiedImageLeftGreyQuartersize);
588  pRectifiedIplImageRightQuartersize =
589  IplImageAdaptor::Adapt(pRectifiedImageRightGreyQuartersize);
590  bGreyImagesQuartersizeReady = true;
591 
592  delete pRectification;
593  }
594 
595 #pragma omp section
596  {
597  const CByteImage* ppOriginalImages[2];
598  ppOriginalImages[0] = pImageLeftColor;
599  ppOriginalImages[1] = pImageRightColor;
600  CByteImage* pRectifiedImageLeftColor =
601  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
602  CByteImage* pRectifiedImageRightColor =
603  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
604  CByteImage* ppRectifiedImages[2];
605  ppRectifiedImages[0] = pRectifiedImageLeftColor;
606  ppRectifiedImages[1] = pRectifiedImageRightColor;
607 
608  CRectification* pRectification = new CRectification(true, false);
609  pRectification->Init(pStereoCalibration);
610  pRectification->Rectify(ppOriginalImages, ppRectifiedImages);
611 
612  pRectifiedImageLeftColorGaussFiltered =
613  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
614  ImageProcessor::GaussianSmooth3x3(pRectifiedImageLeftColor,
615  pRectifiedImageLeftColorGaussFiltered);
616 
617  delete pRectifiedImageLeftColor;
618  delete pRectifiedImageRightColor;
619  delete pRectification;
620  }
621 
622 
623  // get disparity using semi-global block matching
624 
625 #pragma omp section
626  {
627  while (!bGreyImagesReady)
628  {
629  Threading::YieldThread();
630  }
631 
632  // StereoSGBM::StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1=0, int P2=0, int disp12MaxDiff=0,
633  // int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, bool fullDP=false)
634  //
635  // minDisparity – Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
636  // numDisparities – Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
637  // SADWindowSize – Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.
638  // P1 – The first parameter controlling the disparity smoothness. See below.
639  // P2 – The second parameter controlling the disparity smoothness. The larger the values are, the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1 between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor pixels. The algorithm requires P2 > P1 . See stereo_match.cpp sample where some reasonably good P1 and P2 values are shown (like 8*number_of_image_channels*SADWindowSize*SADWindowSize and 32*number_of_image_channels*SADWindowSize*SADWindowSize , respectively).
640  // disp12MaxDiff – Maximum allowed difference (in integer pixel units) in the left-right disparity check. Set it to a non-positive value to disable the check.
641  // preFilterCap – Truncation value for the prefiltered image pixels. The algorithm first computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. The result values are passed to the Birchfield-Tomasi pixel cost function.
642  // uniquenessRatio – Margin in percentage by which the best (minimum) computed cost function value should “win” the second best value to consider the found match correct. Normally, a value within the 5-15 range is good enough.
643  // speckleWindowSize – Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
644  // speckleRange – Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, multiple of 16. Normally, 16 or 32 is good enough.
645  // fullDP – Set it to true to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to false.
646  const cv::Mat mLeftImg = cv::cvarrToMat(pRectifiedIplImageLeft);
647  const cv::Mat mRightImg = cv::cvarrToMat(pRectifiedIplImageRight);
648  const int nSADWindowSize = 7; // 11
649  const int nPenaltyDispDiffOne = 8 * 3 * nSADWindowSize * nSADWindowSize; // 400
650  const int nPenaltyDispDiffBiggerOne = 32 * 3 * nSADWindowSize * nSADWindowSize; // 600
651 
652  doStereoSGBM(1,
653  8 * 16,
654  nSADWindowSize,
655  nPenaltyDispDiffOne,
656  nPenaltyDispDiffBiggerOne,
657  4,
658  mLeftImg,
659  mRightImg,
660  mDispImg);
661  }
662 
663 
664 #pragma omp section
665  {
666  while (!bGreyImagesHalfsizeReady)
667  {
668  Threading::YieldThread();
669  }
670 
671  const cv::Mat mLeftImg = cv::cvarrToMat(pRectifiedIplImageLeftHalfsize);
672  const cv::Mat mRightImg = cv::cvarrToMat(pRectifiedIplImageRightHalfsize);
673  const int nSADWindowSize = 7;
674  const int nPenaltyDispDiffOne = 8 * 3 * nSADWindowSize * nSADWindowSize; // 400
675  const int nPenaltyDispDiffBiggerOne = 32 * 3 * nSADWindowSize * nSADWindowSize; // 600
676 
677  doStereoSGBM(1,
678  4 * 16,
679  nSADWindowSize,
680  nPenaltyDispDiffOne,
681  nPenaltyDispDiffBiggerOne,
682  4,
683  mLeftImg,
684  mRightImg,
685  mDispImgHalfsize);
686  }
687 
688 
689 #pragma omp section
690  {
691  while (!bGreyImagesQuartersizeReady)
692  {
693  Threading::YieldThread();
694  }
695 
696  const cv::Mat mLeftImg = cv::cvarrToMat(pRectifiedIplImageLeftQuartersize);
697  const cv::Mat mRightImg = cv::cvarrToMat(pRectifiedIplImageRightQuartersize);
698  const int nSADWindowSize = 7;
699  const int nPenaltyDispDiffOne = 8 * 3 * nSADWindowSize * nSADWindowSize; // 400
700  const int nPenaltyDispDiffBiggerOne = 32 * 3 * nSADWindowSize * nSADWindowSize; // 600
701 
702  doStereoSGBM(1,
703  4 * 16,
704  nSADWindowSize,
705  nPenaltyDispDiffOne,
706  nPenaltyDispDiffBiggerOne,
707  4,
708  mLeftImg,
709  mRightImg,
710  mDispImgQuartersize);
711  }
712  }
713 
714 
715  CStereoMatcher* pStereoMatcher = new CStereoMatcher();
716  CStereoCalibration* pStereoCalibrationCopy = new CStereoCalibration(*pStereoCalibration);
717  pStereoMatcher->InitCameraParameters(pStereoCalibrationCopy, false);
718  const int nDispMin = pStereoMatcher->GetDisparityEstimate(4000);
719  const int nDispMax = pStereoMatcher->GetDisparityEstimate(200);
720  Vec2d vImgPointLeft, vImgPointRight;
721  Vec3d vPoint3D;
722  const float fIntensityFactor = 1.0f / (3 * 255);
723 
724 
725  // combine disparities
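 // SGBM was computed at full, half and quarter resolution above. For every pixel,
 // average all disparity estimates (scaled back to full resolution) that fall into
 // the valid range (nDispMin, nDispMax); pixels without any valid estimate keep 0.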
726  float pDisparities[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
727 #pragma omp parallel for schedule(static, 80)
728  for (int i = 0; i < OLP_IMG_HEIGHT; i++)
729  {
730  for (int j = 0; j < OLP_IMG_WIDTH; j++)
731  {
732  int nDispFullsize = mDispImg.at<short>(i, j) / 16;
733  int nDispHalfsize = 2 * mDispImgHalfsize.at<short>(i / 2, j / 2) / 16;
734  int nDispQuartersize = 4 * mDispImgQuartersize.at<short>(i / 4, j / 4) / 16;
735 
736  int nDisp = 0;
737  float fDisp = 0;
738  int nNumValidDisparities = 0;
739 
740  if ((nDispFullsize > nDispMin) && (nDispFullsize < nDispMax))
741  {
742  nDisp += nDispFullsize;
743  nNumValidDisparities++;
744  }
745 
746  if ((nDispHalfsize > nDispMin) && (nDispHalfsize < nDispMax))
747  {
748  nDisp += nDispHalfsize;
749  nNumValidDisparities++;
750  }
751 
752  if ((nDispQuartersize > nDispMin) && (nDispQuartersize < nDispMax))
753  {
754  nDisp += nDispQuartersize;
755  nNumValidDisparities++;
756  }
757 
758  if (nNumValidDisparities > 0)
759  {
760  fDisp = (float)nDisp / (float)nNumValidDisparities;
761  }
762 
763  pDisparities[i * OLP_IMG_WIDTH + j] = fDisp;
764  }
765  }
766 
767 
768  // fill holes (currently disabled)
769  if (false)
770  {
771  int pDisparitiesCopy[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
772 
773  for (int i = 0; i < OLP_IMG_HEIGHT * OLP_IMG_WIDTH; i++)
774  {
775  pDisparitiesCopy[i] = pDisparities[i];
776  }
777 
778  const int nMargin = 10;
779  const int nHoleFillingAveragingRadius = 5;
780 
781  for (int i = nMargin; i < OLP_IMG_HEIGHT - nMargin; i++)
782  {
783  for (int j = nMargin; j < OLP_IMG_WIDTH - nMargin; j++)
784  {
785  if (pDisparitiesCopy[i * OLP_IMG_WIDTH + j] == 0)
786  {
787  int nSum = 0;
788  int nNumValidDisparities = 0;
789 
790  for (int k = -nHoleFillingAveragingRadius; k <= nHoleFillingAveragingRadius;
791  k++)
792  {
793  for (int l = -nHoleFillingAveragingRadius; l <= nHoleFillingAveragingRadius;
794  l++)
795  {
796  if (pDisparitiesCopy[(i + k) * OLP_IMG_WIDTH + (j + l)] > 0)
797  {
798  nSum += pDisparitiesCopy[(i + k) * OLP_IMG_WIDTH + (j + l)];
799  nNumValidDisparities++;
800  }
801  }
802  }
803 
804  if (nNumValidDisparities > 0)
805  {
806  pDisparities[i * OLP_IMG_WIDTH + j] = nSum / nNumValidDisparities;
807  }
808  }
809  }
810  }
811  }
812 
813 
814  // visualize
815  CByteImage* pRectifiedDisparityImage =
816  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
817 
818  for (int i = 0; i < OLP_IMG_HEIGHT; i++)
819  {
820  for (int j = 0; j < OLP_IMG_WIDTH; j++)
821  {
822  int nDisp = pDisparities[i * OLP_IMG_WIDTH + j];
823  pRectifiedDisparityImage->pixels[i * OLP_IMG_WIDTH + j] = (nDisp > 255) ? 255 : nDisp;
824  }
825  }
826 
827  // get unrectified disparity image
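 // The disparity was computed on rectified images; map each rectified pixel back
 // into the original left image using the rectification homography, then run two
 // hole-filling passes to close the gaps left by the non-integer mapping.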
828  ImageProcessor::Zero(pDisparityImage);
829  Vec2d vRectPos, vUnRectPos;
830  Mat3d mRectificationHomography = pStereoCalibration->rectificationHomographyLeft;
831 
832  for (int i = 0; i < OLP_IMG_HEIGHT; i++)
833  {
834  for (int j = 0; j < OLP_IMG_WIDTH; j++)
835  {
836  vRectPos.x = j;
837  vRectPos.y = i;
838  Math2d::ApplyHomography(mRectificationHomography, vRectPos, vUnRectPos);
839 
840  if (0 <= vUnRectPos.y && vUnRectPos.y < OLP_IMG_HEIGHT && 0 <= vUnRectPos.x &&
841  vUnRectPos.x < OLP_IMG_WIDTH)
842  {
843  pDisparityImage->pixels[(int)vUnRectPos.y * OLP_IMG_WIDTH + (int)vUnRectPos.x] =
844  pRectifiedDisparityImage->pixels[i * OLP_IMG_WIDTH + j];
845  }
846  }
847  }
848 
849  COLPTools::FillHolesGray(pDisparityImage, pRectifiedDisparityImage, 1);
850  COLPTools::FillHolesGray(pRectifiedDisparityImage, pDisparityImage, 1);
851  delete pRectifiedDisparityImage;
852 
853 
854  // get smoothed disparity
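 // Average four smoothed versions of the disparity map (radii 0, 1, 2 and 4,
 // computed in parallel); the smoothing ignores zero-valued (invalid) pixels so
 // holes do not bleed into their neighborhood.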
855  float* pSmoothedDisparity = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
857 
858  if (true)
859  {
860  float* pSmoothedDisparity1 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
861  float* pSmoothedDisparity2 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
862  float* pSmoothedDisparity3 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
863  float* pSmoothedDisparity4 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
864 #pragma omp parallel sections
865  {
866 #pragma omp section
867  {
868  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity1, 0);
869  }
870 
871 #pragma omp section
872  {
873  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity2, 1);
874  }
875 
876 #pragma omp section
877  {
878  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity3, 2);
879  }
880 
881 #pragma omp section
882  {
883  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity4, 4);
884  }
885  }
886 
887  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
888  {
889  pSmoothedDisparity[i] = 0.25f * (pSmoothedDisparity1[i] + pSmoothedDisparity2[i] +
890  pSmoothedDisparity3[i] + pSmoothedDisparity4[i]);
891  }
892 
893  delete[] pSmoothedDisparity1;
894  delete[] pSmoothedDisparity2;
895  delete[] pSmoothedDisparity3;
896  delete[] pSmoothedDisparity4;
897  }
898  else
899  {
900  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
901  {
902  pSmoothedDisparity[i] = pDisparities[i];
903  }
904  }
905 
906 
907  const float fDispMin = nDispMin;
908  const float fDispMax = nDispMax;
909  CDynamicArray afSIFTDescriptors(4);
910 
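 // Sample the smoothed disparity map on a regular grid (every
 // nDisparityPointDistance pixels), triangulate a 3D point for every valid
 // disparity, attach the normalized color of the rectified left image and, where
 // possible, a SIFT descriptor computed at that pixel.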
911  for (int i = 0; i < OLP_IMG_HEIGHT; i += nDisparityPointDistance)
912  {
913  for (int j = 0; j < OLP_IMG_WIDTH; j += nDisparityPointDistance)
914  {
915  //fDisp = m3.at<short>(i,j)/16;
916  float fDisp = pSmoothedDisparity[i * OLP_IMG_WIDTH + j];
917 
918  if ((fDisp > fDispMin) && (fDisp < fDispMax))
919  {
920  //ARMARX_VERBOSE_S << "disparity: " << fDisp;
921  vImgPointLeft.x = j;
922  vImgPointLeft.y = i;
923  vImgPointRight.x = j - fDisp;
924  vImgPointRight.y = i;
925 
926  pStereoCalibration->Calculate3DPoint(
927  vImgPointLeft, vImgPointRight, vPoint3D, true, false);
928  //ARMARX_VERBOSE_S << "z: " << vPoint3D.z;
929 
930  if (vPoint3D.z < OLP_MAX_OBJECT_DISTANCE)
931  {
932  // create point descriptor
933  CHypothesisPoint* pNewPoint = new CHypothesisPoint();
934  pNewPoint->ePointType = CHypothesisPoint::eDepthMapPoint;
935  Math3d::SetVec(pNewPoint->vPosition, vPoint3D);
936  Math3d::SetVec(pNewPoint->vOldPosition, vPoint3D);
937  pNewPoint->pMSERDescriptor = NULL;
938  pNewPoint->pFeatureDescriptors = new CSIFTFeatureArray();
939  pNewPoint->fMembershipProbability = 0;
940  const float fIntensity =
941  pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j)] +
942  pRectifiedImageLeftColorGaussFiltered
943  ->pixels[3 * (i * OLP_IMG_WIDTH + j) + 1] +
944  pRectifiedImageLeftColorGaussFiltered
945  ->pixels[3 * (i * OLP_IMG_WIDTH + j) + 2] +
946  3;
947  pNewPoint->fIntensity = (fIntensity - 3) * fIntensityFactor;
948  pNewPoint->fColorR = (pRectifiedImageLeftColorGaussFiltered
949  ->pixels[3 * (i * OLP_IMG_WIDTH + j)] +
950  1) /
951  fIntensity;
952  pNewPoint->fColorG = (pRectifiedImageLeftColorGaussFiltered
953  ->pixels[3 * (i * OLP_IMG_WIDTH + j) + 1] +
954  1) /
955  fIntensity;
956  pNewPoint->fColorB = (pRectifiedImageLeftColorGaussFiltered
957  ->pixels[3 * (i * OLP_IMG_WIDTH + j) + 2] +
958  1) /
959  fIntensity;
960 
961  // create sift descriptor for point
962  m_pSIFTFeatureCalculator->CreateSIFTDescriptors(
963  pRectifiedImageLeftGrey, &afSIFTDescriptors, j, i, 1.0f, false, false);
964 
965  if (afSIFTDescriptors.GetSize() > 0)
966  {
967  pNewPoint->pFeatureDescriptors->AddElement(
968  (CSIFTFeatureEntry*)((CSIFTFeatureEntry*)afSIFTDescriptors[0])
969  ->Clone());
970  (*pNewPoint->pFeatureDescriptors)[0]->point3d = vPoint3D;
971  // cleanup
972  CSIFTFeatureEntry* pFeature = (CSIFTFeatureEntry*)afSIFTDescriptors[0];
973  afSIFTDescriptors[0]->bDelete = false;
974  afSIFTDescriptors.Clear();
975  delete pFeature;
976  }
977 
978  aPointsFromDisparity.push_back(pNewPoint);
979  }
980  }
981  else
982  {
983  //ARMARX_VERBOSE_S << "invalid disparity: " << fDisp;
984  }
985  }
986  }
987 
988  if (pAll3DPoints)
989  {
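 // Optionally export one triangulated point per grid position. Points farther
 // away than twice the maximum object distance are replaced by the position of
 // the first accepted disparity point, presumably to keep the exported cloud bounded.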
990  for (int i = 0; i < OLP_IMG_HEIGHT; i += nDisparityPointDistance)
991  {
992  for (int j = 0; j < OLP_IMG_WIDTH; j += nDisparityPointDistance)
993  {
994  float fDisp = pSmoothedDisparity[i * OLP_IMG_WIDTH + j];
995  vImgPointLeft.x = j;
996  vImgPointLeft.y = i;
997  vImgPointRight.x = j - fDisp;
998  vImgPointRight.y = i;
999  pStereoCalibration->Calculate3DPoint(
1000  vImgPointLeft, vImgPointRight, vPoint3D, true, false);
1001 
1002  if (Math3d::Length(vPoint3D) > 2 * OLP_MAX_OBJECT_DISTANCE)
1003  {
1004  if (aPointsFromDisparity.size() > 0)
1005  {
1006  Math3d::SetVec(vPoint3D, aPointsFromDisparity.at(0)->vPosition);
1007  }
1008  }
1009 
1010  pAll3DPoints->push_back(vPoint3D);
1011  }
1012  }
1013  }
1014 
1015 
1016  delete pRectifiedImageLeftGrey;
1017  delete pRectifiedImageRightGrey;
1018  delete pRectifiedImageLeftGreyHalfsize;
1019  delete pRectifiedImageRightGreyHalfsize;
1020  delete pRectifiedImageLeftGreyQuartersize;
1021  delete pRectifiedImageRightGreyQuartersize;
1022  delete pRectifiedImageLeftColorGaussFiltered;
1023  delete pStereoMatcher;
1024  delete pStereoCalibrationCopy;
1025  cvReleaseImageHeader(&pRectifiedIplImageLeft);
1026  cvReleaseImageHeader(&pRectifiedIplImageRight);
1027  delete[] pSmoothedDisparity;
1028 }
1029 
1030 void
1031 CFeatureCalculation::CalculateSmoothedDisparityImage(float* pInputDisparity,
1032  float* pSmoothedDisparity,
1033  const int nRadius)
1034 {
1035  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
1036  {
1037  pSmoothedDisparity[i] = pInputDisparity[i];
1038  }
1039 
1040  for (int i = nRadius; i < OLP_IMG_HEIGHT - nRadius; i++)
1041  {
1042  for (int j = nRadius; j < OLP_IMG_WIDTH - nRadius; j++)
1043  {
1044  int nIndex = i * OLP_IMG_WIDTH + j;
1045 
1046  if (pInputDisparity[nIndex] != 0)
1047  {
1048  float fSum = 0;
1049  int nNumPixels = 0;
1050 
1051  for (int l = -nRadius; l <= nRadius; l++)
1052  {
1053  for (int k = -nRadius; k <= nRadius; k++)
1054  {
1055  int nTempIndex = nIndex + l * OLP_IMG_WIDTH + k;
1056 
1057  if (pInputDisparity[nTempIndex] != 0)
1058  {
1059  fSum += pInputDisparity[nTempIndex];
1060  nNumPixels++;
1061  }
1062  }
1063  }
1064 
1065  pSmoothedDisparity[nIndex] = fSum / (float)nNumPixels;
1066  }
1067  }
1068  }
1069 }
1070 
1071 Vec3d
1072 CFeatureCalculation::GetCorresponding3DPoint(
1073  const Vec2d point2D,
1074  const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointcloud)
1075 {
1076  Vec3d result;
1077  const int width = pointcloud->width;
1078  const int height = pointcloud->height;
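 // Clamp the (possibly sub-pixel) 2D coordinates to the cloud dimensions and
 // return the corresponding entry of the organized point cloud.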
1079  const int x = (point2D.x < 0) ? 0 : ((point2D.x > width - 1) ? width - 1 : point2D.x);
1080  const int y = (point2D.y < 0) ? 0 : ((point2D.y > height - 1) ? height - 1 : point2D.y);
1081  const int index = y * width + x;
1082  pcl::PointXYZRGBA point3D = pointcloud->at(index);
1083  result.x = point3D.x;
1084  result.y = point3D.y;
1085  result.z = point3D.z;
1086  return result;
1087 }