FeatureCalculation.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 #include "FeatureCalculation.h"
25 
26 #include "MSERCalculation.h"
27 #include "OLPTools.h"
28 
29 
30 // IVT
31 #include <Math/Math3d.h>
32 #include <Image/ByteImage.h>
33 #include <Image/ImageProcessor.h>
34 #include <Image/IplImageAdaptor.h>
35 #include <Features/SIFTFeatures/SIFTFeatureEntry.h>
36 #include <Features/SIFTFeatures/SIFTFeatureCalculator.h>
37 #include <Image/StereoMatcher.h>
38 #include <Calibration/StereoCalibration.h>
39 #include <Calibration/Calibration.h>
40 #include <Calibration/Rectification.h>
41 #include <Threading/Threading.h>
42 
43 #include <opencv2/calib3d.hpp>
44 
45 
46 // OpenCV
47 #include <opencv2/opencv.hpp>
48 
49 // OpenMP
50 #include <omp.h>
51 
52 // system
53 #include <sys/time.h>
54 
56 
57 
58 
59 
61 {
62  // create SIFTFeatureCalculator
63  m_pSIFTFeatureCalculator = new CSIFTFeatureCalculator();
64  // create HarrisSIFTFeatureCalculator
65  //m_pHarrisSIFTFeatureCalculator = new CHarrisSIFTFeatureCalculator(0.0002f, 3, 1000); // 0.01f, 3, 500
66  //CDynamicArray* pResultList = new CDynamicArray(100);
67 
68  m_pInterestPoints = new Vec2d[m_nMaxNumInterestPoints];
69 }
70 
72 {
73  //delete m_pSIFTFeatureCalculator;
74  delete[] m_pInterestPoints;
75 }
76 
77 
/**
 * Extracts all feature primitives from a calibrated stereo pair.
 *
 * Three independent extraction stages run concurrently (OpenMP sections):
 *  1. Harris interest points in the left image, stereo-matched into the right
 *     image; each successful match yields a SIFT descriptor with a 3D position
 *     (appended to aAllSIFTPoints).
 *  2. MSER regions with 3D reconstruction (appended to aAllMSERs) — only when
 *     compiled with OLP_USE_MSERS.
 *  3. Dense 3D points from a disparity map via GetPointsFromDisparity()
 *     (appended to aPointsFromDepthImage / pDisparityImage / pAll3DPoints) —
 *     only when compiled with OLP_USE_DEPTH_MAP.
 *
 * @param pImageLeftColor/pImageRightColor  color stereo pair (used by MSER/disparity stages)
 * @param pImageLeftGrey/pImageRightGrey    grayscale stereo pair (Harris/SIFT/matching)
 * @param nDisparityPointDistance           pixel stride for sampling the disparity map
 * @param pStereoCalibration                calibration used for matching and 3D reconstruction
 * @param aAllSIFTPoints                    out: SIFT features with 3D positions
 * @param aAllMSERs                         out: 3D MSER descriptors (heap-allocated, caller owns)
 * @param aPointsFromDepthImage             out: hypothesis points from the depth map (caller owns)
 * @param pDisparityImage                   out: unrectified disparity visualization
 * @param pAll3DPoints                      optional out: all reconstructed 3D points (may be NULL)
 */
void CFeatureCalculation::GetAllFeaturePoints(const CByteImage* pImageLeftColor, const CByteImage* pImageRightColor, const CByteImage* pImageLeftGrey, const CByteImage* pImageRightGrey, const int nDisparityPointDistance, CStereoCalibration* pStereoCalibration,
        CSIFTFeatureArray& aAllSIFTPoints, std::vector<CMSERDescriptor3D*>& aAllMSERs, std::vector<CHypothesisPoint*>& aPointsFromDepthImage, CByteImage* pDisparityImage, std::vector<Vec3d>* pAll3DPoints)
{
    timeval tStart, tEnd;
    gettimeofday(&tStart, 0);
    // The Harris section opens an inner "omp parallel" inside an "omp section";
    // nested parallelism must be enabled explicitly or the inner region runs serially.
    omp_set_nested(true);

    #pragma omp parallel sections
    {

        //**************************************************************************************************************
        // calculate Harris interest points and find stereo correspondences
        //**************************************************************************************************************


        #pragma omp section
        {
            const int nNumFeatures = ImageProcessor::CalculateHarrisInterestPoints(pImageLeftGrey, m_pInterestPoints, m_nMaxNumInterestPoints, OLP_HARRIS_POINT_QUALITY, OLP_HARRIS_POINT_DISTANCE); // 0.001
            //ARMARX_VERBOSE_S << << " --- Number of Features: %d ---\n", nNumFeatures);

            // search corresponding features in right image
            // create StereoMatcher
            CStereoMatcher* stereoMatcher = new CStereoMatcher();
            stereoMatcher->InitCameraParameters(pStereoCalibration, false);
            // Disparity search range derived from the assumed object distance window
            // [0.2 * OLP_MAX_OBJECT_DISTANCE, 4 * OLP_MAX_OBJECT_DISTANCE]
            // (larger distance => smaller disparity, hence min/max swap).
            const int nDispMin = stereoMatcher->GetDisparityEstimate(4 * OLP_MAX_OBJECT_DISTANCE);
            const int nDispMax = stereoMatcher->GetDisparityEstimate(0.2f * OLP_MAX_OBJECT_DISTANCE);

            #pragma omp parallel
            {
                // Thread-private scratch array for descriptor results; avoids
                // sharing between the worker threads of the inner parallel for.
                CDynamicArray afSIFTDescriptors(4);

                #pragma omp for schedule(static, 32)

                for (int i = 0; i < nNumFeatures; i++)
                {
                    if (m_pInterestPoints[i].x > OLP_MIN_X_VALUE_SIFT_POINTS)
                    {
                        Vec2d vCorrespondingPointRight;
                        Vec3d vPoint3D;

                        // img l, img r, px, py, size of correlation window, min disparity, max disparity,
                        // corresponding point 2d, 3d point, correlation threshold, images are undistorted
                        int nMatchingResult = stereoMatcher->Match(pImageLeftGrey, pImageRightGrey, (int) m_pInterestPoints[i].x, (int) m_pInterestPoints[i].y,
                                              10, nDispMin, nDispMax, vCorrespondingPointRight, vPoint3D, 0.7f, true);

                        // negative result = no acceptable correspondence found
                        if (nMatchingResult >= 0)
                        {
                            if (vPoint3D.z < OLP_MAX_OBJECT_DISTANCE)
                            {
                                m_pSIFTFeatureCalculator->CreateSIFTDescriptors(pImageLeftGrey, &afSIFTDescriptors, m_pInterestPoints[i].x,
                                        m_pInterestPoints[i].y, 1.0f, false, false);

                                if (afSIFTDescriptors.GetSize() > 0)
                                {
                                    // Only the shared result array needs serialization;
                                    // a clone is stored so the scratch entry can be freed below.
                                    #pragma omp critical
                                    {
                                        aAllSIFTPoints.AddElement((CSIFTFeatureEntry*)((CSIFTFeatureEntry*)afSIFTDescriptors[0])->Clone());
                                        aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d = vPoint3D;
                                    }
                                    // Manual cleanup of the scratch entry: disable the array's
                                    // own deletion (bDelete = false) so Clear() does not free it,
                                    // then delete it explicitly ourselves.
                                    CSIFTFeatureEntry* pFeature = (CSIFTFeatureEntry*)afSIFTDescriptors[0];
                                    afSIFTDescriptors[0]->bDelete = false;
                                    afSIFTDescriptors.Clear();
                                    delete pFeature;
                                }
                            }
                        }
                    }
                }
            }

            delete stereoMatcher;
        }



        //**************************************************************************************************************
        // find MSER regions
        //**************************************************************************************************************


        #pragma omp section
        {
#ifdef OLP_USE_MSERS
            CMSERCalculation::FindMSERs3D(pImageLeftColor, pImageRightColor, pStereoCalibration, OLP_TOLERANCE_MODIFICATOR, aAllMSERs);
#endif
        }


        //**************************************************************************************************************
        // get 3-d points from a disparity map
        //**************************************************************************************************************

        #pragma omp section
        {
#ifdef OLP_USE_DEPTH_MAP

            GetPointsFromDisparity(pImageLeftColor, pImageRightColor, pImageLeftGrey, pImageRightGrey, pStereoCalibration, nDisparityPointDistance, aPointsFromDepthImage, pDisparityImage, pAll3DPoints);

#endif
        }

    }

    gettimeofday(&tEnd, 0);
    long tTimeDiff = (1000 * tEnd.tv_sec + tEnd.tv_usec / 1000) - (1000 * tStart.tv_sec + tStart.tv_usec / 1000);
    ARMARX_VERBOSE_S << "Time for GetAllFeaturePoints(): " << tTimeDiff << " ms";

}
204 
205 
206 
207 #ifdef OLP_USE_DEPTH_MAP
208 
209 void CFeatureCalculation::GetAllFeaturePoints(const CByteImage* pImageLeftColor, const CByteImage* pImageLeftGrey, const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointcloud, const int nDisparityPointDistance,
210  CSIFTFeatureArray& aAllSIFTPoints, std::vector<CMSERDescriptor3D*>& aAllMSERs, std::vector<CHypothesisPoint*>& aPointsFromDepthImage, const CCalibration* calibration)
211 {
212  timeval tStart, tEnd;
213  gettimeofday(&tStart, 0);
214  omp_set_nested(true);
215 
216  #pragma omp parallel sections
217  {
218 
219  //**************************************************************************************************************
220  // calculate Harris interest points and find stereo correspondences
221  //**************************************************************************************************************
222 
223 
224  #pragma omp section
225  {
226  const int nNumFeatures = ImageProcessor::CalculateHarrisInterestPoints(pImageLeftGrey, m_pInterestPoints, m_nMaxNumInterestPoints, OLP_HARRIS_POINT_QUALITY, OLP_HARRIS_POINT_DISTANCE); // 0.001
227  //ARMARX_VERBOSE_S << << " --- Number of Features: %d ---\n", nNumFeatures);
228 
229  #pragma omp parallel
230  {
231  CDynamicArray afSIFTDescriptors(4);
232 
233  #pragma omp for schedule(static, 32)
234  for (int i = 0; i < nNumFeatures; i++)
235  {
236  if (m_pInterestPoints[i].x > OLP_MIN_X_VALUE_SIFT_POINTS)
237  {
238  Vec3d point3D = GetCorresponding3DPoint(m_pInterestPoints[i], pointcloud);
239 
240  if (point3D.z > 0)
241  {
242  if (point3D.z < OLP_MAX_OBJECT_DISTANCE)
243  {
244  m_pSIFTFeatureCalculator->CreateSIFTDescriptors(pImageLeftGrey, &afSIFTDescriptors, m_pInterestPoints[i].x,
245  m_pInterestPoints[i].y, 1.0f, false, false);
246 
247  if (afSIFTDescriptors.GetSize() > 0)
248  {
249  #pragma omp critical
250  {
251  aAllSIFTPoints.AddElement((CSIFTFeatureEntry*)((CSIFTFeatureEntry*)afSIFTDescriptors[0])->Clone());
252  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d.x = point3D.x;
253  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d.y = point3D.y;
254  aAllSIFTPoints[aAllSIFTPoints.GetSize() - 1]->point3d.z = point3D.z;
255  }
256  CSIFTFeatureEntry* pFeature = (CSIFTFeatureEntry*)afSIFTDescriptors[0];
257  afSIFTDescriptors[0]->bDelete = false;
258  afSIFTDescriptors.Clear();
259  delete pFeature;
260  }
261  }
262  }
263  }
264  }
265  }
266  }
267 
268 
269 
270  //**************************************************************************************************************
271  // find MSER regions
272  //**************************************************************************************************************
273 
274 
275  #pragma omp section
276  {
277 #ifdef OLP_USE_MSERS
278 
279  CByteImage* pHSVImageLeft = new CByteImage(pImageLeftColor);
280  ImageProcessor::CalculateHSVImage(pImageLeftColor, pHSVImageLeft);
281 
282  std::vector<CMSERDescriptor*> aMSERDescriptors2D;
283  CMSERCalculation::FindMSERs2D(pImageLeftColor, pHSVImageLeft, aMSERDescriptors2D);
284 
285  for (size_t i = 0; i < aMSERDescriptors2D.size(); i++)
286  {
287  CMSERDescriptor* descriptor2D = aMSERDescriptors2D.at(i);
288  CMSERDescriptor3D* descriptor3D = new CMSERDescriptor3D();
289  descriptor3D->pRegionLeft = descriptor2D;
290 
291  const Vec2d mean2D = descriptor2D->vMean;
292  descriptor3D->vPosition = GetCorresponding3DPoint(mean2D, pointcloud);
293 
294  Vec2d vSigmaPoint2D, vTemp;
295 
296  Math2d::MulVecScalar(descriptor2D->vEigenvector1, -sqrtf(descriptor2D->fEigenvalue1), vTemp);
297  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
298  descriptor3D->vSigmaPoint1a = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
299 
300  Math2d::MulVecScalar(descriptor2D->vEigenvector1, sqrtf(descriptor2D->fEigenvalue1), vTemp);
301  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
302  descriptor3D->vSigmaPoint1b = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
303 
304  Math2d::MulVecScalar(descriptor2D->vEigenvector2, -sqrtf(descriptor2D->fEigenvalue2), vTemp);
305  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
306  descriptor3D->vSigmaPoint2a = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
307 
308  Math2d::MulVecScalar(descriptor2D->vEigenvector2, sqrtf(descriptor2D->fEigenvalue2), vTemp);
309  Math2d::AddVecVec(mean2D, vTemp, vSigmaPoint2D);
310  descriptor3D->vSigmaPoint2b = GetCorresponding3DPoint(vSigmaPoint2D, pointcloud);
311 
312  aAllMSERs.push_back(descriptor3D);
313  }
314 
315 #endif
316 
317  }
318 
319 
320  //**************************************************************************************************************
321  // get 3D points from the depth map
322  //**************************************************************************************************************
323 
324  #pragma omp section
325  {
326  const float fIntensityFactor = 1.0f / 255.0f;
327  const size_t width = pointcloud->width;
328  const size_t height = pointcloud->height;
329 
330  CByteImage* leftImageGaussFiltered = new CByteImage(pImageLeftColor);
331  ::ImageProcessor::GaussianSmooth3x3(pImageLeftColor, leftImageGaussFiltered);
332 
333  for (size_t i = 0; i < height; i += nDisparityPointDistance)
334  {
335  for (size_t j = 0; j < width; j += nDisparityPointDistance)
336  {
337  const pcl::PointXYZRGBA point3D = pointcloud->at(i * width + j);
338 
339  if (point3D.z > 0)
340  {
341  if (point3D.z < OLP_MAX_OBJECT_DISTANCE)
342  {
343  // create point descriptor
344  CHypothesisPoint* pNewPoint = new CHypothesisPoint();
346  Vec3d vPoint3D = {point3D.x, point3D.y, point3D.z};
347  Math3d::SetVec(pNewPoint->vPosition, vPoint3D);
348  Math3d::SetVec(pNewPoint->vOldPosition, vPoint3D);
349  pNewPoint->pMSERDescriptor = NULL;
350  pNewPoint->pFeatureDescriptors = new CSIFTFeatureArray();
351  pNewPoint->fMembershipProbability = 0;
352 
353  Vec2d vPoint2D;
354  calibration->CameraToImageCoordinates(vPoint3D, vPoint2D, false);
355  int u = (int)(vPoint2D.x + 0.5f);
356  int v = (int)(vPoint2D.y + 0.5f);
357  u = (u < 0) ? 0 : ((u > leftImageGaussFiltered->width - 1) ? leftImageGaussFiltered->width - 1 : u);
358  v = (v < 0) ? 0 : ((v > leftImageGaussFiltered->height - 1) ? leftImageGaussFiltered->height - 1 : v);
359  float r, g, b;
360  const float weight = 1.0f;
361  r = weight * leftImageGaussFiltered->pixels[3 * (v * leftImageGaussFiltered->width + u)] + (1.0f - weight) * point3D.r;
362  g = weight * leftImageGaussFiltered->pixels[3 * (v * leftImageGaussFiltered->width + u) + 1] + (1.0f - weight) * point3D.g;
363  b = weight * leftImageGaussFiltered->pixels[3 * (v * leftImageGaussFiltered->width + u) + 2] + (1.0f - weight) * point3D.b;
364 
365  const float fIntensity = (r + g + b) / 3.0f;
366  const float normalizationFactor = 1.0f / (fIntensity + 3.0f);
367  pNewPoint->fIntensity = fIntensity * fIntensityFactor;
368  pNewPoint->fColorR = normalizationFactor * (r + 1);
369  pNewPoint->fColorG = normalizationFactor * (g + 1);
370  pNewPoint->fColorB = normalizationFactor * (b + 1);
371 
372  aPointsFromDepthImage.push_back(pNewPoint);
373  }
374  }
375  }
376  }
377 
378  delete leftImageGaussFiltered;
379  }
380 
381  }
382 
383  gettimeofday(&tEnd, 0);
384  long tTimeDiff = (1000 * tEnd.tv_sec + tEnd.tv_usec / 1000) - (1000 * tStart.tv_sec + tStart.tv_usec / 1000);
385  ARMARX_VERBOSE_S << "Time for GetAllFeaturePoints(): " << tTimeDiff << " ms";
386 
387 }
388 
389 #endif
390 
391 static void doStereoSGBM(int minDisparity, int numDisparities, int nSADWindowSize, int P1, int P2, int disp12MaxDiff,
392  cv::Mat leftInput, cv::Mat rightInput, cv::Mat output)
393 {
394  int preFilterCap = 0;
395  int uniquenessRatio = 0;
396  int speckleWindowSize = 0;
397  int speckleRange = 0;
398 #if CV_MAJOR_VERSION == 2
399  bool fullDP = true;
400  cv::StereoSGBM stereoSGBM(minDisparity, numDisparities, nSADWindowSize, P1, P2, disp12MaxDiff, preFilterCap, uniquenessRatio, speckleWindowSize, speckleRange, fullDP);
401  stereoSGBM(leftInput, rightInput, output);
402 #else
403  cv::Ptr<cv::StereoSGBM> stereoSGBM = cv::StereoSGBM::create(minDisparity, numDisparities, nSADWindowSize, P1, P2, disp12MaxDiff, preFilterCap, uniquenessRatio, speckleWindowSize, speckleRange, cv::StereoSGBM::MODE_SGBM);
404  stereoSGBM->compute(leftInput, rightInput, output);
405 #endif
406 }
407 
408 
409 
410 void CFeatureCalculation::GetPointsFromDisparity(const CByteImage* pImageLeftColor, const CByteImage* pImageRightColor, const CByteImage* pImageLeftGrey, const CByteImage* pImageRightGrey, CStereoCalibration* pStereoCalibration,
411  const int nDisparityPointDistance, std::vector<CHypothesisPoint*>& aPointsFromDisparity, CByteImage* pDisparityImage, std::vector<Vec3d>* pAll3DPoints)
412 {
413  // rectify images
414  CByteImage* pRectifiedImageLeftGrey, *pRectifiedImageRightGrey, *pRectifiedImageLeftGreyHalfsize, *pRectifiedImageRightGreyHalfsize, *pRectifiedImageLeftGreyQuartersize, *pRectifiedImageRightGreyQuartersize;
415  IplImage* pRectifiedIplImageLeft, *pRectifiedIplImageRight, *pRectifiedIplImageLeftHalfsize, *pRectifiedIplImageRightHalfsize, *pRectifiedIplImageLeftQuartersize, *pRectifiedIplImageRightQuartersize;
416  CByteImage* pRectifiedImageLeftColorGaussFiltered;
417  cv::Mat mDispImg, mDispImgHalfsize, mDispImgQuartersize;
418  bool bGreyImagesReady = false;
419  bool bGreyImagesHalfsizeReady = false;
420  bool bGreyImagesQuartersizeReady = false;
421 
422  #pragma omp parallel sections
423  {
424  #pragma omp section
425  {
426  const CByteImage* ppOriginalImages[2];
427  ppOriginalImages[0] = pImageLeftGrey;
428  ppOriginalImages[1] = pImageRightGrey;
429  pRectifiedImageLeftGrey = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
430  pRectifiedImageRightGrey = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
431  CByteImage* ppRectifiedImages[2];
432  ppRectifiedImages[0] = pRectifiedImageLeftGrey;
433  ppRectifiedImages[1] = pRectifiedImageRightGrey;
434 
435  CRectification* pRectification = new CRectification(true, false);
436  pRectification->Init(pStereoCalibration);
437  pRectification->Rectify(ppOriginalImages, ppRectifiedImages);
438 
439  pRectifiedIplImageLeft = IplImageAdaptor::Adapt(pRectifiedImageLeftGrey);
440  pRectifiedIplImageRight = IplImageAdaptor::Adapt(pRectifiedImageRightGrey);
441  bGreyImagesReady = true;
442 
443  // half size
444  pRectifiedImageLeftGreyHalfsize = new CByteImage(OLP_IMG_WIDTH / 2, OLP_IMG_HEIGHT / 2, CByteImage::eGrayScale);
445  pRectifiedImageRightGreyHalfsize = new CByteImage(OLP_IMG_WIDTH / 2, OLP_IMG_HEIGHT / 2, CByteImage::eGrayScale);
446  ImageProcessor::Resize(pRectifiedImageLeftGrey, pRectifiedImageLeftGreyHalfsize);
447  ImageProcessor::Resize(pRectifiedImageRightGrey, pRectifiedImageRightGreyHalfsize);
448 
449  pRectifiedIplImageLeftHalfsize = IplImageAdaptor::Adapt(pRectifiedImageLeftGreyHalfsize);
450  pRectifiedIplImageRightHalfsize = IplImageAdaptor::Adapt(pRectifiedImageRightGreyHalfsize);
451  bGreyImagesHalfsizeReady = true;
452 
453  // quarter size
454  pRectifiedImageLeftGreyQuartersize = new CByteImage(OLP_IMG_WIDTH / 4, OLP_IMG_HEIGHT / 4, CByteImage::eGrayScale);
455  pRectifiedImageRightGreyQuartersize = new CByteImage(OLP_IMG_WIDTH / 4, OLP_IMG_HEIGHT / 4, CByteImage::eGrayScale);
456  ImageProcessor::Resize(pRectifiedImageLeftGrey, pRectifiedImageLeftGreyQuartersize);
457  ImageProcessor::Resize(pRectifiedImageRightGrey, pRectifiedImageRightGreyQuartersize);
458 
459  pRectifiedIplImageLeftQuartersize = IplImageAdaptor::Adapt(pRectifiedImageLeftGreyQuartersize);
460  pRectifiedIplImageRightQuartersize = IplImageAdaptor::Adapt(pRectifiedImageRightGreyQuartersize);
461  bGreyImagesQuartersizeReady = true;
462 
463  delete pRectification;
464  }
465 
466  #pragma omp section
467  {
468  const CByteImage* ppOriginalImages[2];
469  ppOriginalImages[0] = pImageLeftColor;
470  ppOriginalImages[1] = pImageRightColor;
471  CByteImage* pRectifiedImageLeftColor = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
472  CByteImage* pRectifiedImageRightColor = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
473  CByteImage* ppRectifiedImages[2];
474  ppRectifiedImages[0] = pRectifiedImageLeftColor;
475  ppRectifiedImages[1] = pRectifiedImageRightColor;
476 
477  CRectification* pRectification = new CRectification(true, false);
478  pRectification->Init(pStereoCalibration);
479  pRectification->Rectify(ppOriginalImages, ppRectifiedImages);
480 
481  pRectifiedImageLeftColorGaussFiltered = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
482  ImageProcessor::GaussianSmooth3x3(pRectifiedImageLeftColor, pRectifiedImageLeftColorGaussFiltered);
483 
484  delete pRectifiedImageLeftColor;
485  delete pRectifiedImageRightColor;
486  delete pRectification;
487  }
488 
489 
490  // get disparity using semi-global block matching
491 
492  #pragma omp section
493  {
494  while (!bGreyImagesReady)
495  {
496  Threading::YieldThread();
497  }
498 
499  // StereoSGBM::StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize, int P1=0, int P2=0, int disp12MaxDiff=0,
500  // int preFilterCap=0, int uniquenessRatio=0, int speckleWindowSize=0, int speckleRange=0, bool fullDP=false)
501  //
502  // minDisparity – Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
503  // numDisparities – Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
504  // SADWindowSize – Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.
505  // P1 – The first parameter controlling the disparity smoothness. See below.
506  // P2 – The second parameter controlling the disparity smoothness. The larger the values are, the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1 between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor pixels. The algorithm requires P2 > P1 . See stereo_match.cpp sample where some reasonably good P1 and P2 values are shown (like 8*number_of_image_channels*SADWindowSize*SADWindowSize and 32*number_of_image_channels*SADWindowSize*SADWindowSize , respectively).
507  // disp12MaxDiff – Maximum allowed difference (in integer pixel units) in the left-right disparity check. Set it to a non-positive value to disable the check.
508  // preFilterCap – Truncation value for the prefiltered image pixels. The algorithm first computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. The result values are passed to the Birchfield-Tomasi pixel cost function.
509  // uniquenessRatio – Margin in percentage by which the best (minimum) computed cost function value should “win” the second best value to consider the found match correct. Normally, a value within the 5-15 range is good enough.
510  // speckleWindowSize – Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
511  // speckleRange – Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value, multiple of 16. Normally, 16 or 32 is good enough.
512  // fullDP – Set it to true to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to false.
513  const cv::Mat mLeftImg = cv::cvarrToMat(pRectifiedIplImageLeft);
514  const cv::Mat mRightImg = cv::cvarrToMat(pRectifiedIplImageRight);
515  const int nSADWindowSize = 7; // 11
516  const int nPenaltyDispDiffOne = 8 * 3 * nSADWindowSize * nSADWindowSize; // 400
517  const int nPenaltyDispDiffBiggerOne = 32 * 3 * nSADWindowSize * nSADWindowSize; // 600
518 
519  doStereoSGBM(1, 8 * 16, nSADWindowSize, nPenaltyDispDiffOne, nPenaltyDispDiffBiggerOne, 4, mLeftImg, mRightImg, mDispImg);
520  }
521 
522 
523  #pragma omp section
524  {
525  while (!bGreyImagesHalfsizeReady)
526  {
527  Threading::YieldThread();
528  }
529 
530  const cv::Mat mLeftImg = cv::cvarrToMat(pRectifiedIplImageLeftHalfsize);
531  const cv::Mat mRightImg = cv::cvarrToMat(pRectifiedIplImageRightHalfsize);
532  const int nSADWindowSize = 7;
533  const int nPenaltyDispDiffOne = 8 * 3 * nSADWindowSize * nSADWindowSize; // 400
534  const int nPenaltyDispDiffBiggerOne = 32 * 3 * nSADWindowSize * nSADWindowSize; // 600
535 
536  doStereoSGBM(1, 4 * 16, nSADWindowSize, nPenaltyDispDiffOne, nPenaltyDispDiffBiggerOne, 4, mLeftImg, mRightImg, mDispImg);
537  }
538 
539 
540  #pragma omp section
541  {
542  while (!bGreyImagesQuartersizeReady)
543  {
544  Threading::YieldThread();
545  }
546 
547  const cv::Mat mLeftImg = cv::cvarrToMat(pRectifiedIplImageLeftQuartersize);
548  const cv::Mat mRightImg = cv::cvarrToMat(pRectifiedIplImageRightQuartersize);
549  const int nSADWindowSize = 7;
550  const int nPenaltyDispDiffOne = 8 * 3 * nSADWindowSize * nSADWindowSize; // 400
551  const int nPenaltyDispDiffBiggerOne = 32 * 3 * nSADWindowSize * nSADWindowSize; // 600
552 
553  doStereoSGBM(1, 4 * 16, nSADWindowSize, nPenaltyDispDiffOne, nPenaltyDispDiffBiggerOne, 4, mLeftImg, mRightImg, mDispImg);
554  }
555  }
556 
557 
558  CStereoMatcher* pStereoMatcher = new CStereoMatcher();
559  CStereoCalibration* pStereoCalibrationCopy = new CStereoCalibration(*pStereoCalibration);
560  pStereoMatcher->InitCameraParameters(pStereoCalibrationCopy, false);
561  const int nDispMin = pStereoMatcher->GetDisparityEstimate(4000);
562  const int nDispMax = pStereoMatcher->GetDisparityEstimate(200);
563  Vec2d vImgPointLeft, vImgPointRight;
564  Vec3d vPoint3D;
565  const float fIntensityFactor = 1.0f / (3 * 255);
566 
567 
568  // combine disparities
569  float pDisparities[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
570  #pragma omp parallel for schedule(static, 80)
571  for (int i = 0; i < OLP_IMG_HEIGHT; i++)
572  {
573  for (int j = 0; j < OLP_IMG_WIDTH; j++)
574  {
575  int nDispFullsize = mDispImg.at<short>(i, j) / 16;
576  int nDispHalfsize = 2 * mDispImgHalfsize.at<short>(i / 2, j / 2) / 16;
577  int nDispQuartersize = 4 * mDispImgQuartersize.at<short>(i / 4, j / 4) / 16;
578 
579  int nDisp = 0;
580  float fDisp = 0;
581  int nNumValidDisparities = 0;
582 
583  if ((nDispFullsize > nDispMin) && (nDispFullsize < nDispMax))
584  {
585  nDisp += nDispFullsize;
586  nNumValidDisparities++;
587  }
588 
589  if ((nDispHalfsize > nDispMin) && (nDispHalfsize < nDispMax))
590  {
591  nDisp += nDispHalfsize;
592  nNumValidDisparities++;
593  }
594 
595  if ((nDispQuartersize > nDispMin) && (nDispQuartersize < nDispMax))
596  {
597  nDisp += nDispQuartersize;
598  nNumValidDisparities++;
599  }
600 
601  if (nNumValidDisparities > 0)
602  {
603  fDisp = (float)nDisp / (float)nNumValidDisparities;
604  }
605 
606  pDisparities[i * OLP_IMG_WIDTH + j] = fDisp;
607  }
608  }
609 
610 
611  // fill holes
612  if (false)
613  {
614  int pDisparitiesCopy[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
615 
616  for (int i = 0; i < OLP_IMG_HEIGHT * OLP_IMG_WIDTH; i++)
617  {
618  pDisparitiesCopy[i] = pDisparities[i];
619  }
620 
621  const int nMargin = 10;
622  const int nHoleFillingAveragingRadius = 5;
623 
624  for (int i = nMargin; i < OLP_IMG_HEIGHT - nMargin; i++)
625  {
626  for (int j = nMargin; j < OLP_IMG_WIDTH - nMargin; j++)
627  {
628  if (pDisparitiesCopy[i * OLP_IMG_WIDTH + j] == 0)
629  {
630  int nSum = 0;
631  int nNumValidDisparities = 0;
632 
633  for (int k = -nHoleFillingAveragingRadius; k <= nHoleFillingAveragingRadius; k++)
634  {
635  for (int l = -nHoleFillingAveragingRadius; l <= nHoleFillingAveragingRadius; l++)
636  {
637  if (pDisparitiesCopy[(i + k)*OLP_IMG_WIDTH + (j + l)] > 0)
638  {
639  nSum += pDisparitiesCopy[(i + k) * OLP_IMG_WIDTH + (j + l)];
640  nNumValidDisparities++;
641  }
642  }
643  }
644 
645  if (nNumValidDisparities > 0)
646  {
647  pDisparities[i * OLP_IMG_WIDTH + j] = nSum / nNumValidDisparities;
648  }
649  }
650  }
651  }
652  }
653 
654 
655  // visualize
656  CByteImage* pRectifiedDisparityImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
657 
658  for (int i = 0; i < OLP_IMG_HEIGHT; i++)
659  {
660  for (int j = 0; j < OLP_IMG_WIDTH; j++)
661  {
662  int nDisp = pDisparities[i * OLP_IMG_WIDTH + j];
663  pRectifiedDisparityImage->pixels[i * OLP_IMG_WIDTH + j] = (nDisp > 255) ? 255 : nDisp;
664  }
665  }
666 
667  // get unrectified disparity image
668  ImageProcessor::Zero(pDisparityImage);
669  Vec2d vRectPos, vUnRectPos;
670  Mat3d mRectificationHomography = pStereoCalibration->rectificationHomographyLeft;
671 
672  for (int i = 0; i < OLP_IMG_HEIGHT; i++)
673  {
674  for (int j = 0; j < OLP_IMG_WIDTH; j++)
675  {
676  vRectPos.x = j;
677  vRectPos.y = i;
678  Math2d::ApplyHomography(mRectificationHomography, vRectPos, vUnRectPos);
679 
680  if (0 <= vUnRectPos.y && vUnRectPos.y < OLP_IMG_HEIGHT && 0 <= vUnRectPos.x && vUnRectPos.x < OLP_IMG_WIDTH)
681  {
682  pDisparityImage->pixels[(int)vUnRectPos.y * OLP_IMG_WIDTH + (int)vUnRectPos.x] = pRectifiedDisparityImage->pixels[i * OLP_IMG_WIDTH + j];
683  }
684  }
685  }
686 
687  COLPTools::FillHolesGray(pDisparityImage, pRectifiedDisparityImage, 1);
688  COLPTools::FillHolesGray(pRectifiedDisparityImage, pDisparityImage, 1);
689  delete pRectifiedDisparityImage;
690 
691 
692 
693  // get smoothed disparity
694  float* pSmoothedDisparity = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];;
695 
696  if (true)
697  {
698  float* pSmoothedDisparity1 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
699  float* pSmoothedDisparity2 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
700  float* pSmoothedDisparity3 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
701  float* pSmoothedDisparity4 = new float[OLP_IMG_WIDTH * OLP_IMG_HEIGHT];
702  #pragma omp parallel sections
703  {
704  #pragma omp section
705  {
706  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity1, 0);
707  }
708 
709  #pragma omp section
710  {
711  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity2, 1);
712  }
713 
714  #pragma omp section
715  {
716  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity3, 2);
717  }
718 
719  #pragma omp section
720  {
721  CalculateSmoothedDisparityImage(pDisparities, pSmoothedDisparity4, 4);
722  }
723  }
724 
725  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
726  {
727  pSmoothedDisparity[i] = 0.25f * (pSmoothedDisparity1[i] + pSmoothedDisparity2[i] + pSmoothedDisparity3[i] + pSmoothedDisparity4[i]);
728  }
729 
730  delete[] pSmoothedDisparity1;
731  delete[] pSmoothedDisparity2;
732  delete[] pSmoothedDisparity3;
733  delete[] pSmoothedDisparity4;
734  }
735  else
736  {
737  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
738  {
739  pSmoothedDisparity[i] = pDisparities[i];
740  }
741  }
742 
743 
744  const float fDispMin = nDispMin;
745  const float fDispMax = nDispMax;
746  CDynamicArray afSIFTDescriptors(4);
747 
748  for (int i = 0; i < OLP_IMG_HEIGHT; i += nDisparityPointDistance)
749  {
750  for (int j = 0; j < OLP_IMG_WIDTH; j += nDisparityPointDistance)
751  {
752  //fDisp = m3.at<short>(i,j)/16;
753  float fDisp = pSmoothedDisparity[i * OLP_IMG_WIDTH + j];
754 
755  if ((fDisp > fDispMin) && (fDisp < fDispMax))
756  {
757  //ARMARX_VERBOSE_S << << "disparity: %d ", nDisp);
758  vImgPointLeft.x = j;
759  vImgPointLeft.y = i;
760  vImgPointRight.x = j - fDisp;
761  vImgPointRight.y = i;
762 
763  pStereoCalibration->Calculate3DPoint(vImgPointLeft, vImgPointRight, vPoint3D, true, false);
764  //ARMARX_VERBOSE_S << << "z: %.0f\n", vPoint3D.z);
765 
766  if (vPoint3D.z < OLP_MAX_OBJECT_DISTANCE)
767  {
768  // create point descriptor
769  CHypothesisPoint* pNewPoint = new CHypothesisPoint();
771  Math3d::SetVec(pNewPoint->vPosition, vPoint3D);
772  Math3d::SetVec(pNewPoint->vOldPosition, vPoint3D);
773  pNewPoint->pMSERDescriptor = NULL;
774  pNewPoint->pFeatureDescriptors = new CSIFTFeatureArray();
775  pNewPoint->fMembershipProbability = 0;
776  const float fIntensity = pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j)] + pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j) + 1] + pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j) + 2] + 3;
777  pNewPoint->fIntensity = (fIntensity - 3) * fIntensityFactor;
778  pNewPoint->fColorR = (pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j)] + 1) / fIntensity;
779  pNewPoint->fColorG = (pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j) + 1] + 1) / fIntensity;
780  pNewPoint->fColorB = (pRectifiedImageLeftColorGaussFiltered->pixels[3 * (i * OLP_IMG_WIDTH + j) + 2] + 1) / fIntensity;
781 
782  // create sift descriptor for point
783  m_pSIFTFeatureCalculator->CreateSIFTDescriptors(pRectifiedImageLeftGrey, &afSIFTDescriptors, j, i, 1.0f, false, false);
784 
785  if (afSIFTDescriptors.GetSize() > 0)
786  {
787  pNewPoint->pFeatureDescriptors->AddElement((CSIFTFeatureEntry*)((CSIFTFeatureEntry*)afSIFTDescriptors[0])->Clone());
788  (*pNewPoint->pFeatureDescriptors)[0]->point3d = vPoint3D;
789  // cleanup
790  CSIFTFeatureEntry* pFeature = (CSIFTFeatureEntry*)afSIFTDescriptors[0];
791  afSIFTDescriptors[0]->bDelete = false;
792  afSIFTDescriptors.Clear();
793  delete pFeature;
794  }
795 
796  aPointsFromDisparity.push_back(pNewPoint);
797  }
798  }
799  else
800  {
801  //ARMARX_VERBOSE_S << << "invalid disparity: %d\n", nDisp);
802  }
803  }
804  }
805 
806  if (pAll3DPoints)
807  {
808  for (int i = 0; i < OLP_IMG_HEIGHT; i += nDisparityPointDistance)
809  {
810  for (int j = 0; j < OLP_IMG_WIDTH; j += nDisparityPointDistance)
811  {
812  float fDisp = pSmoothedDisparity[i * OLP_IMG_WIDTH + j];
813  vImgPointLeft.x = j;
814  vImgPointLeft.y = i;
815  vImgPointRight.x = j - fDisp;
816  vImgPointRight.y = i;
817  pStereoCalibration->Calculate3DPoint(vImgPointLeft, vImgPointRight, vPoint3D, true, false);
818 
819  if (Math3d::Length(vPoint3D) > 2 * OLP_MAX_OBJECT_DISTANCE)
820  {
821  if (aPointsFromDisparity.size() > 0)
822  {
823  Math3d::SetVec(vPoint3D, aPointsFromDisparity.at(0)->vPosition);
824  }
825  }
826 
827  pAll3DPoints->push_back(vPoint3D);
828  }
829  }
830  }
831 
832 
833  delete pRectifiedImageLeftGrey;
834  delete pRectifiedImageRightGrey;
835  delete pRectifiedImageLeftGreyHalfsize;
836  delete pRectifiedImageRightGreyHalfsize;
837  delete pRectifiedImageLeftGreyQuartersize;
838  delete pRectifiedImageRightGreyQuartersize;
839  delete pRectifiedImageLeftColorGaussFiltered;
840  delete pStereoMatcher;
841  delete pStereoCalibrationCopy;
842  cvReleaseImageHeader(&pRectifiedIplImageLeft);
843  cvReleaseImageHeader(&pRectifiedIplImageRight);
844  delete[] pSmoothedDisparity;
845 }
846 
847 
848 
849 
850 void CFeatureCalculation::CalculateSmoothedDisparityImage(float* pInputDisparity, float* pSmoothedDisparity, const int nRadius)
851 {
852  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
853  {
854  pSmoothedDisparity[i] = pInputDisparity[i];
855  }
856 
857  for (int i = nRadius; i < OLP_IMG_HEIGHT - nRadius; i++)
858  {
859  for (int j = nRadius; j < OLP_IMG_WIDTH - nRadius; j++)
860  {
861  int nIndex = i * OLP_IMG_WIDTH + j;
862 
863  if (pInputDisparity[nIndex] != 0)
864  {
865  float fSum = 0;
866  int nNumPixels = 0;
867 
868  for (int l = -nRadius; l <= nRadius; l++)
869  {
870  for (int k = -nRadius; k <= nRadius; k++)
871  {
872  int nTempIndex = nIndex + l * OLP_IMG_WIDTH + k;
873 
874  if (pInputDisparity[nTempIndex] != 0)
875  {
876  fSum += pInputDisparity[nTempIndex];
877  nNumPixels++;
878  }
879  }
880  }
881 
882  pSmoothedDisparity[nIndex] = fSum / (float)nNumPixels;
883  }
884  }
885  }
886 }
887 
888 
889 
890 Vec3d CFeatureCalculation::GetCorresponding3DPoint(const Vec2d point2D, const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pointcloud)
891 {
892  Vec3d result;
893  const int width = pointcloud->width;
894  const int height = pointcloud->height;
895  const int x = (point2D.x < 0) ? 0 : ((point2D.x > width - 1) ? width - 1 : point2D.x);
896  const int y = (point2D.y < 0) ? 0 : ((point2D.y > height - 1) ? height - 1 : point2D.y);
897  const int index = y * width + x;
898  pcl::PointXYZRGBA point3D = pointcloud->at(index);
899  result.x = point3D.x;
900  result.y = point3D.y;
901  result.z = point3D.z;
902  return result;
903 }
904 
905 
CMSERDescriptor
Definition: ObjectHypothesis.h:35
OLP_IMG_HEIGHT
#define OLP_IMG_HEIGHT
Definition: ObjectLearningByPushingDefinitions.h:69
CMSERDescriptor3D::vSigmaPoint1a
Vec3d vSigmaPoint1a
Definition: ObjectHypothesis.h:160
OLP_IMG_WIDTH
#define OLP_IMG_WIDTH
Definition: ObjectLearningByPushingDefinitions.h:68
CHypothesisPoint::pMSERDescriptor
CMSERDescriptor3D * pMSERDescriptor
Definition: ObjectHypothesis.h:244
CHypothesisPoint::vOldPosition
Vec3d vOldPosition
Definition: ObjectHypothesis.h:239
index
uint8_t index
Definition: EtherCATFrame.h:59
CSIFTFeatureArray
CDynamicArrayTemplate< CSIFTFeatureEntry * > CSIFTFeatureArray
Definition: ObjectHypothesis.h:168
CHypothesisPoint::fIntensity
float fIntensity
Definition: ObjectHypothesis.h:241
OLP_HARRIS_POINT_DISTANCE
#define OLP_HARRIS_POINT_DISTANCE
Definition: ObjectLearningByPushingDefinitions.h:245
FeatureCalculation.h
CMSERDescriptor::vEigenvector1
Vec2d vEigenvector1
Definition: ObjectHypothesis.h:87
GfxTL::Vec2d
VectorXD< 2, double > Vec2d
Definition: VectorXD.h:694
CMSERDescriptor3D::vSigmaPoint2a
Vec3d vSigmaPoint2a
Definition: ObjectHypothesis.h:160
OLP_HARRIS_POINT_QUALITY
#define OLP_HARRIS_POINT_QUALITY
Definition: ObjectLearningByPushingDefinitions.h:247
MSERCalculation.h
CHypothesisPoint::fColorG
float fColorG
Definition: ObjectHypothesis.h:241
OLP_MAX_OBJECT_DISTANCE
#define OLP_MAX_OBJECT_DISTANCE
Definition: ObjectLearningByPushingDefinitions.h:106
CMSERDescriptor::vEigenvector2
Vec2d vEigenvector2
Definition: ObjectHypothesis.h:87
CMSERCalculation::FindMSERs3D
static void FindMSERs3D(const CByteImage *pByteImageLeft, const CByteImage *pByteImageRight, CStereoCalibration *pStereoCalibration, const float fToleranceFactor, std::vector< CMSERDescriptor3D * > &aRegions3D)
Definition: MSERCalculation.cpp:253
CHypothesisPoint::vPosition
Vec3d vPosition
Definition: ObjectHypothesis.h:238
CMSERDescriptor3D::vPosition
Vec3d vPosition
Definition: ObjectHypothesis.h:158
COLPTools::FillHolesGray
void FillHolesGray(const CByteImage *pInputImage, CByteImage *pOutputImage, const int nRadius)
Definition: OLPTools.cpp:1527
OLP_TOLERANCE_MODIFICATOR
#define OLP_TOLERANCE_MODIFICATOR
Definition: ObjectLearningByPushingDefinitions.h:99
CMSERDescriptor3D::vSigmaPoint1b
Vec3d vSigmaPoint1b
Definition: ObjectHypothesis.h:160
OLPTools.h
CHypothesisPoint
Definition: ObjectHypothesis.h:171
CMSERDescriptor3D
Definition: ObjectHypothesis.h:97
GfxTL::Vec3d
VectorXD< 3, double > Vec3d
Definition: VectorXD.h:695
CHypothesisPoint::pFeatureDescriptors
CSIFTFeatureArray * pFeatureDescriptors
Definition: ObjectHypothesis.h:243
CMSERDescriptor::fEigenvalue1
float fEigenvalue1
Definition: ObjectHypothesis.h:85
CMSERDescriptor3D::pRegionLeft
CMSERDescriptor * pRegionLeft
Definition: ObjectHypothesis.h:156
CHypothesisPoint::eDepthMapPoint
@ eDepthMapPoint
Definition: ObjectHypothesis.h:235
CMSERDescriptor3D::vSigmaPoint2b
Vec3d vSigmaPoint2b
Definition: ObjectHypothesis.h:160
CHypothesisPoint::fColorR
float fColorR
Definition: ObjectHypothesis.h:241
CMSERDescriptor::vMean
Vec2d vMean
Definition: ObjectHypothesis.h:83
armarx::ctrlutil::v
double v(double t, double v0, double a0, double j)
Definition: CtrlUtil.h:39
float
#define float
Definition: 16_Level.h:22
CFeatureCalculation::CFeatureCalculation
CFeatureCalculation()
Definition: FeatureCalculation.cpp:60
CFeatureCalculation::~CFeatureCalculation
~CFeatureCalculation()
Definition: FeatureCalculation.cpp:71
OLP_MIN_X_VALUE_SIFT_POINTS
#define OLP_MIN_X_VALUE_SIFT_POINTS
Definition: ObjectLearningByPushingDefinitions.h:310
CHypothesisPoint::ePointType
EPointType ePointType
Definition: ObjectHypothesis.h:240
CFeatureCalculation::GetAllFeaturePoints
void GetAllFeaturePoints(const CByteImage *pImageLeftColor, const CByteImage *pImageRightColor, const CByteImage *pImageLeftGrey, const CByteImage *pImageRightGrey, const int nDisparityPointDistance, CStereoCalibration *pStereoCalibration, CSIFTFeatureArray &aAllSIFTPoints, std::vector< CMSERDescriptor3D * > &aAllMSERs, std::vector< CHypothesisPoint * > &aPointsFromDepthImage, CByteImage *pDisparityImage, std::vector< Vec3d > *pAll3DPoints=NULL)
Definition: FeatureCalculation.cpp:78
ARMARX_VERBOSE_S
#define ARMARX_VERBOSE_S
Definition: Logging.h:200
CMSERCalculation::FindMSERs2D
static void FindMSERs2D(const CByteImage *pRGBImage, const CByteImage *pHSVImage, std::vector< CMSERDescriptor * > &aMSERDescriptors)
Definition: MSERCalculation.cpp:58
Logging.h
CMSERDescriptor::fEigenvalue2
float fEigenvalue2
Definition: ObjectHypothesis.h:85
CHypothesisPoint::fColorB
float fColorB
Definition: ObjectHypothesis.h:241
CHypothesisPoint::fMembershipProbability
float fMembershipProbability
Definition: ObjectHypothesis.h:242