ObjectRecognition.cpp
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 #include "ObjectRecognition.h"
25 
26 #include "OLPTools.h"
28 #include "PointCloudRegistration.h"
29 
30 // IVT
31 #include <Calibration/Calibration.h>
32 #include <Calibration/StereoCalibration.h>
33 #include <Image/ByteImage.h>
34 #include <Image/ImageProcessor.h>
35 
36 #ifdef OLP_USE_EARLYVISION
37 // EV
38 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/Descriptors/HueDescriptor.h>
39 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/Descriptors/HueLaplacianDescriptor.h>
40 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/RFCSignatureCalculator.h>
41 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/RFCSignatureFeatureEntry.h>
42 #include </home/staff/schieben/EarlyVision/src/Base/Vision/Recognition/AdditiveRegionImage.h>
43 #endif
44 
45 // OpenMP
46 #include <omp.h>
47 
48 // stdlib
49 #include <cmath>
50 #include <cstdlib>
51 
53 
54 // #include <fstream>
55 #include <pcl/io/pcd_io.h>
56 
57 void
58 CObjectRecognition::SaveObjectDescriptorPCD(const CObjectHypothesis* pHypothesis,
59  const std::string sObjectName,
60  const int nDescriptorNumber)
61 {
62  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
63  sFileName.append(sObjectName);
64  ARMARX_VERBOSE_S << "Saving object descriptor. File name: " << sFileName.c_str();
65 
66  // save the confirmed points
67  std::string sDescNum = std::to_string(nDescriptorNumber);
68  sFileName += "-cloud" + std::string(2 - sDescNum.length(), '0') + sDescNum + ".pcd";
69 
70  auto cloud = COLPTools::ConfirmedPointsToPCL(pHypothesis);
71  // pcl::io::savePCDFileASCII(sFileName, *cloud);
72  pcl::io::savePCDFileBinary(sFileName, *cloud);
73 
74  /*
75  std::ofstream fs(sFileName);
76  if (!fs.is_open())
77  {
78  ARMARX_WARNING << "Could not write to file '" << sFileName << ".pcd'" << armarx::flush;
79  return;
80  }
81 
82  fs << "VERSION 0.7\n";
83  fs << "FIELDS x y z rgba\n";
84  fs << "SIZE 4 4 4 4\n";
85  fs << "TYPE F F F U\n";
86  fs << "COUNT 1 1 1 1\n";
87  fs << "WIDTH " << pHypothesis->aConfirmedPoints.size() << "\n";
88  fs << "HEIGHT 1\n";
89  fs << "VIEWPOINT 0 0 0 1 0 0 0\n";
90  fs << "POINTS " << pHypothesis->aConfirmedPoints.size() << "\n";
91  fs << "DATA ascii\n";
92 
93  Vec3d vCenter = pHypothesis->vCenter;
94  for (const CHypothesisPoint* p : pHypothesis->aConfirmedPoints)
95  {
96  uint32_t rgb = 0;
97  rgb |= static_cast<uint8_t>(255.0f * p->fColorR * p->fIntensity) << 16;
98  rgb |= static_cast<uint8_t>(255.0f * p->fColorG * p->fIntensity) << 8;
99  rgb |= static_cast<uint8_t>(255.0f * p->fColorB * p->fIntensity);
100 
101  fs << p->vPosition.x - vCenter.x << " " << p->vPosition.y - vCenter.y << " " << p->vPosition.z - vCenter.z << " ";
102  fs << rgb << "\n";
103  }
104 
105  fs.close();
106  */
107 }
108 
109 void
110 CObjectRecognition::SaveObjectDescriptorRGBD(const CObjectHypothesis* pHypothesis,
111  const std::string sObjectName,
112  const int nDescriptorNumber)
113 {
114  setlocale(LC_NUMERIC, "C");
115  //printf ("Locale is: %s\n", setlocale(LC_NUMERIC, NULL));
116 
117  // generate filename and create file
118  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
119  sFileName.append(sObjectName);
120  std::string sFileNamePointsOnly = sFileName;
121  std::string sFileNamePointsOnlyNoncentered = sFileName;
122  sFileName.append("00.dat");
123  sFileNamePointsOnly.append("-points00.dat");
124  sFileNamePointsOnlyNoncentered.append("-points-noncentered00.dat");
125  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
126  COLPTools::SetNumberInFileName(sFileNamePointsOnly, nDescriptorNumber, 2);
127  COLPTools::SetNumberInFileName(sFileNamePointsOnlyNoncentered, nDescriptorNumber, 2);
128  ARMARX_VERBOSE_S << "Saving object descriptor. File name: " << sFileName.c_str();
129  FILE* pFile = fopen(sFileName.c_str(), "wt");
130  FILE* pFilePointsOnly = fopen(sFileNamePointsOnly.c_str(), "wt");
131  FILE* pFilePointsOnlyNoncentered = fopen(sFileNamePointsOnlyNoncentered.c_str(), "wt");
132 
133 
134  // save the confirmed points
135  fprintf(pFile, "%ld \n", pHypothesis->aConfirmedPoints.size());
136  Vec3d vCenter = pHypothesis->vCenter;
137 
138  for (size_t i = 0; i < pHypothesis->aConfirmedPoints.size(); i++)
139  {
140  CHypothesisPoint* pPoint = pHypothesis->aConfirmedPoints.at(i);
141  fprintf(pFile,
142  "%e %e %e %e %e %e %e \n",
143  pPoint->vPosition.x - vCenter.x,
144  pPoint->vPosition.y - vCenter.y,
145  pPoint->vPosition.z - vCenter.z,
146  pPoint->fColorR,
147  pPoint->fColorG,
148  pPoint->fColorB,
149  pPoint->fIntensity);
150  fprintf(pFilePointsOnly,
151  "%e %e %e \n",
152  pPoint->vPosition.x - vCenter.x,
153  pPoint->vPosition.y - vCenter.y,
154  pPoint->vPosition.z - vCenter.z);
155  fprintf(pFilePointsOnlyNoncentered,
156  "%e %e %e \n",
157  pPoint->vPosition.x,
158  pPoint->vPosition.y,
159  pPoint->vPosition.z);
160  }
161 
162  // optionally also save the candidate points (currently disabled)
163  if (false)
164  {
165  fprintf(pFile, "%ld \n", pHypothesis->aNewPoints.size());
166 
167  for (size_t i = 0; i < pHypothesis->aNewPoints.size(); i++)
168  {
169  CHypothesisPoint* pPoint = pHypothesis->aNewPoints.at(i);
170  fprintf(pFile,
171  "%e %e %e %e %e %e %e \n",
172  pPoint->vPosition.x - vCenter.x,
173  pPoint->vPosition.y - vCenter.y,
174  pPoint->vPosition.z - vCenter.z,
175  pPoint->fColorR,
176  pPoint->fColorG,
177  pPoint->fColorB,
178  pPoint->fIntensity);
179  fprintf(pFilePointsOnly,
180  "%e %e %e \n",
181  pPoint->vPosition.x - vCenter.x,
182  pPoint->vPosition.y - vCenter.y,
183  pPoint->vPosition.z - vCenter.z);
184  fprintf(pFilePointsOnlyNoncentered,
185  "%e %e %e \n",
186  pPoint->vPosition.x,
187  pPoint->vPosition.y,
188  pPoint->vPosition.z);
189  }
190  }
191 
192  // close the file
193  fclose(pFile);
194  fclose(pFilePointsOnly);
195  fclose(pFilePointsOnlyNoncentered);
196 }
197 
198 bool
199 CObjectRecognition::LoadObjectDescriptorRGBD(const std::string sObjectName,
200  const int nDescriptorNumber,
201  CObjectHypothesis*& pHypothesis)
202 {
203  setlocale(LC_NUMERIC, "C");
204 
205  // generate filename and open file
206  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
207  sFileName.append(sObjectName);
208  sFileName.append("00.dat");
209  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
210  FILE* pFile = fopen(sFileName.c_str(), "rt");
211 
212  if (pFile == NULL)
213  {
214  pFile = fopen(sFileName.c_str(), "rt");
215 
216  if (pFile == NULL)
217  {
218  ARMARX_VERBOSE_S << "Cannot open object file: " << sFileName.c_str();
219  pHypothesis = NULL;
220  return false;
221  }
222  }
223 
224  pHypothesis = new CObjectHypothesis();
225  pHypothesis->eType = CObjectHypothesis::eRGBD;
226  Math3d::SetVec(pHypothesis->vCenter, 0, 0, 0);
227 
228  // read the confirmed points
229  size_t nNumPoints;
230  int dummy = fscanf(pFile, "%lu", &nNumPoints);
231 
232  for (size_t i = 0; i < nNumPoints; i++)
233  {
234  CHypothesisPoint* pPoint = new CHypothesisPoint();
235  pPoint->ePointType = CHypothesisPoint::eDepthMapPoint;
236  dummy += fscanf(
237  pFile, "%e %e %e", &pPoint->vPosition.x, &pPoint->vPosition.y, &pPoint->vPosition.z);
238  dummy += fscanf(pFile,
239  "%e %e %e %e \n",
240  &pPoint->fColorR,
241  &pPoint->fColorG,
242  &pPoint->fColorB,
243  &pPoint->fIntensity);
244  pHypothesis->aConfirmedPoints.push_back(pPoint);
245  }
246 
247  // read the candidate points
248  dummy += fscanf(pFile, "%lu", &nNumPoints);
249 
250  for (size_t i = 0; i < nNumPoints; i++)
251  {
252  CHypothesisPoint* pPoint = new CHypothesisPoint();
253  pPoint->ePointType = CHypothesisPoint::eDepthMapPoint;
254  dummy += fscanf(
255  pFile, "%e %e %e", &pPoint->vPosition.x, &pPoint->vPosition.y, &pPoint->vPosition.z);
256  dummy += fscanf(pFile,
257  "%e %e %e %e \n",
258  &pPoint->fColorR,
259  &pPoint->fColorG,
260  &pPoint->fColorB,
261  &pPoint->fIntensity);
262  pHypothesis->aNewPoints.push_back(pPoint);
263  }
264 
265  return true;
266 }
267 
268 void
269 CObjectRecognition::FindPossibleObjectLocations(const CObjectHypothesis* pHypothesis,
270  const CByteImage* pHSVImage,
271  const std::vector<CHypothesisPoint*>& aScenePoints,
272  const CCalibration* calibration,
273  std::vector<Vec3d>& aPossibleLocations,
274  const int desiredNumberOfLocations)
275 {
276  const int width = pHSVImage->width;
277  const int height = pHSVImage->height;
278 
279  // detect possible locations of the object
280  std::vector<float> hueHistogram, saturationHistogram;
281  COLPTools::CreateHueAndSaturationHistogram(pHypothesis, hueHistogram, saturationHistogram);
282 
283  CByteImage* probabilityImage = new CByteImage(width, height, CByteImage::eGrayScale);
284  FindSimilarRegionsWithHueAndSaturationHistogram(
285  pHSVImage, hueHistogram, saturationHistogram, probabilityImage);
286 
287  //ImageProcessor::Spread(probabilityImage, probabilityImage);
288  ::ImageProcessor::HistogramStretching(
289  probabilityImage, probabilityImage, 0.7f, 1.0f); // 0.85, 1.0
290 
291 
292  // calculate median in x and y direction
293 
294  std::vector<long> rowSums(height, 0);
295  std::vector<long> columnSums(width, 0);
296 
297  long sum = 0;
298 
299  for (int i = 0; i < height; i++)
300  {
301  for (int j = 0; j < width; j++)
302  {
303  rowSums[i] += probabilityImage->pixels[i * width + j];
304  columnSums[j] += probabilityImage->pixels[i * width + j];
305  sum += probabilityImage->pixels[i * width + j];
306  }
307  }
308 
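 // Find the weighted median of the probability mass: the first column/row whose
 // cumulative sum exceeds half of the total sum.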
309  sum /= 2;
310 
311  for (int i = 1; i < height; i++)
312  {
313  rowSums[i] += rowSums[i - 1];
314  }
315 
316  for (int j = 1; j < width; j++)
317  {
318  columnSums[j] += columnSums[j - 1];
319  }
320 
321  int x = 0;
322 
323  while ((x < width) && (columnSums[x] < sum))
324  {
325  x++;
326  }
327 
328  int y = 0;
329 
330  while ((y < height) && (rowSums[y] < sum))
331  {
332  y++;
333  }
334 
335  Vec2d vMedian = {(float)x, (float)y};
336 
337 
338  // cluster probable areas
339 
340  std::vector<Vec3d> samplePoints;
341 
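 // Randomly keep pixels with probability proportional to their value in the
 // probability image; a second test subsamples further depending on the
 // point cloud matching effort level.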
342  for (int i = 0; i < height; i++)
343  {
344  for (int j = 0; j < width; j++)
345  {
346  if (rand() % 256 < probabilityImage->pixels[i * width + j])
347  {
348  if (rand() % 100 < 40 + 10 * OLP_EFFORT_POINTCLOUD_MATCHING)
349  {
350  Vec3d newVec = {(float)j, (float)i, 0};
351  samplePoints.push_back(newVec);
352  }
353  }
354  }
355  }
356 
357  std::vector<std::vector<Vec3d>> clusters;
358  std::vector<std::vector<int>> oldIndices;
359 
361  {
362  COLPTools::ClusterXMeans(
363  samplePoints, 1, 5, OLP_CLUSTERING_FACTOR_OBJECT_LOCALIZATION, clusters, oldIndices);
364  }
365  else
366  {
367  COLPTools::ClusterXMeans(samplePoints,
368  1,
369  4,
371  clusters,
372  oldIndices);
373  }
374 
375  std::vector<Vec2d> locationCandidates;
376  locationCandidates.push_back(vMedian);
377 
378  if (clusters.size() > 1)
379  {
380  for (size_t i = 0; i < clusters.size(); i++)
381  {
382  Vec3d vClusterCenter3D;
383  float fTemp;
384  COLPTools::GetMeanAndVariance(clusters.at(i), vClusterCenter3D, fTemp);
385  Vec2d vClusterCenter2D = {vClusterCenter3D.x, vClusterCenter3D.y};
386  locationCandidates.push_back(vClusterCenter2D);
387  //COLPTools::DrawCross(probabilityImage, vClusterCenter2D.x, vClusterCenter2D.y, 50);
388  }
389  }
390 
391  int numAttempts = 0;
392 
393  while ((int)locationCandidates.size() < desiredNumberOfLocations && numAttempts < 10000)
394  {
395  int u = rand() % probabilityImage->width;
396  int v = rand() % probabilityImage->height;
397 
398  if (probabilityImage->pixels[v * probabilityImage->width + u] > rand() % 500)
399  {
400  Vec2d pos = {(float)u, (float)v};
401  locationCandidates.push_back(pos);
402  }
403 
404  numAttempts++;
405  }
406 
407 
408  // visualize
409  if (OLP_MAKE_INTERMEDIATE_SCREENSHOTS)
410  {
411  for (size_t n = 0; n < locationCandidates.size(); n++)
412  {
413  COLPTools::DrawCross(
414  probabilityImage, locationCandidates.at(n).x, locationCandidates.at(n).y, 50);
415  }
416 
417  std::string filename = OLP_SCREENSHOT_PATH;
418  filename += "prob.bmp";
419  probabilityImage->SaveToFile(filename.c_str());
420  }
421 
422 
423  // calculate 3D positions
424  for (size_t n = 0; n < locationCandidates.size(); n++)
425  {
426  // calculate the 3D line defined by the 2D image point, and find the closest point in the scene point cloud
427  Vec3d point1, point2, direction, estimatedPosition, temp1, temp2;
428  calibration->ImageToWorldCoordinates(locationCandidates.at(n), point1, 100.0f, false);
429  calibration->ImageToWorldCoordinates(locationCandidates.at(n), point2, 200.0f, false);
430  Math3d::SubtractVecVec(point1, point2, direction);
431  Math3d::NormalizeVec(direction);
432  float fDist, fMinDist = 10000, fBestZ = 500;
433 
434  for (size_t i = 0; i < aScenePoints.size(); i++)
435  {
436  if (Math3d::Length(aScenePoints.at(i)->vPosition) > 0)
437  {
438  Math3d::SubtractVecVec(aScenePoints.at(i)->vPosition, point1, temp1);
439  Math3d::CrossProduct(temp1, direction, temp2);
440  fDist = Math3d::Length(temp2);
441 
442  if (fDist < fMinDist)
443  {
444  fMinDist = fDist;
445  fBestZ = aScenePoints.at(i)->vPosition.z;
446  }
447  }
448  }
449 
450  // get the point corresponding to the pixel with the same distance to the camera as the closest scene point
451  calibration->ImageToWorldCoordinates(
452  locationCandidates.at(n), estimatedPosition, fBestZ, false);
453 
454  //ARMARX_VERBOSE_S << "(%d, %d) (%.1f, %.1f, %.1f)\n", locationCandidates.at(n).x, locationCandidates.at(n).y, estimatedPosition.x, estimatedPosition.y, estimatedPosition.z);
455  aPossibleLocations.push_back(estimatedPosition);
456  }
457 
458  //probabilityImage->SaveToFile("/homes/staff/schieben/datalog/prob.bmp");
459  delete probabilityImage;
460 }
461 
462 void
463 CObjectRecognition::FindSimilarRegionsWithHueAndSaturationHistogram(
464  const CByteImage* pHSVImage,
465  const std::vector<float>& aHueHistogram,
466  const std::vector<float>& aSaturationHistogram,
467  CByteImage* pProbabilityImage)
468 {
469  const int nNumScales = 2;
470  const float fVisualizationFactor = 0.33f * 255.0f / (float)nNumScales;
471  const int nMinNumSubdivisionsX = 4;
472 
473  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
474  {
475  pProbabilityImage->pixels[i] = 0;
476  }
477 
478  std::vector<float> aHueHistogramWindow;
479  aHueHistogramWindow.resize(aHueHistogram.size());
480  std::vector<float> aSaturationHistogramWindow;
481  aSaturationHistogramWindow.resize(aSaturationHistogram.size());
482 
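 // Scan the image with sliding windows at several scales; at each scale the
 // window grid is additionally shifted by +/- one third of the window size.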
483  for (int n = 0; n < nNumScales; n++)
484  {
485  //const int nNumSubdivisions = nMinNumNumSubdivisions << n;
486  const int nNumSubdivisionsX = (int)((double)nMinNumSubdivisionsX * pow(sqrt(2), n));
487  const int nNumSubdivisionsY =
488  (int)(3.0 / 4.0 * (double)nMinNumSubdivisionsX * pow(sqrt(2), n));
489  const int nWindowSizeX = OLP_IMG_WIDTH / nNumSubdivisionsX;
490  const int nWindowSizeY = OLP_IMG_HEIGHT / nNumSubdivisionsY;
491  int nMinX, nMaxX, nMinY, nMaxY;
492 
493  const int nWindowSizeX_3 = nWindowSizeX / 3;
494  const int nWindowSizeY_3 = nWindowSizeY / 3;
495 
496  for (int i = 0; i < nNumSubdivisionsY; i++)
497  {
498  for (int j = 0; j < nNumSubdivisionsX; j++)
499  {
500  // shift window
501  for (int m = -1; m <= 1; m++)
502  {
503  nMinX = j * nWindowSizeX + m * nWindowSizeX_3;
504  nMaxX = (j + 1) * nWindowSizeX + m * nWindowSizeX_3;
505  nMinY = i * nWindowSizeY + m * nWindowSizeY_3;
506  nMaxY = (i + 1) * nWindowSizeY + m * nWindowSizeY_3;
507 
508  if (nMinX < 0)
509  {
510  nMinX = 0;
511  }
512 
513  if (nMinY < 0)
514  {
515  nMinY = 0;
516  }
517 
518  if (nMaxX > OLP_IMG_WIDTH)
519  {
520  nMaxX = OLP_IMG_WIDTH;
521  }
522 
523  if (nMaxY > OLP_IMG_HEIGHT)
524  {
525  nMaxY = OLP_IMG_HEIGHT;
526  }
527 
528  COLPTools::CreateHueAndSaturationHistogramInWindow(pHSVImage,
529  nMinX,
530  nMinY,
531  nMaxX,
532  nMaxY,
533  aHueHistogramWindow,
534  aSaturationHistogramWindow);
535 
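 // Compare the window histograms against the object histograms using the
 // chi-square (X^2) distance, averaged over the hue and saturation channels.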
536  const float fHistogramDistance =
537  0.5f *
538  COLPTools::GetHistogramDistanceX2(aHueHistogram, aHueHistogramWindow) +
539  0.5f * COLPTools::GetHistogramDistanceX2(aSaturationHistogram,
540  aSaturationHistogramWindow);
541  const float fCorrelation = 1.0f - (0.25f * fHistogramDistance *
542  fHistogramDistance); // distance in [0,2]
543 
544  // visualize in probability image
545  unsigned char nCorrelationValueVisualization =
546  (unsigned char)(fVisualizationFactor * fCorrelation);
547 
548  for (int k = nMinY; k < nMaxY; k++)
549  {
550  for (int l = nMinX; l < nMaxX; l++)
551  {
552  pProbabilityImage->pixels[k * OLP_IMG_WIDTH + l] +=
553  nCorrelationValueVisualization;
554  }
555  }
556  }
557  }
558  }
559  }
560 }
561 
562 void
563 CObjectRecognition::FindObjectRGBD(const CObjectHypothesis* pHypothesis,
564  const CByteImage* pHSVImage,
565  const std::vector<CHypothesisPoint*>& aScenePoints,
566  const CCalibration* calibration,
567  const Vec3d upwardsVector,
568  Vec3d& vTranslation,
569  Mat3d& mOrientation,
570  float& distance,
571  float& fProbability,
572  CByteImage* pResultImage)
573 {
574  // get possible object locations based on color region similarity
575  std::vector<Vec3d> aPossibleLocations;
576  FindPossibleObjectLocations(
577  pHypothesis, pHSVImage, aScenePoints, calibration, aPossibleLocations);
578 
579  // set up the point cloud registration
580  CPointCloudRegistration* pPointCloudRegistration = new CPointCloudRegistration();
581  pPointCloudRegistration->SetScenePointcloud(aScenePoints);
582 
583 
584  // try to match the object at the promising locations
585  distance = pPointCloudRegistration->EstimateTransformation(pHypothesis,
586  mOrientation,
587  vTranslation,
588  upwardsVector,
589  OLP_EFFORT_POINTCLOUD_MATCHING,
590  &aPossibleLocations);
591  delete pPointCloudRegistration;
592 
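 // Heuristically map the registration distance to a pseudo-probability in (0, 1]:
 // a distance of 0 yields 1, larger distances decay towards 0.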
593  fProbability = 1.0f / (1.0f + 0.12f * distance);
594 
595  ARMARX_IMPORTANT_S << "Distance: " << distance << ", probability: " << fProbability;
596  ARMARX_VERBOSE_S << "Translation: ( " << vTranslation.x << ", " << vTranslation.y << ", "
597  << vTranslation.z << ")";
598 
599 
600  // visualize
601  if (pResultImage)
602  {
603  std::vector<CHypothesisPoint*> aObjectPoints;
604 
605  for (size_t i = 0; i < pHypothesis->aConfirmedPoints.size(); i++)
606  {
607  aObjectPoints.push_back(pHypothesis->aConfirmedPoints.at(i)->GetCopy());
608  Math3d::MulMatVec(mOrientation,
609  aObjectPoints.back()->vPosition,
610  vTranslation,
611  aObjectPoints.back()->vPosition);
612  }
613 
614  unsigned char r, g, b;
615 
616  if (fProbability > 0.5f)
617  {
618  r = 0;
619  g = 200;
620  b = 0;
621  }
622  else
623  {
624  r = 100;
625  g = 20;
626  b = 20;
627  }
628 
629  Vec2d vPos2D;
630 
631  for (size_t i = 0; i < aObjectPoints.size(); i++)
632  {
633  calibration->WorldToImageCoordinates(aObjectPoints.at(i)->vPosition, vPos2D, false);
634 
635  if (vPos2D.x > 1 && vPos2D.y > 1 && vPos2D.x < OLP_IMG_WIDTH - 2 &&
636  vPos2D.y < OLP_IMG_HEIGHT - 2)
637  {
638  for (int j = -1; j <= 1; j++)
639  {
640  pResultImage
641  ->pixels[3 * (((int)vPos2D.y + j) * OLP_IMG_WIDTH + (int)vPos2D.x)] = r;
642  pResultImage
643  ->pixels[3 * (((int)vPos2D.y + j) * OLP_IMG_WIDTH + (int)vPos2D.x) + 1] = g;
644  pResultImage
645  ->pixels[3 * (((int)vPos2D.y + j) * OLP_IMG_WIDTH + (int)vPos2D.x) + 2] = b;
646 
647  pResultImage->pixels[3 * ((int)vPos2D.y * OLP_IMG_WIDTH + (int)vPos2D.x + j)] =
648  r;
649  pResultImage
650  ->pixels[3 * ((int)vPos2D.y * OLP_IMG_WIDTH + (int)vPos2D.x + j) + 1] = g;
651  pResultImage
652  ->pixels[3 * ((int)vPos2D.y * OLP_IMG_WIDTH + (int)vPos2D.x + j) + 2] = b;
653  }
654  }
655  }
656 
657  // cleanup
658  for (size_t i = 0; i < aObjectPoints.size(); i++)
659  {
660  delete aObjectPoints.at(i);
661  }
662  }
663 }
664 
665 void
666 CObjectRecognition::FindAllObjectsRGBD(const CByteImage* pHSVImage,
667  const CByteImage* pRGBImage,
668  const std::vector<CHypothesisPoint*>& aScenePoints,
669  const CCalibration* calibration,
670  const Vec3d upwardsVector,
671  std::vector<std::string>& aNames,
672  std::vector<Vec3d>& aPositions,
673  std::vector<Mat3d>& aOrientations,
674  std::vector<float>& aProbabilities)
675 {
676  // load the names of the known objects, and the numbers of the respective descriptor file to be used
677  std::string sObjectNamesFileName = OLP_OBJECT_LEARNING_DIR;
678  sObjectNamesFileName += "names.txt";
679  FILE* pObjectNamesFile = fopen(sObjectNamesFileName.c_str(), "rt");
680 
681  if (pObjectNamesFile == NULL)
682  {
683  ARMARX_VERBOSE_S << "Opening file with object names failed!";
684  return;
685  }
686 
687  int nNumObjects;
688  int dummy = fscanf(pObjectNamesFile, "%d", &nNumObjects);
689  char pcObjName[100];
690  int nDescriptorNumber;
691  std::string sObjName;
692  std::vector<int> aDescriptorNumbers;
693  aNames.clear();
694 
695  for (int i = 0; i < nNumObjects; i++)
696  {
697  dummy += fscanf(pObjectNamesFile, "%s", pcObjName);
698  dummy += fscanf(pObjectNamesFile, "%d", &nDescriptorNumber);
699  sObjName = pcObjName;
700  aNames.push_back(sObjName);
701  aDescriptorNumbers.push_back(nDescriptorNumber);
702  }
703 
704  aPositions.resize(nNumObjects);
705  aOrientations.resize(nNumObjects);
706  aProbabilities.resize(nNumObjects);
707  std::vector<float> aDistances;
708  aDistances.resize(nNumObjects);
709  fclose(pObjectNamesFile);
710 
711  // debug: result images
712  CByteImage** pResultImages = new CByteImage*[nNumObjects];
713 
714  for (int i = 0; i < nNumObjects; i++)
715  {
716  pResultImages[i] = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
717  ImageProcessor::CopyImage(pRGBImage, pResultImages[i]);
718  }
719 
720  // try to recognize the objects
721  for (int i = 0; i < nNumObjects; i++)
722  {
723  CObjectHypothesis* pHypothesis;
724  ARMARX_VERBOSE_S << aNames.at(i) << " (" << i << ")";
725  LoadObjectDescriptorRGBD(aNames.at(i), aDescriptorNumbers.at(i), pHypothesis);
726  FindObjectRGBD(pHypothesis,
727  pHSVImage,
728  aScenePoints,
729  calibration,
730  upwardsVector,
731  aPositions.at(i),
732  aOrientations.at(i),
733  aDistances.at(i),
734  aProbabilities.at(i),
735  pResultImages[i]);
736  delete pHypothesis;
737  }
738 
739 
740  // debug: write recognition results to file
741  for (int i = 0; i < nNumObjects; i++)
742  {
743  std::string sResImgName = "/home/staff/schieben/datalog/reco00.bmp";
744  COLPTools::SetNumberInFileName(sResImgName, i, 2);
745  pResultImages[i]->SaveToFile(sResImgName.c_str());
746  delete pResultImages[i];
747  }
748 
749  delete[] pResultImages;
750 }
751 
752 void
753 CObjectRecognition::FindObjectWithManyDescriptorsRGBD(
754  const CByteImage* pHSVImage,
755  const CByteImage* pRGBImage,
756  const std::vector<CHypothesisPoint*>& aScenePoints,
757  const CCalibration* calibration,
758  const Vec3d upwardsVector,
759  const std::string objectName,
760  const int numDescriptors)
761 {
762  std::string directory = OLP_OBJECT_LEARNING_DIR;
763  std::string sFileName = directory + "reco_dist_" + objectName + ".txt";
764  FILE* pFile = fopen(sFileName.c_str(), "a");
765 
766  CByteImage* resultImage = new CByteImage(pRGBImage);
767  ImageProcessor::CopyImage(pRGBImage, resultImage);
768 
769  Vec3d position;
770  Mat3d orientation;
771  float probability = -1;
772  float distance = -1;
773 
774  for (int i = 1; i <= numDescriptors; i++)
775  {
776  CObjectHypothesis* pHypothesis;
777  ARMARX_INFO_S << "Recognizing " << objectName << " (descriptor " << i << ")";
778  LoadObjectDescriptorRGBD(objectName, i, pHypothesis);
779  FindObjectRGBD(pHypothesis,
780  pHSVImage,
781  aScenePoints,
782  calibration,
783  upwardsVector,
784  position,
785  orientation,
786  distance,
787  probability,
788  resultImage);
789  fprintf(pFile, "%e ", distance);
790  delete pHypothesis;
791  }
792 
793  std::string sResImgName = directory + "reco_result.bmp";
794  resultImage->SaveToFile(sResImgName.c_str());
795 
796  fprintf(pFile, "\n");
797  fclose(pFile);
798 }
799 
800 void
801 CObjectRecognition::SaveObjectDescriptorRFCH(const CByteImage* pRGBImage,
802  const CByteImage* pObjectMask,
803  const std::string sObjectName,
804  const int nDescriptorNumber)
805 {
806 #ifdef OLP_USE_EARLYVISION
807  ///////////////////////////////////////////////////////////
808  // parameters
809  ///////////////////////////////////////////////////////////
810  const int nNumberClusters =
811  30; // number of clusters to use. More clusters are more descriptive, but too many clusters cause overfitting under varying lighting conditions.
812  const float fErrorThreshold =
813  0.5; // threshold for matching histograms. Should be reduced if too many matches arise.
814  const float fMatchThreshold = 0.5; // threshold for assigning values to clusters
815 
816  ////////////////////////////////
817  // create signature calculator
818  ////////////////////////////////
819  CRFCSignatureCalculator* pCalc = new CRFCSignatureCalculator(nNumberClusters);
820 
821  pCalc->SetUseZeroOffset(false);
822  pCalc->SetErrorThreshold(fErrorThreshold);
823  pCalc->SetCorrespondanceDistanceFactor(fMatchThreshold);
824  pCalc->SetMinConsidered(0.5);
825 
826  ///////////////////////////////////////////////////////////
827  // setup dimension. You can use any dimension from Base/Features/RFCH/Descriptors/
828  ///////////////////////////////////////////////////////////
829  CHueDescriptor* pHue = new CHueDescriptor(0, 10, 255, 10, 255);
830  CHueLaplacianDescriptor* pHueL = new CHueLaplacianDescriptor(0, 10, 255, 10, 255);
831  pCalc->AddDimension(pHue);
832  pCalc->AddDimension(pHueL);
833 
834  pCalc->Init(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, 30, 20); // number windows: 30 x 20 y
835 
836  // train query
837  CByteImage* pRGBImageCopy = new CByteImage(pRGBImage);
838  ImageProcessor::CopyImage(pRGBImage, pRGBImageCopy);
839 
840  if (pRGBImageCopy->width != pRGBImage->width)
841  {
842  ARMARX_VERBOSE_S << "Bug: image copy width mismatch!";
843  }
844 
845  CByteImage* pObjectMaskCopy = new CByteImage(pObjectMask);
846  ImageProcessor::CopyImage(pObjectMask, pObjectMaskCopy);
847  CRFCSignatureFeatureEntry* pFeatureDescriptor =
848  (CRFCSignatureFeatureEntry*)pCalc->Train(pRGBImageCopy, pObjectMaskCopy);
849  CRFCSignatureCalculator::NormalizeHistogram(pFeatureDescriptor, 10000); // always do that
850 
851  // generate filename, create file, write descriptor
852  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
853  sFileName.append(sObjectName);
854  sFileName.append("00.rfc");
855  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
856  ARMARX_VERBOSE_S << "Saving object descriptor. File name: " << sFileName.c_str();
857  FILE* pFile = fopen(sFileName.c_str(), "wt");
858 
859  if (!pFeatureDescriptor->WriteToFile(pFile))
860  {
861  ARMARX_VERBOSE_S << "Error writing to file!";
862  }
863 
864  fclose(pFile);
865 #endif
866 }
867 
868 void
869 CObjectRecognition::LoadObjectDescriptorRFCH(const std::string sObjectName,
870  const int nDescriptorNumber,
871  CRFCSignatureFeatureEntry*& pFeatureDescriptor)
872 {
873 #ifdef OLP_USE_EARLYVISION
874  // generate filename
875  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
876  sFileName.append(sObjectName);
877  sFileName.append("00.rfc");
878  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
879  FILE* pFile = fopen(sFileName.c_str(), "rt");
880 
881  if (pFile == NULL)
882  {
883  ARMARX_VERBOSE_S << "Cannot open object file: " << sFileName.c_str();
884  pFeatureDescriptor = NULL;
885  }
886  else
887  {
888  pFeatureDescriptor->ReadFromFile(pFile);
889  }
890 
891 #endif
892 }
893 
894 void
895 CObjectRecognition::FindObjectRFCH(CRFCSignatureFeatureEntry* pFeatureDescriptor,
896  CByteImage* pRGBImage,
897  const std::vector<CHypothesisPoint*>& aScenePoints,
898  const CCalibration* calibration,
899  Vec3d& vPosition,
900  Mat3d& mOrientation,
901  float& fProbability,
902  CByteImage* pResultImage)
903 {
904 #ifdef OLP_USE_EARLYVISION
905  ///////////////////////////////////////////////////////////
906  // parameters
907  ///////////////////////////////////////////////////////////
908  const int nNumberClusters =
909  30; // number of clusters to use. More clusters are more descriptive, but too many clusters cause overfitting under varying lighting conditions.
910  const float fErrorThreshold =
911  0.5; // threshold for matching histograms. Should be reduced if too many matches arise.
912  const float fMatchThreshold = 0.5; // threshold for assigning values to clusters
913  float fGrowThreshold =
914  0.8; // grow threshold of the additive region image. If the detected area is too big, reduce this value; otherwise, increase it.
915 
916  ////////////////////////////////
917  // create signature calculator
918  ////////////////////////////////
919  CRFCSignatureCalculator* pCalc = new CRFCSignatureCalculator(nNumberClusters);
920 
921  pCalc->SetUseZeroOffset(false);
922  pCalc->SetErrorThreshold(fErrorThreshold);
923  pCalc->SetCorrespondanceDistanceFactor(fMatchThreshold);
924  pCalc->SetMinConsidered(0.5);
925 
926  ///////////////////////////////////////////////////////////
927  // setup dimension. You can use any dimension from Base/Features/RFCH/Descriptors/
928  ///////////////////////////////////////////////////////////
929  CHueDescriptor* pHue = new CHueDescriptor(0, 10, 255, 10, 255);
930  CHueLaplacianDescriptor* pHueL = new CHueLaplacianDescriptor(0, 10, 255, 10, 255);
931  pCalc->AddDimension(pHue);
932  pCalc->AddDimension(pHueL);
933 
934  pCalc->Init(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, 30, 20); // number windows: 30 x 20 y
935 
936 
937  // match in scene
938  CFeatureEntry* pfQuery = pFeatureDescriptor;
939  pCalc->PrepareMatching(pRGBImage, NULL, &pfQuery, 1); // query one feature
940  vector<TRFCMatch> aMatches = pCalc->Match(
941  pfQuery, 2, 2, 4, 4); // 2,2: min number of windows x,y 4,4: max number of windows x,y
942 
943  ARMARX_VERBOSE_S << aMatches.size() << " matches extracted";
944 
945  // apply ARI
946  int nBoarderOffset = pCalc->GetBoarderOffset();
947  int nNumberWindowsX, nNumberWindowsY, nWindowPixelsX, nWindowPixelsY;
948 
949  pCalc->GetNumberWindows(nNumberWindowsX, nNumberWindowsY); // get number of search windows
950  pCalc->GetWindowSize(nWindowPixelsX, nWindowPixelsY); // get size of minimum window size
951 
952  CAdditiveRegionImage* pARI = new CAdditiveRegionImage(
953  nNumberWindowsX, nNumberWindowsY, nWindowPixelsX, nWindowPixelsY, fGrowThreshold);
954  pARI->setOffsets(nBoarderOffset, nBoarderOffset);
955 
956  vector<TRFCMatch>::iterator iter = aMatches.begin();
957  int nID = 0;
958  std::vector<TRegionListEntry> aRegionList;
959 
960  while (iter != aMatches.end())
961  {
962  TRegionListEntry entry;
963  entry.region = iter->region;
964  entry.fActivation = iter->fMatch;
965  entry.id = nID;
966  entry.bUse = true;
967 
968  aRegionList.push_back(entry);
969 
970  nID++;
971  iter++;
972  }
973 
974  pARI->set(aRegionList);
975  ARMARX_VERBOSE_S << "Evaluating ARI (GrowThreshold: " << fGrowThreshold << ") ...";
976  std::vector<TResultRegion> aRegions = pARI->calculateRegions();
977 
978  ARMARX_VERBOSE_S << "Extracted " << aRegions.size() << " matching regions";
979 
980  // visualize
981 
982  if (pResultImage != NULL)
983  {
984  pARI->visualize(pResultImage, aRegions);
985  pResultImage->SaveToFile("/home/staff/schieben/datalog/reco.bmp");
986  }
987 
988 
989  // find object center
990  if (aRegionList.size() > 0)
991  {
992  // calculate a 3D position: calculate the 3D line defined by the 2D image point, and find the closest point in the scene point cloud
993  Vec2d vCenter2D = aRegionList.at(0).region.centroid;
994  Vec3d vPoint1, vPoint2, vDirection, vTemp1, vTemp2;
995  calibration->ImageToWorldCoordinates(vCenter2D, vPoint1, 100.0f, false);
996  calibration->ImageToWorldCoordinates(vCenter2D, vPoint2, 200.0f, false);
997  Math3d::SubtractVecVec(vPoint1, vPoint2, vDirection);
998  Math3d::NormalizeVec(vDirection);
999  float fDist, fMinDist = 10000, fBestZ = 500;
1000 
1001  for (size_t i = 0; i < aScenePoints.size(); i++)
1002  {
1003  Math3d::SubtractVecVec(aScenePoints.at(i)->vPosition, vPoint1, vTemp1);
1004  Math3d::CrossProduct(vTemp1, vDirection, vTemp2);
1005  fDist = Math3d::Length(vTemp2);
1006 
1007  if (fDist < fMinDist)
1008  {
1009  fMinDist = fDist;
1010  fBestZ = aScenePoints.at(i)->vPosition.z;
1011  }
1012  }
1013 
1014  // get the point corresponding to the pixel with the same distance to the camera as the closest scene point
1015  calibration->ImageToWorldCoordinates(vCenter2D, vPosition, fBestZ, false);
1016 
1017  Math3d::SetMat(mOrientation, Math3d::unit_mat);
1018  fProbability = aRegionList.at(0).fActivation;
1019  }
1020  else
1021  {
1022  Math3d::SetVec(vPosition, 0, 0, 0);
1023  Math3d::SetMat(mOrientation, Math3d::unit_mat);
1024  fProbability = 0;
1025  }
1026 
1027 #endif
1028 }
1029 
1030 void
1031 CObjectRecognition::FindAllObjectsRFCH(const CByteImage* pRGBImage,
1032  const CCalibration* calibration,
1033  const std::vector<CHypothesisPoint*>& aScenePoints,
1034  std::vector<std::string>& aNames,
1035  std::vector<Vec3d>& aPositions,
1036  std::vector<Mat3d>& aOrientations,
1037  std::vector<float>& aProbabilities)
1038 {
1039 #ifdef OLP_USE_EARLYVISION
1040  CByteImage* pResultImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
1041  ImageProcessor::CopyImage(pRGBImage, pResultImage);
1042  CByteImage* pRGBImageCopy = new CByteImage(pRGBImage);
1043  ImageProcessor::CopyImage(pRGBImage, pRGBImageCopy);
1044 
1045  // load the names of the known objects, and the numbers of the respective descriptor file to be used
1046  std::string sObjectNamesFileName = OLP_OBJECT_LEARNING_DIR;
1047  sObjectNamesFileName += "names.txt";
1048  FILE* pObjectNamesFile = fopen(sObjectNamesFileName.c_str(), "rt");
1049 
1050  if (pObjectNamesFile == NULL)
1051  {
1052  ARMARX_VERBOSE_S << "Opening file with object names failed!";
1053  return;
1054  }
1055 
1056  int nNumObjects;
1057  fscanf(pObjectNamesFile, "%d", &nNumObjects);
1058  char pcObjName[100];
1059  int nDescriptorNumber;
1060  std::string sObjName;
1061  std::vector<int> aDescriptorNumbers;
1062  aNames.clear();
1063 
1064  for (int i = 0; i < nNumObjects; i++)
1065  {
1066  fscanf(pObjectNamesFile, "%s", pcObjName);
1067  fscanf(pObjectNamesFile, "%d", &nDescriptorNumber);
1068  sObjName = pcObjName;
1069  aNames.push_back(sObjName);
1070  aDescriptorNumbers.push_back(nDescriptorNumber);
1071  }
1072 
1073  aPositions.resize(nNumObjects);
1074  aOrientations.resize(nNumObjects);
1075  aProbabilities.resize(nNumObjects);
1076  fclose(pObjectNamesFile);
1077 
1078  // try to recognize the objects
1079  const int nNumberClusters = 30;
1080  CRFCSignatureFeatureEntry* pFeatureDescriptor =
1081  new CRFCSignatureFeatureEntry(2, nNumberClusters);
1082 
1083  for (int i = 0; i < nNumObjects; i++)
1084  {
1085  ARMARX_VERBOSE_S << aNames.at(i);
1086  LoadObjectDescriptorRFCH(aNames.at(i), aDescriptorNumbers.at(i), pFeatureDescriptor);
1087  FindObjectRFCH(pFeatureDescriptor,
1088  pRGBImageCopy,
1089  aScenePoints,
1090  calibration,
1091  aPositions.at(i),
1092  aOrientations.at(i),
1093  aProbabilities.at(i),
1094  pResultImage);
1095  }
1096 
1097  delete pFeatureDescriptor;
1098 
1099  pResultImage->SaveToFile("/home/staff/schieben/datalog/reco.bmp");
1100  delete pResultImage;
1101 #endif
1102 }
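// ---------------------------------------------------------------------------
// Usage sketch (illustration only): how the static RGBD descriptor API above
// is typically chained, i.e. load a previously saved descriptor and localize
// it in the current scene. The function name RecognizeExample and the object
// name "cup" are hypothetical; the images, scene points, calibration and
// upwards vector are assumed to be provided by the surrounding
// ObjectLearningByPushing pipeline.
//
// void RecognizeExample(const CByteImage* pHSVImage,
//                       const std::vector<CHypothesisPoint*>& aScenePoints,
//                       const CCalibration* calibration,
//                       const Vec3d upwardsVector)
// {
//     CObjectHypothesis* pHypothesis = NULL;
//     if (CObjectRecognition::LoadObjectDescriptorRGBD("cup", 1, pHypothesis))
//     {
//         Vec3d vPosition;
//         Mat3d mOrientation;
//         float distance = -1.0f, probability = -1.0f;
//         CObjectRecognition::FindObjectRGBD(pHypothesis, pHSVImage, aScenePoints,
//                                            calibration, upwardsVector, vPosition,
//                                            mOrientation, distance, probability,
//                                            NULL);
//         delete pHypothesis;
//     }
// }
// ---------------------------------------------------------------------------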