ObjectRecognition.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 #include "ObjectRecognition.h"
26 #include "OLPTools.h"
27 #include "PointCloudRegistration.h"
28 
29 // IVT
30 #include <Image/ByteImage.h>
31 #include <Image/ImageProcessor.h>
32 #include <Calibration/Calibration.h>
33 #include <Calibration/StereoCalibration.h>
34 
35 #ifdef OLP_USE_EARLYVISION
36 // EV
37 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/RFCSignatureCalculator.h>
38 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/RFCSignatureFeatureEntry.h>
39 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/Descriptors/HueDescriptor.h>
40 #include </home/staff/schieben/EarlyVision/src/Base/Features/RFCH/Descriptors/HueLaplacianDescriptor.h>
41 #include </home/staff/schieben/EarlyVision/src/Base/Vision/Recognition/AdditiveRegionImage.h>
42 #endif
43 
44 // OpenMP
45 #include <omp.h>
46 
47 // stdlib
48 #include <cmath>
49 #include <cstdlib>
50 
52 
53 // #include <fstream>
54 #include <pcl/io/pcd_io.h>
55 
56 void CObjectRecognition::SaveObjectDescriptorPCD(const CObjectHypothesis* pHypothesis, const std::string sObjectName, const int nDescriptorNumber)
57 {
58  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
59  sFileName.append(sObjectName);
60  ARMARX_VERBOSE_S << "Saving object descriptor. File name: " << sFileName.c_str();
61 
62  // save the confirmed points
63  std::string sDescNum = std::to_string(nDescriptorNumber);
64  sFileName += "-cloud" + std::string(2 - sDescNum.length(), '0') + sDescNum + ".pcd";
65 
66  auto cloud = COLPTools::ConfirmedPointsToPCL(pHypothesis);
67  // pcl::io::savePCDFileASCII(sFileName, *cloud);
68  pcl::io::savePCDFileBinary(sFileName, *cloud);
69 
70  /*
71  std::ofstream fs(sFileName);
72  if (!fs.is_open())
73  {
74  ARMARX_WARNING << "Could not write to file '" << sFileName << ".pcd'" << armarx::flush;
75  return;
76  }
77 
78  fs << "VERSION 0.7\n";
79  fs << "FIELDS x y z rgba\n";
80  fs << "SIZE 4 4 4 4\n";
81  fs << "TYPE F F F U\n";
82  fs << "COUNT 1 1 1 1\n";
83  fs << "WIDTH " << pHypothesis->aConfirmedPoints.size() << "\n";
84  fs << "HEIGHT 1\n";
85  fs << "VIEWPOINT 0 0 0 1 0 0 0\n";
86  fs << "POINTS " << pHypothesis->aConfirmedPoints.size() << "\n";
87  fs << "DATA ascii\n";
88 
89  Vec3d vCenter = pHypothesis->vCenter;
90  for (const CHypothesisPoint* p : pHypothesis->aConfirmedPoints)
91  {
92  uint32_t rgb = 0;
93  rgb |= static_cast<uint8_t>(255.0f * p->fColorR * p->fIntensity) << 16;
94  rgb |= static_cast<uint8_t>(255.0f * p->fColorG * p->fIntensity) << 8;
95  rgb |= static_cast<uint8_t>(255.0f * p->fColorB * p->fIntensity);
96 
97  fs << p->vPosition.x - vCenter.x << " " << p->vPosition.y - vCenter.y << " " << p->vPosition.z - vCenter.z << " ";
98  fs << rgb << "\n";
99  }
100 
101  fs.close();
102  */
103 }
104 
105 void CObjectRecognition::SaveObjectDescriptorRGBD(const CObjectHypothesis* pHypothesis, const std::string sObjectName, const int nDescriptorNumber)
106 {
107  setlocale(LC_NUMERIC, "C");
108  //printf ("Locale is: %s\n", setlocale(LC_NUMERIC, NULL));
109 
110  // generate filename and create file
111  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
112  sFileName.append(sObjectName);
113  std::string sFileNamePointsOnly = sFileName;
114  std::string sFileNamePointsOnlyNoncentered = sFileName;
115  sFileName.append("00.dat");
116  sFileNamePointsOnly.append("-points00.dat");
117  sFileNamePointsOnlyNoncentered.append("-points-noncentered00.dat");
118  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
119  COLPTools::SetNumberInFileName(sFileNamePointsOnly, nDescriptorNumber, 2);
120  COLPTools::SetNumberInFileName(sFileNamePointsOnlyNoncentered, nDescriptorNumber, 2);
121  ARMARX_VERBOSE_S << "Saving object descriptor. File name: " << sFileName.c_str();
122  FILE* pFile = fopen(sFileName.c_str(), "wt");
123  FILE* pFilePointsOnly = fopen(sFileNamePointsOnly.c_str(), "wt");
124  FILE* pFilePointsOnlyNoncentered = fopen(sFileNamePointsOnlyNoncentered.c_str(), "wt");
125 
126 
127  // save the confirmed points
128  fprintf(pFile, "%ld \n", pHypothesis->aConfirmedPoints.size());
129  Vec3d vCenter = pHypothesis->vCenter;
130 
131  for (size_t i = 0; i < pHypothesis->aConfirmedPoints.size(); i++)
132  {
133  CHypothesisPoint* pPoint = pHypothesis->aConfirmedPoints.at(i);
134  fprintf(pFile, "%e %e %e %e %e %e %e \n", pPoint->vPosition.x - vCenter.x, pPoint->vPosition.y - vCenter.y, pPoint->vPosition.z - vCenter.z,
135  pPoint->fColorR, pPoint->fColorG, pPoint->fColorB, pPoint->fIntensity);
136  fprintf(pFilePointsOnly, "%e %e %e \n", pPoint->vPosition.x - vCenter.x, pPoint->vPosition.y - vCenter.y, pPoint->vPosition.z - vCenter.z);
137  fprintf(pFilePointsOnlyNoncentered, "%e %e %e \n", pPoint->vPosition.x, pPoint->vPosition.y, pPoint->vPosition.z);
138  }
139 
140  // save the candidate points too
141  if (false)
142  {
143  fprintf(pFile, "%ld \n", pHypothesis->aNewPoints.size());
144 
145  for (size_t i = 0; i < pHypothesis->aNewPoints.size(); i++)
146  {
147  CHypothesisPoint* pPoint = pHypothesis->aNewPoints.at(i);
148  fprintf(pFile, "%e %e %e %e %e %e %e \n", pPoint->vPosition.x - vCenter.x, pPoint->vPosition.y - vCenter.y, pPoint->vPosition.z - vCenter.z,
149  pPoint->fColorR, pPoint->fColorG, pPoint->fColorB, pPoint->fIntensity);
150  fprintf(pFilePointsOnly, "%e %e %e \n", pPoint->vPosition.x - vCenter.x, pPoint->vPosition.y - vCenter.y, pPoint->vPosition.z - vCenter.z);
151  fprintf(pFilePointsOnlyNoncentered, "%e %e %e \n", pPoint->vPosition.x, pPoint->vPosition.y, pPoint->vPosition.z);
152  }
153  }
154 
155  // close the file
156  fclose(pFile);
157  fclose(pFilePointsOnly);
158  fclose(pFilePointsOnlyNoncentered);
159 }
160 
161 
162 
163 bool CObjectRecognition::LoadObjectDescriptorRGBD(const std::string sObjectName, const int nDescriptorNumber, CObjectHypothesis*& pHypothesis)
164 {
165  setlocale(LC_NUMERIC, "C");
166 
167  // generate filename and open file
168  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
169  sFileName.append(sObjectName);
170  sFileName.append("00.dat");
171  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
172  FILE* pFile = fopen(sFileName.c_str(), "rt");
173 
174  if (pFile == NULL)
175  {
176  pFile = fopen(sFileName.c_str(), "rt");
177 
178  if (pFile == NULL)
179  {
180  ARMARX_VERBOSE_S << "Cannot open object file: " << sFileName.c_str();
181  pHypothesis = NULL;
182  return false;
183  }
184  }
185 
186  pHypothesis = new CObjectHypothesis();
187  pHypothesis->eType = CObjectHypothesis::eRGBD;
188  Math3d::SetVec(pHypothesis->vCenter, 0, 0, 0);
189 
190  // read the confirmed points
191  size_t nNumPoints;
192  int dummy = fscanf(pFile, "%lu", &nNumPoints);
193 
194  for (size_t i = 0; i < nNumPoints; i++)
195  {
196  CHypothesisPoint* pPoint = new CHypothesisPoint();
198  dummy += fscanf(pFile, "%e %e %e", &pPoint->vPosition.x, &pPoint->vPosition.y, &pPoint->vPosition.z);
199  dummy += fscanf(pFile, "%e %e %e %e \n", &pPoint->fColorR, &pPoint->fColorG, &pPoint->fColorB, &pPoint->fIntensity);
200  pHypothesis->aConfirmedPoints.push_back(pPoint);
201  }
202 
203  // read the candidate points
204  dummy += fscanf(pFile, "%lu", &nNumPoints);
205 
206  for (size_t i = 0; i < nNumPoints; i++)
207  {
208  CHypothesisPoint* pPoint = new CHypothesisPoint();
210  dummy += fscanf(pFile, "%e %e %e", &pPoint->vPosition.x, &pPoint->vPosition.y, &pPoint->vPosition.z);
211  dummy += fscanf(pFile, "%e %e %e %e \n", &pPoint->fColorR, &pPoint->fColorG, &pPoint->fColorB, &pPoint->fIntensity);
212  pHypothesis->aNewPoints.push_back(pPoint);
213  }
214 
215  return true;
216 }
217 
218 
219 
220 
221 
222 
// Proposes candidate 3D locations for a learned object in the current scene.
// The object's hue/saturation histogram is matched against the image to build
// a per-pixel similarity ("probability") image; the global median, cluster
// centers of sampled high-probability pixels, and additional random samples
// become 2D candidates, each of which is back-projected to 3D at the depth of
// the nearest scene point along its viewing ray.
//
// NOTE(review): this listing was extracted from generated documentation and
// two condition lines appear to have been dropped (before the ClusterXMeans
// if/else and before the visualization braces below); the orphaned braces are
// preserved verbatim — confirm the missing predicates against the original
// repository source before compiling.
void CObjectRecognition::FindPossibleObjectLocations(const CObjectHypothesis* pHypothesis, const CByteImage* pHSVImage, const std::vector<CHypothesisPoint*>& aScenePoints,
        const CCalibration* calibration, std::vector<Vec3d>& aPossibleLocations, const int desiredNumberOfLocations)
{
    const int width = pHSVImage->width;
    const int height = pHSVImage->height;

    // detect possible locations of the object
    std::vector<float> hueHistogram, saturationHistogram;
    COLPTools::CreateHueAndSaturationHistogram(pHypothesis, hueHistogram, saturationHistogram);

    CByteImage* probabilityImage = new CByteImage(width, height, CByteImage::eGrayScale);
    FindSimilarRegionsWithHueAndSaturationHistogram(pHSVImage, hueHistogram, saturationHistogram, probabilityImage);

    //ImageProcessor::Spread(probabilityImage, probabilityImage);
    // Stretch the upper range of the similarity image to emphasize the best matches.
    ::ImageProcessor::HistogramStretching(probabilityImage, probabilityImage, 0.7f, 1.0f); // 0.85, 1.0


    // Calculate the median of the probability mass in x and y direction:
    // accumulate per-row and per-column sums, turn them into prefix sums,
    // then find the first index where the cumulative sum exceeds half the total.

    std::vector<long> rowSums(height, 0);
    std::vector<long> columnSums(width, 0);

    long sum = 0;

    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            rowSums[i] += probabilityImage->pixels[i * width + j];
            columnSums[j] += probabilityImage->pixels[i * width + j];
            sum += probabilityImage->pixels[i * width + j];
        }
    }

    // half of the total mass — the median threshold
    sum /= 2;

    for (int i = 1; i < height; i++)
    {
        rowSums[i] += rowSums[i - 1];
    }

    for (int j = 1; j < width; j++)
    {
        columnSums[j] += columnSums[j - 1];
    }

    int x = 0;

    // NOTE(review): the index is read before the bounds test in these two
    // loops; if the cumulative sum only reaches the threshold at the last
    // element, columnSums[width] / rowSums[height] would be read out of
    // bounds — verify against the original source.
    while ((columnSums[x] < sum) && (x < width))
    {
        x++;
    }

    int y = 0;

    while ((rowSums[y] < sum) && (y < height))
    {
        y++;
    }

    Vec2d vMedian = {(float)x, (float)y};


    // Cluster probable areas: sample pixels with probability proportional to
    // their similarity value (plus an effort-dependent thinning factor).

    std::vector<Vec3d> samplePoints;

    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            if (rand() % 256 < probabilityImage->pixels[i * width + j])
            {
                if (rand() % 100 < 40 + 10 * OLP_EFFORT_POINTCLOUD_MATCHING)
                {
                    Vec3d newVec = {(float)j, (float)i, 0};
                    samplePoints.push_back(newVec);
                }
            }
        }
    }

    std::vector<std::vector<Vec3d> > clusters;
    std::vector<std::vector<int> > oldIndices;

    // NOTE(review): the `if (...)` line selecting between these two
    // ClusterXMeans parameterizations is missing from this listing.
    {
        COLPTools::ClusterXMeans(samplePoints, 1, 5, OLP_CLUSTERING_FACTOR_OBJECT_LOCALIZATION, clusters, oldIndices);
    }
    else
    {
        COLPTools::ClusterXMeans(samplePoints, 1, 4, 0.6 * OLP_CLUSTERING_FACTOR_OBJECT_LOCALIZATION, clusters, oldIndices);
    }

    // the median is always the first candidate; cluster centers follow
    std::vector<Vec2d> locationCandidates;
    locationCandidates.push_back(vMedian);

    if (clusters.size() > 1)
    {
        for (size_t i = 0; i < clusters.size(); i++)
        {
            Vec3d vClusterCenter3D;
            float fTemp;
            COLPTools::GetMeanAndVariance(clusters.at(i), vClusterCenter3D, fTemp);
            Vec2d vClusterCenter2D = {vClusterCenter3D.x, vClusterCenter3D.y};
            locationCandidates.push_back(vClusterCenter2D);
            //COLPTools::DrawCross(probabilityImage, vClusterCenter2D.x, vClusterCenter2D.y, 50);
        }
    }

    // Top up with rejection-sampled random pixels until the desired number of
    // candidates is reached (bounded by 10000 attempts).
    int numAttempts = 0;

    while ((int)locationCandidates.size() < desiredNumberOfLocations && numAttempts < 10000)
    {
        int u = rand() % probabilityImage->width;
        int v = rand() % probabilityImage->height;

        if (probabilityImage->pixels[v * probabilityImage->width + u] > rand() % 500)
        {
            Vec2d pos = {(float)u, (float)v};
            locationCandidates.push_back(pos);
        }

        numAttempts++;
    }


    // visualize
    // NOTE(review): the guard condition (likely a screenshot/debug flag) for
    // this block is missing from this listing.
    {
        for (size_t n = 0; n < locationCandidates.size(); n++)
        {
            COLPTools::DrawCross(probabilityImage, locationCandidates.at(n).x, locationCandidates.at(n).y, 50);
        }

        std::string filename = OLP_SCREENSHOT_PATH;
        filename += "prob.bmp";
        probabilityImage->SaveToFile(filename.c_str());
    }


    // calculate 3D positions
    for (size_t n = 0; n < locationCandidates.size(); n++)
    {
        // calculate the 3D line defined by the 2D image point, and find the closest point in the scene point cloud
        Vec3d point1, point2, direction, estimatedPosition, temp1, temp2;
        calibration->ImageToWorldCoordinates(locationCandidates.at(n), point1, 100.0f, false);
        calibration->ImageToWorldCoordinates(locationCandidates.at(n), point2, 200.0f, false);
        Math3d::SubtractVecVec(point1, point2, direction);
        Math3d::NormalizeVec(direction);
        float fDist, fMinDist = 10000, fBestZ = 500;

        for (size_t i = 0; i < aScenePoints.size(); i++)
        {
            if (Math3d::Length(aScenePoints.at(i)->vPosition) > 0)
            {
                // distance of the scene point from the viewing ray
                // (|cross(p - p1, dir)| with dir normalized)
                Math3d::SubtractVecVec(aScenePoints.at(i)->vPosition, point1, temp1);
                Math3d::CrossProduct(temp1, direction, temp2);
                fDist = Math3d::Length(temp2);

                if (fDist < fMinDist)
                {
                    fMinDist = fDist;
                    fBestZ = aScenePoints.at(i)->vPosition.z;
                }
            }
        }

        // get the point corresponding to the pixel with the same distance to the camera as the closest scene point
        calibration->ImageToWorldCoordinates(locationCandidates.at(n), estimatedPosition, fBestZ, false);

        //ARMARX_VERBOSE_S << "(%d, %d) (%.1f, %.1f, %.1f)\n", locationCandidates.at(n).x, locationCandidates.at(n).y, estimatedPosition.x, estimatedPosition.y, estimatedPosition.z);
        aPossibleLocations.push_back(estimatedPosition);
    }

    //probabilityImage->SaveToFile("/homes/staff/schieben/datalog/prob.bmp");
    delete probabilityImage;
}
401 
402 
403 
404 
405 void CObjectRecognition::FindSimilarRegionsWithHueAndSaturationHistogram(const CByteImage* pHSVImage, const std::vector<float>& aHueHistogram,
406  const std::vector<float>& aSaturationHistogram, CByteImage* pProbabilityImage)
407 {
408  const int nNumScales = 2;
409  const float fVisualizationFactor = 0.33f * 255.0f / (float)nNumScales;
410  const int nMinNumSubdivisionsX = 4;
411 
412  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
413  {
414  pProbabilityImage->pixels[i] = 0;
415  }
416 
417  std::vector<float> aHueHistogramWindow;
418  aHueHistogramWindow.resize(aHueHistogram.size());
419  std::vector<float> aSaturationHistogramWindow;
420  aSaturationHistogramWindow.resize(aSaturationHistogram.size());
421 
422  for (int n = 0; n < nNumScales; n++)
423  {
424  //const int nNumSubdivisions = nMinNumNumSubdivisions << n;
425  const int nNumSubdivisionsX = (int)((double)nMinNumSubdivisionsX * pow(sqrt(2), n));
426  const int nNumSubdivisionsY = (int)(3.0 / 4.0 * (double)nMinNumSubdivisionsX * pow(sqrt(2), n));
427  const int nWindowSizeX = OLP_IMG_WIDTH / nNumSubdivisionsX;
428  const int nWindowSizeY = OLP_IMG_HEIGHT / nNumSubdivisionsY;
429  int nMinX, nMaxX, nMinY, nMaxY;
430 
431  const int nWindowSizeX_3 = nWindowSizeX / 3;
432  const int nWindowSizeY_3 = nWindowSizeY / 3;
433 
434  for (int i = 0; i < nNumSubdivisionsY; i++)
435  {
436  for (int j = 0; j < nNumSubdivisionsX; j++)
437  {
438  // shift window
439  for (int m = -1; m <= 1; m++)
440  {
441  nMinX = j * nWindowSizeX + m * nWindowSizeX_3;
442  nMaxX = (j + 1) * nWindowSizeX + m * nWindowSizeX_3;
443  nMinY = i * nWindowSizeY + m * nWindowSizeY_3;
444  nMaxY = (i + 1) * nWindowSizeY + m * nWindowSizeY_3;
445 
446  if (nMinX < 0)
447  {
448  nMinX = 0;
449  }
450 
451  if (nMinY < 0)
452  {
453  nMinY = 0;
454  }
455 
456  if (nMaxX > OLP_IMG_WIDTH)
457  {
458  nMaxX = OLP_IMG_WIDTH;
459  }
460 
461  if (nMaxY > OLP_IMG_HEIGHT)
462  {
463  nMaxY = OLP_IMG_HEIGHT;
464  }
465 
466  COLPTools::CreateHueAndSaturationHistogramInWindow(pHSVImage, nMinX, nMinY, nMaxX, nMaxY, aHueHistogramWindow, aSaturationHistogramWindow);
467 
468  const float fHistogramDistance = 0.5f * COLPTools::GetHistogramDistanceX2(aHueHistogram, aHueHistogramWindow)
469  + 0.5f * COLPTools::GetHistogramDistanceX2(aSaturationHistogram, aSaturationHistogramWindow);
470  const float fCorrelation = 1.0f - (0.25f * fHistogramDistance * fHistogramDistance); // distance in [0,2]
471 
472  // visualize in probability image
473  unsigned char nCorrelationValueVisualization = (unsigned char)(fVisualizationFactor * fCorrelation);
474 
475  for (int k = nMinY; k < nMaxY; k++)
476  {
477  for (int l = nMinX; l < nMaxX; l++)
478  {
479  pProbabilityImage->pixels[k * OLP_IMG_WIDTH + l] += nCorrelationValueVisualization;
480  }
481  }
482  }
483  }
484  }
485  }
486 
487 }
488 
489 
490 void CObjectRecognition::FindObjectRGBD(const CObjectHypothesis* pHypothesis, const CByteImage* pHSVImage, const std::vector<CHypothesisPoint*>& aScenePoints,
491  const CCalibration* calibration, const Vec3d upwardsVector, Vec3d& vTranslation, Mat3d& mOrientation, float& distance,
492  float& fProbability, CByteImage* pResultImage)
493 {
494  // get possible object locations based on color region similarity
495  std::vector<Vec3d> aPossibleLocations;
496  FindPossibleObjectLocations(pHypothesis, pHSVImage, aScenePoints, calibration, aPossibleLocations);
497 
498  // set up the point cloud registration
499  CPointCloudRegistration* pPointCloudRegistration = new CPointCloudRegistration();
500  pPointCloudRegistration->SetScenePointcloud(aScenePoints);
501 
502 
503  // try to match the object at the promising locations
504  distance = pPointCloudRegistration->EstimateTransformation(pHypothesis, mOrientation, vTranslation, upwardsVector, OLP_EFFORT_POINTCLOUD_MATCHING, &aPossibleLocations);
505  delete pPointCloudRegistration;
506 
507  fProbability = 1.0f / (1.0f + 0.12f * distance);
508 
509  ARMARX_IMPORTANT_S << "Distance: " << distance << ", probability: " << fProbability;
510  ARMARX_VERBOSE_S << "Translation: ( " << vTranslation.x << ", " << vTranslation.y << ", " << vTranslation.z << ")";
511 
512 
513  // visualize
514  if (pResultImage)
515  {
516  std::vector<CHypothesisPoint*> aObjectPoints;
517 
518  for (size_t i = 0; i < pHypothesis->aConfirmedPoints.size(); i++)
519  {
520  aObjectPoints.push_back(pHypothesis->aConfirmedPoints.at(i)->GetCopy());
521  Math3d::MulMatVec(mOrientation, aObjectPoints.back()->vPosition, vTranslation, aObjectPoints.back()->vPosition);
522  }
523 
524  unsigned char r, g, b;
525 
526  if (fProbability > 0.5f)
527  {
528  r = 0;
529  g = 200;
530  b = 0;
531  }
532  else
533  {
534  r = 100;
535  g = 20;
536  b = 20;
537  }
538 
539  Vec2d vPos2D;
540 
541  for (size_t i = 0; i < aObjectPoints.size(); i++)
542  {
543  calibration->WorldToImageCoordinates(aObjectPoints.at(i)->vPosition, vPos2D, false);
544 
545  if (vPos2D.x > 1 && vPos2D.y > 1 && vPos2D.x < OLP_IMG_WIDTH - 2 && vPos2D.y < OLP_IMG_HEIGHT - 2)
546  {
547  for (int j = -1; j <= 1; j++)
548  {
549  pResultImage->pixels[3 * (((int)vPos2D.y + j)*OLP_IMG_WIDTH + (int)vPos2D.x)] = r;
550  pResultImage->pixels[3 * (((int)vPos2D.y + j)*OLP_IMG_WIDTH + (int)vPos2D.x) + 1] = g;
551  pResultImage->pixels[3 * (((int)vPos2D.y + j)*OLP_IMG_WIDTH + (int)vPos2D.x) + 2] = b;
552 
553  pResultImage->pixels[3 * ((int)vPos2D.y * OLP_IMG_WIDTH + (int)vPos2D.x + j)] = r;
554  pResultImage->pixels[3 * ((int)vPos2D.y * OLP_IMG_WIDTH + (int)vPos2D.x + j) + 1] = g;
555  pResultImage->pixels[3 * ((int)vPos2D.y * OLP_IMG_WIDTH + (int)vPos2D.x + j) + 2] = b;
556  }
557  }
558  }
559 
560  // cleanup
561  for (size_t i = 0; i < aObjectPoints.size(); i++)
562  {
563  delete aObjectPoints.at(i);
564  }
565  }
566 }
567 
568 
569 
570 
571 void CObjectRecognition::FindAllObjectsRGBD(const CByteImage* pHSVImage, const CByteImage* pRGBImage, const std::vector<CHypothesisPoint*>& aScenePoints,
572  const CCalibration* calibration, const Vec3d upwardsVector, std::vector<std::string>& aNames, std::vector<Vec3d>& aPositions,
573  std::vector<Mat3d>& aOrientations, std::vector<float>& aProbabilities)
574 {
575  // load the names of the known objects, and the numbers of the respective descriptor file to be used
576  std::string sObjectNamesFileName = OLP_OBJECT_LEARNING_DIR;
577  sObjectNamesFileName += "names.txt";
578  FILE* pObjectNamesFile = fopen(sObjectNamesFileName.c_str(), "rt");
579 
580  if (pObjectNamesFile == NULL)
581  {
582  ARMARX_VERBOSE_S << "Opening file with object names failed!";
583  return;
584  }
585 
586  int nNumObjects;
587  int dummy = fscanf(pObjectNamesFile, "%d", &nNumObjects);
588  char pcObjName[100];
589  int nDescriptorNumber;
590  std::string sObjName;
591  std::vector<int> aDescriptorNumbers;
592  aNames.clear();
593 
594  for (int i = 0; i < nNumObjects; i++)
595  {
596  dummy += fscanf(pObjectNamesFile, "%s", pcObjName);
597  dummy += fscanf(pObjectNamesFile, "%d", &nDescriptorNumber);
598  sObjName = pcObjName;
599  aNames.push_back(sObjName);
600  aDescriptorNumbers.push_back(nDescriptorNumber);
601  }
602 
603  aPositions.resize(nNumObjects);
604  aOrientations.resize(nNumObjects);
605  aProbabilities.resize(nNumObjects);
606  std::vector<float> aDistances;
607  aDistances.resize(nNumObjects);
608  fclose(pObjectNamesFile);
609 
610  // debug: result images
611  CByteImage** pResultImages = new CByteImage*[nNumObjects];
612 
613  for (int i = 0; i < nNumObjects; i++)
614  {
615  pResultImages[i] = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
616  ImageProcessor::CopyImage(pRGBImage, pResultImages[i]);
617  }
618 
619  // try to recognize the objects
620  for (int i = 0; i < nNumObjects; i++)
621  {
622  CObjectHypothesis* pHypothesis;
623  ARMARX_VERBOSE_S << aNames.at(i) << " (" << i << ")";
624  LoadObjectDescriptorRGBD(aNames.at(i), aDescriptorNumbers.at(i), pHypothesis);
625  FindObjectRGBD(pHypothesis, pHSVImage, aScenePoints, calibration, upwardsVector, aPositions.at(i), aOrientations.at(i), aDistances.at(i), aProbabilities.at(i), pResultImages[i]);
626  delete pHypothesis;
627  }
628 
629 
630  // debug: write recognition results to file
631  for (int i = 0; i < nNumObjects; i++)
632  {
633  std::string sResImgName = "/home/staff/schieben/datalog/reco00.bmp";
634  COLPTools::SetNumberInFileName(sResImgName, i, 2);
635  pResultImages[i]->SaveToFile(sResImgName.c_str());
636  delete pResultImages[i];
637  }
638 
639  delete[] pResultImages;
640 }
641 
642 
643 
644 
645 void CObjectRecognition::FindObjectWithManyDescriptorsRGBD(const CByteImage* pHSVImage, const CByteImage* pRGBImage, const std::vector<CHypothesisPoint*>& aScenePoints,
646  const CCalibration* calibration, const Vec3d upwardsVector, const std::string objectName, const int numDescriptors)
647 {
648  std::string directory = OLP_OBJECT_LEARNING_DIR;
649  std::string sFileName = directory + "reco_dist_" + objectName + ".txt";
650  FILE* pFile = fopen(sFileName.c_str(), "a");
651 
652  CByteImage* resultImage = new CByteImage(pRGBImage);
653  ImageProcessor::CopyImage(pRGBImage, resultImage);
654 
655  Vec3d position;
656  Mat3d orientation;
657  float probability = -1;
658  float distance = -1;
659 
660  for (int i = 1; i <= numDescriptors; i++)
661  {
662  CObjectHypothesis* pHypothesis;
663  ARMARX_INFO_S << "Recognizing " << objectName << " (descriptor " << i << ")";
664  LoadObjectDescriptorRGBD(objectName, i, pHypothesis);
665  FindObjectRGBD(pHypothesis, pHSVImage, aScenePoints, calibration, upwardsVector, position, orientation, distance, probability, resultImage);
666  fprintf(pFile, "%e ", distance);
667  delete pHypothesis;
668  }
669 
670  std::string sResImgName = directory + "reco_result.bmp";
671  resultImage->SaveToFile(sResImgName.c_str());
672 
673  fprintf(pFile, "\n");
674  fclose(pFile);
675 }
676 
677 
678 
679 
680 
681 
682 
683 
684 
685 
686 void CObjectRecognition::SaveObjectDescriptorRFCH(const CByteImage* pRGBImage, const CByteImage* pObjectMask, const std::string sObjectName, const int nDescriptorNumber)
687 {
688 #ifdef OLP_USE_EARLYVISION
689  ///////////////////////////////////////////////////////////
690  // parameters
691  ///////////////////////////////////////////////////////////
692  const int nNumberClusters = 30; // number of clusters to use. More clusters means more descriptive but too much cluster means overfitting with varying lighting conditions.
693  const float fErrorThreshold = 0.5; // threshold for matching histograms. Should be reduced if two many matches arise
694  const float fMatchThreshold = 0.5; // threshold for assigning values to clusters
695 
696  ////////////////////////////////
697  // create signature calculator
698  ////////////////////////////////
699  CRFCSignatureCalculator* pCalc = new CRFCSignatureCalculator(nNumberClusters);
700 
701  pCalc->SetUseZeroOffset(false);
702  pCalc->SetErrorThreshold(fErrorThreshold);
703  pCalc->SetCorrespondanceDistanceFactor(fMatchThreshold);
704  pCalc->SetMinConsidered(0.5);
705 
706  ///////////////////////////////////////////////////////////
707  // setup dimension. You can use any dimension from Base/Features/RFCH/Descriptors/
708  ///////////////////////////////////////////////////////////
709  CHueDescriptor* pHue = new CHueDescriptor(0, 10, 255, 10, 255);
710  CHueLaplacianDescriptor* pHueL = new CHueLaplacianDescriptor(0, 10, 255, 10, 255);
711  pCalc->AddDimension(pHue);
712  pCalc->AddDimension(pHueL);
713 
714  pCalc->Init(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, 30, 20); // number windows: 30 x 20 y
715 
716  // train query
717  CByteImage* pRGBImageCopy = new CByteImage(pRGBImage);
718  ImageProcessor::CopyImage(pRGBImage, pRGBImageCopy);
719 
720  if (pRGBImageCopy->width != pRGBImage->width)
721  {
722  ARMARX_VERBOSE_S << "\nBuuuuugs!\n\n");
723  }
724 
725  CByteImage* pObjectMaskCopy = new CByteImage(pObjectMask);
726  ImageProcessor::CopyImage(pObjectMask, pObjectMaskCopy);
727  CRFCSignatureFeatureEntry* pFeatureDescriptor = (CRFCSignatureFeatureEntry*) pCalc->Train(pRGBImageCopy, pObjectMaskCopy);
728  CRFCSignatureCalculator::NormalizeHistogram(pFeatureDescriptor, 10000); // always do that
729 
730  // generate filename, create file, write descriptor
731  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
732  sFileName.append(sObjectName);
733  sFileName.append("00.rfc");
734  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
735  ARMARX_VERBOSE_S << "\nSaving object descriptor. File name: %s\n", sFileName.c_str());
736  FILE* pFile = fopen(sFileName.c_str(), "wt");
737 
738  if (!pFeatureDescriptor->WriteToFile(pFile))
739  {
740  ARMARX_VERBOSE_S << "\nError writing to file!\n\n");
741  }
742 
743  fclose(pFile);
744 #endif
745 }
746 
747 
748 
749 
750 void CObjectRecognition::LoadObjectDescriptorRFCH(const std::string sObjectName, const int nDescriptorNumber, CRFCSignatureFeatureEntry*& pFeatureDescriptor)
751 {
752 #ifdef OLP_USE_EARLYVISION
753  // generate filename
754  std::string sFileName = OLP_OBJECT_LEARNING_DIR;
755  sFileName.append(sObjectName);
756  sFileName.append("00.rfc");
757  COLPTools::SetNumberInFileName(sFileName, nDescriptorNumber, 2);
758  FILE* pFile = fopen(sFileName.c_str(), "rt");
759 
760  if (pFile == NULL)
761  {
762  ARMARX_VERBOSE_S << "\nCannot open object file: %s\n\n", sFileName.c_str());
763  pFeatureDescriptor = NULL;
764  }
765  else
766  {
767  pFeatureDescriptor->ReadFromFile(pFile);
768  }
769 
770 #endif
771 }
772 
773 
774 
775 
776 void CObjectRecognition::FindObjectRFCH(CRFCSignatureFeatureEntry* pFeatureDescriptor, CByteImage* pRGBImage, const std::vector<CHypothesisPoint*>& aScenePoints,
777  const CCalibration* calibration, Vec3d& vPosition, Mat3d& mOrientation, float& fProbability, CByteImage* pResultImage)
778 {
779 #ifdef OLP_USE_EARLYVISION
780  ///////////////////////////////////////////////////////////
781  // parameters
782  ///////////////////////////////////////////////////////////
783  const int nNumberClusters = 30; // number of clusters to use. More clusters means more descriptive but too much cluster means overfitting with varying lighting conditions.
784  const float fErrorThreshold = 0.5; // threshold for matching histograms. Should be reduced if two many matches arise
785  const float fMatchThreshold = 0.5; // threshold for assigning values to clusters
786  float fGrowThreshold = 0.8; // grow threshold of the additive region image. If the detected area is too big, reduce this value. Otherwise increase.
787 
788  ////////////////////////////////
789  // create signature calculator
790  ////////////////////////////////
791  CRFCSignatureCalculator* pCalc = new CRFCSignatureCalculator(nNumberClusters);
792 
793  pCalc->SetUseZeroOffset(false);
794  pCalc->SetErrorThreshold(fErrorThreshold);
795  pCalc->SetCorrespondanceDistanceFactor(fMatchThreshold);
796  pCalc->SetMinConsidered(0.5);
797 
798  ///////////////////////////////////////////////////////////
799  // setup dimension. You can use any dimension from Base/Features/RFCH/Descriptors/
800  ///////////////////////////////////////////////////////////
801  CHueDescriptor* pHue = new CHueDescriptor(0, 10, 255, 10, 255);
802  CHueLaplacianDescriptor* pHueL = new CHueLaplacianDescriptor(0, 10, 255, 10, 255);
803  pCalc->AddDimension(pHue);
804  pCalc->AddDimension(pHueL);
805 
806  pCalc->Init(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, 30, 20); // number windows: 30 x 20 y
807 
808 
809  // match in scene
810  CFeatureEntry* pfQuery = pFeatureDescriptor;
811  pCalc->PrepareMatching(pRGBImage, NULL, &pfQuery, 1); // query one feature
812  vector<TRFCMatch> aMatches = pCalc->Match(pfQuery, 2, 2, 4, 4); // 2,2: min number of windows x,y 4,4: max number of windows x,y
813 
814  ARMARX_VERBOSE_S << "%ld matches extracted\n", aMatches.size());
815 
816  // apply ARI
817  int nBoarderOffset = pCalc->GetBoarderOffset();
818  int nNumberWindowsX, nNumberWindowsY, nWindowPixelsX, nWindowPixelsY;
819 
820  pCalc->GetNumberWindows(nNumberWindowsX, nNumberWindowsY); // get number of search windows
821  pCalc->GetWindowSize(nWindowPixelsX, nWindowPixelsY); // get size of minimum window size
822 
823  CAdditiveRegionImage* pARI = new CAdditiveRegionImage(nNumberWindowsX, nNumberWindowsY, nWindowPixelsX, nWindowPixelsY, fGrowThreshold);
824  pARI->setOffsets(nBoarderOffset, nBoarderOffset);
825 
826  vector<TRFCMatch>::iterator iter = aMatches.begin();
827  int nID = 0;
828  std::vector<TRegionListEntry> aRegionList;
829 
830  while (iter != aMatches.end())
831  {
832  TRegionListEntry entry;
833  entry.region = iter->region;
834  entry.fActivation = iter->fMatch;
835  entry.id = nID;
836  entry.bUse = true;
837 
838  aRegionList.push_back(entry);
839 
840  nID++;
841  iter++;
842  }
843 
844  pARI->set(aRegionList);
845  ARMARX_VERBOSE_S << "Evaluation ARI (GrowThreshold: %f) ...\n", fGrowThreshold);
846  std::vector<TResultRegion> aRegions = pARI->calculateRegions();
847 
848  ARMARX_VERBOSE_S << "Extracted %ld matching regions\n", aRegions.size());
849 
850  // visualize
851 
852  if (pResultImage != NULL)
853 {
854  pARI->visualize(pResultImage, aRegions);
855  pResultImage->SaveToFile("/home/staff/schieben/datalog/reco.bmp");
856  }
857 
858 
859  // find object center
860  if (aRegionList.size() > 0)
861 {
862  // calculate a 3D position: calculate the 3D line defined by the 2D image point, and find the closest point in the scene point cloud
863  Vec2d vCenter2D = aRegionList.at(0).region.centroid;
864  Vec3d vPoint1, vPoint2, vDirection, vTemp1, vTemp2;
865  calibration->ImageToWorldCoordinates(vCenter2D, vPoint1, 100.0f, false);
866  calibration->ImageToWorldCoordinates(vCenter2D, vPoint2, 200.0f, false);
867  Math3d::SubtractVecVec(vPoint1, vPoint2, vDirection);
868  Math3d::NormalizeVec(vDirection);
869  float fDist, fMinDist = 10000, fBestZ = 500;
870 
871  for (size_t i = 0; i < aScenePoints.size(); i++)
872  {
873  Math3d::SubtractVecVec(aScenePoints.at(i)->vPosition, vPoint1, vTemp1);
874  Math3d::CrossProduct(vTemp1, vDirection, vTemp2);
875  fDist = Math3d::Length(vTemp2);
876 
877  if (fDist < fMinDist)
878  {
879  fMinDist = fDist;
880  fBestZ = aScenePoints.at(i)->vPosition.z;
881  }
882  }
883 
884  // get the point corresponding to the pixel with the same distance to the camera as the closest scene point
885  calibration->ImageToWorldCoordinates(vCenter2D, vPosition, fBestZ, false);
886 
887  Math3d::SetMat(mOrientation, Math3d::unit_mat);
888  fProbability = aRegionList.at(0).fActivation;
889  }
890  else
891  {
892  Math3d::SetVec(vPosition, 0, 0, 0);
893  Math3d::SetMat(mOrientation, Math3d::unit_mat);
894  fProbability = 0;
895  }
896 
897 #endif
898 }
899 
900 
901 
902 
903 void CObjectRecognition::FindAllObjectsRFCH(const CByteImage* pRGBImage, const CCalibration* calibration, const std::vector<CHypothesisPoint*>& aScenePoints,
904  std::vector<std::string>& aNames, std::vector<Vec3d>& aPositions, std::vector<Mat3d>& aOrientations, std::vector<float>& aProbabilities)
905 {
906 #ifdef OLP_USE_EARLYVISION
907  CByteImage* pResultImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
908  ImageProcessor::CopyImage(pRGBImage, pResultImage);
909  CByteImage* pRGBImageCopy = new CByteImage(pRGBImage);
910  ImageProcessor::CopyImage(pRGBImage, pRGBImageCopy);
911 
912  // load the names of the known objects, and the numbers of the respective descriptor file to be used
913  std::string sObjectNamesFileName = OLP_OBJECT_LEARNING_DIR;
914  sObjectNamesFileName += "names.txt";
915  FILE* pObjectNamesFile = fopen(sObjectNamesFileName.c_str(), "rt");
916 
917  if (pObjectNamesFile == NULL)
918  {
919  ARMARX_VERBOSE_S << "\nOpening file with object names failed!\n\n");
920  return;
921  }
922 
923  int nNumObjects;
924  fscanf(pObjectNamesFile, "%d", &nNumObjects);
925  char pcObjName[100];
926  int nDescriptorNumber;
927  std::string sObjName;
928  std::vector<int> aDescriptorNumbers;
929  aNames.clear();
930 
931  for (int i = 0; i < nNumObjects; i++)
932  {
933  fscanf(pObjectNamesFile, "%s", pcObjName);
934  fscanf(pObjectNamesFile, "%d", &nDescriptorNumber);
935  sObjName = pcObjName;
936  aNames.push_back(sObjName);
937  aDescriptorNumbers.push_back(nDescriptorNumber);
938  }
939 
940  aPositions.resize(nNumObjects);
941  aOrientations.resize(nNumObjects);
942  aProbabilities.resize(nNumObjects);
943  fclose(pObjectNamesFile);
944 
945  // try to recognize the objects
946  const int nNumberClusters = 30;
947  CRFCSignatureFeatureEntry* pFeatureDescriptor = new CRFCSignatureFeatureEntry(2, nNumberClusters);
948 
949  for (int i = 0; i < nNumObjects; i++)
950  {
951  ARMARX_VERBOSE_S << "\n%s\n", aNames.at(i).c_str());
952  LoadObjectDescriptorRFCH(aNames.at(i), aDescriptorNumbers.at(i), pFeatureDescriptor);
953  FindObjectRFCH(pFeatureDescriptor, pRGBImageCopy, aScenePoints, calibration, aPositions.at(i), aOrientations.at(i), aProbabilities.at(i), pResultImage);
954  }
955 
956  delete pFeatureDescriptor;
957 
958  pResultImage->SaveToFile("/home/staff/schieben/datalog/reco.bmp");
959  delete pResultImage;
960 #endif
961 }
962 
963 
964 
965 
966 
967 
968 
CObjectRecognition::SaveObjectDescriptorPCD
static void SaveObjectDescriptorPCD(const CObjectHypothesis *pHypothesis, const std::string sObjectName, const int nDescriptorNumber=0)
Definition: ObjectRecognition.cpp:56
CObjectRecognition::FindObjectRGBD
static void FindObjectRGBD(const CObjectHypothesis *pHypothesis, const CByteImage *pHSVImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, const Vec3d upwardsVector, Vec3d &vPosition, Mat3d &mOrientation, float &distance, float &fProbability, CByteImage *pResultImage=NULL)
Definition: ObjectRecognition.cpp:490
GfxTL::sqrt
VectorXD< D, T > sqrt(const VectorXD< D, T > &a)
Definition: VectorXD.h:662
PointCloudRegistration.h
ARMARX_IMPORTANT_S
#define ARMARX_IMPORTANT_S
Definition: Logging.h:203
CPointCloudRegistration::EstimateTransformation
float EstimateTransformation(const std::vector< CHypothesisPoint * > &aObjectPoints, const Vec3d center, Mat3d &mRotation, Vec3d &vTranslation, const Vec3d upwardsVector, const int nEffortLevel=OLP_EFFORT_POINTCLOUD_MATCHING, const std::vector< Vec3d > *pPossibleLocationOffsets=NULL, std::vector< CColorICP::CPointXYZRGBI > *pNearestNeighbors=NULL, std::vector< float > *pPointMatchDistances=NULL, const std::vector< CHypothesisPoint * > *pAdditionalPoints=NULL, const float maxAcceptedDistance=FLT_MAX)
Definition: PointCloudRegistration.cpp:91
OLP_IMG_HEIGHT
#define OLP_IMG_HEIGHT
Definition: ObjectLearningByPushingDefinitions.h:69
COLPTools::GetMeanAndVariance
void GetMeanAndVariance(const std::vector< CHypothesisPoint * > &pHypothesisPoints, Vec3d &vMean, float &fVariance)
Definition: OLPTools.cpp:699
OLP_IMG_WIDTH
#define OLP_IMG_WIDTH
Definition: ObjectLearningByPushingDefinitions.h:68
CObjectRecognition::FindAllObjectsRFCH
static void FindAllObjectsRFCH(const CByteImage *pRGBImage, const CCalibration *calibration, const std::vector< CHypothesisPoint * > &aScenePoints, std::vector< std::string > &aNames, std::vector< Vec3d > &aPositions, std::vector< Mat3d > &aOrientations, std::vector< float > &aProbabilities)
Definition: ObjectRecognition.cpp:903
CHypothesisPoint::fIntensity
float fIntensity
Definition: ObjectHypothesis.h:241
COLPTools::GetHistogramDistanceX2
float GetHistogramDistanceX2(const std::vector< float > &aHistogram1, const std::vector< float > &aHistogram2)
Definition: OLPTools.cpp:1172
COLPTools::SetNumberInFileName
void SetNumberInFileName(std::string &sFileName, int nNumber, int nNumDigits)
Definition: OLPTools.cpp:1378
CObjectRecognition::LoadObjectDescriptorRFCH
static void LoadObjectDescriptorRFCH(const std::string sObjectName, const int nDescriptorNumber, CRFCSignatureFeatureEntry *&pFeatureDescriptor)
Definition: ObjectRecognition.cpp:750
GfxTL::Vec2d
VectorXD< 2, double > Vec2d
Definition: VectorXD.h:694
CObjectRecognition::FindPossibleObjectLocations
static void FindPossibleObjectLocations(const CObjectHypothesis *pHypothesis, const CByteImage *pHSVImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, std::vector< Vec3d > &aPossibleLocations, const int desiredNumberOfLocations=0)
Definition: ObjectRecognition.cpp:223
CObjectRecognition::SaveObjectDescriptorRFCH
static void SaveObjectDescriptorRFCH(const CByteImage *pRGBImage, const CByteImage *pObjectMask, const std::string sObjectName, const int nDescriptorNumber=0)
Definition: ObjectRecognition.cpp:686
COLPTools::DrawCross
void DrawCross(CByteImage *pGreyImage, int x, int y, int nBrightness)
Definition: OLPTools.cpp:1189
CObjectHypothesis::eType
eObjectType eType
Definition: ObjectHypothesis.h:289
CHypothesisPoint::fColorG
float fColorG
Definition: ObjectHypothesis.h:241
CObjectRecognition::FindObjectRFCH
static void FindObjectRFCH(CRFCSignatureFeatureEntry *pFeatureDescriptor, CByteImage *pRGBImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, Vec3d &vPosition, Mat3d &mOrientation, float &fProbability, CByteImage *pResultImage=NULL)
Definition: ObjectRecognition.cpp:776
COLPTools::CreateHueAndSaturationHistogramInWindow
void CreateHueAndSaturationHistogramInWindow(const CByteImage *pHSVImage, const int nMinX, const int nMinY, const int nMaxX, const int nMaxY, std::vector< float > &aHueHistogram, std::vector< float > &aSaturationHistogram)
Definition: OLPTools.cpp:1061
CHypothesisPoint::vPosition
Vec3d vPosition
Definition: ObjectHypothesis.h:238
COLPTools::ConfirmedPointsToPCL
pcl::PointCloud< pcl::PointXYZRGBA >::Ptr ConfirmedPointsToPCL(const CObjectHypothesis *pHypothesis)
Definition: OLPTools.cpp:906
ObjectLearningByPushingDefinitions.h
COLPTools::NormalizeHistogram
void NormalizeHistogram(std::vector< float > &aHistogram)
Definition: OLPTools.cpp:1122
CObjectHypothesis::eRGBD
@ eRGBD
Definition: ObjectHypothesis.h:285
OLPTools.h
CHypothesisPoint
Definition: ObjectHypothesis.h:171
ObjectRecognition.h
CObjectRecognition::FindAllObjectsRGBD
static void FindAllObjectsRGBD(const CByteImage *pHSVImage, const CByteImage *pRGBImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, const Vec3d upwardsVector, std::vector< std::string > &aNames, std::vector< Vec3d > &aPositions, std::vector< Mat3d > &aOrientations, std::vector< float > &aProbabilities)
Definition: ObjectRecognition.cpp:571
OLP_EFFORT_POINTCLOUD_MATCHING
#define OLP_EFFORT_POINTCLOUD_MATCHING
Definition: ObjectLearningByPushingDefinitions.h:261
GfxTL::Vec3d
VectorXD< 3, double > Vec3d
Definition: VectorXD.h:695
CObjectHypothesis::vCenter
Vec3d vCenter
Definition: ObjectHypothesis.h:308
CPointCloudRegistration
Definition: PointCloudRegistration.h:34
OLP_OBJECT_LEARNING_DIR
#define OLP_OBJECT_LEARNING_DIR
Definition: ObjectLearningByPushingDefinitions.h:162
COLPTools::CreateHueAndSaturationHistogram
void CreateHueAndSaturationHistogram(const CObjectHypothesis *pHypothesis, std::vector< float > &aHueHistogram, std::vector< float > &aSaturationHistogram)
Definition: OLPTools.cpp:1003
filename
std::string filename
Definition: VisualizationRobot.cpp:84
CHypothesisPoint::eDepthMapPoint
@ eDepthMapPoint
Definition: ObjectHypothesis.h:235
armarx::to_string
const std::string & to_string(const std::string &s)
Definition: StringHelpers.h:40
CHypothesisPoint::fColorR
float fColorR
Definition: ObjectHypothesis.h:241
COLPTools::ClusterXMeans
void ClusterXMeans(const std::vector< CHypothesisPoint * > &aPoints, const int nMinNumClusters, const int nMaxNumClusters, const float fBICFactor, std::vector< std::vector< CHypothesisPoint * > > &aaPointClusters)
Definition: OLPTools.cpp:76
CObjectHypothesis
Definition: ObjectHypothesis.h:249
CObjectRecognition::LoadObjectDescriptorRGBD
static bool LoadObjectDescriptorRGBD(const std::string sObjectName, const int nDescriptorNumber, CObjectHypothesis *&pHypothesis)
Definition: ObjectRecognition.cpp:163
armarx::ctrlutil::v
double v(double t, double v0, double a0, double j)
Definition: CtrlUtil.h:39
OLP_MAKE_INTERMEDIATE_SCREENSHOTS
#define OLP_MAKE_INTERMEDIATE_SCREENSHOTS
Definition: ObjectLearningByPushingDefinitions.h:202
float
#define float
Definition: 16_Level.h:22
CObjectHypothesis::aNewPoints
std::vector< CHypothesisPoint * > aNewPoints
Definition: ObjectHypothesis.h:291
OLP_CLUSTERING_FACTOR_OBJECT_LOCALIZATION
#define OLP_CLUSTERING_FACTOR_OBJECT_LOCALIZATION
Definition: ObjectLearningByPushingDefinitions.h:266
CObjectRecognition::FindSimilarRegionsWithHueAndSaturationHistogram
static void FindSimilarRegionsWithHueAndSaturationHistogram(const CByteImage *pHSVImage, const std::vector< float > &aHueHistogram, const std::vector< float > &aSaturationHistogram, CByteImage *pProbabilityImage)
Definition: ObjectRecognition.cpp:405
CHypothesisPoint::ePointType
EPointType ePointType
Definition: ObjectHypothesis.h:240
CObjectHypothesis::aConfirmedPoints
std::vector< CHypothesisPoint * > aConfirmedPoints
Definition: ObjectHypothesis.h:292
ARMARX_VERBOSE_S
#define ARMARX_VERBOSE_S
Definition: Logging.h:200
distance
double distance(const Point &a, const Point &b)
Definition: point.hpp:88
ARMARX_INFO_S
#define ARMARX_INFO_S
Definition: Logging.h:195
OLP_SCREENSHOT_PATH
#define OLP_SCREENSHOT_PATH
Definition: ObjectLearningByPushingDefinitions.h:206
Logging.h
CHypothesisPoint::fColorB
float fColorB
Definition: ObjectHypothesis.h:241
CPointCloudRegistration::SetScenePointcloud
void SetScenePointcloud(const std::vector< CHypothesisPoint * > &aScenePoints)
Definition: PointCloudRegistration.cpp:69
CObjectRecognition::FindObjectWithManyDescriptorsRGBD
static void FindObjectWithManyDescriptorsRGBD(const CByteImage *pHSVImage, const CByteImage *pRGBImage, const std::vector< CHypothesisPoint * > &aScenePoints, const CCalibration *calibration, const Vec3d upwardsVector, const std::string objectName, const int numDescriptors)
Definition: ObjectRecognition.cpp:645
CObjectRecognition::SaveObjectDescriptorRGBD
static void SaveObjectDescriptorRGBD(const CObjectHypothesis *pHypothesis, const std::string sObjectName, const int nDescriptorNumber=0)
Definition: ObjectRecognition.cpp:105