HypothesisVisualization.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 
26 
#include "HypothesisVisualization.h"

#include "OLPTools.h"

// IVT
#include <Calibration/Calibration.h>
#include <Image/ByteImage.h>
#include <Image/ImageProcessor.h>
#include <Image/IplImageAdaptor.h>

// OpenCV
//#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc_c.h>

// C standard library (rand, snprintf)
#include <cstdio>
#include <cstdlib>

//#include <ArmarXCore/core/logging/Logging.h>
45 
47 {
48 #ifdef OLP_SHOW_RESULT_IMAGES
49  // create windows
50  //cvNamedWindow("Confirmed hypotheses", CV_WINDOW_AUTOSIZE);
51  //cvNamedWindow("Hypotheses (left)", CV_WINDOW_AUTOSIZE);
52  //cvNamedWindow("Hypotheses (right)", CV_WINDOW_AUTOSIZE);
53 #endif
54 
55  // image for visualisation
56  m_pIplImageLeft = cvCreateImage(cvSize(OLP_IMG_WIDTH, OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
57  m_pIplImageRight = cvCreateImage(cvSize(OLP_IMG_WIDTH, OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
58  m_pOldVisualizationImageIpl = cvCloneImage(m_pIplImageLeft);
59 
60  // set stereo calibration
61  this->calibration = calibration;
62 
63  // disparity stuff
64  //cvNamedWindow("Disparity", CV_WINDOW_AUTOSIZE);
65  //cvNamedWindow("Disparity edges", CV_WINDOW_AUTOSIZE);
66  //cvNamedWindow("Combined edges", CV_WINDOW_AUTOSIZE);
67  //m_pIplImageLeftRectified = cvCreateImage(cvSize(OLP_IMG_WIDTH,OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
68  //m_pIplImageRightRectified = cvCreateImage(cvSize(OLP_IMG_WIDTH,OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
69  //m_pIplImageDisparity = cvCreateImage(cvSize(OLP_IMG_WIDTH,OLP_IMG_HEIGHT), IPL_DEPTH_8U, 1);
70 
71  // define colors
72  int nMaxColors = 1000;
73  colors = new CvScalar[nMaxColors];
74  colors[0] = cvScalar(255, 20, 20);
75  colors[1] = cvScalar(0, 0, 255);
76  colors[2] = cvScalar(0, 240, 255);
77  colors[3] = cvScalar(0, 255, 0);
78  colors[4] = cvScalar(255, 255, 0);
79  colors[5] = cvScalar(255, 0, 255);
80  colors[6] = cvScalar(45, 200, 105);
81  colors[7] = cvScalar(180, 240, 150);
82  colors[8] = cvScalar(160, 30, 55);
83  colors[9] = cvScalar(230, 130, 70);
84  colors[10] = cvScalar(70, 190, 210);
85  colors[11] = cvScalar(75, 160, 110);
86  colors[12] = cvScalar(150, 210, 155);
87  colors[13] = cvScalar(150, 30, 180);
88  colors[14] = cvScalar(210, 80, 55);
89  colors[15] = cvScalar(120, 120, 120);
90  colors[16] = cvScalar(170, 70, 75);
91  colors[17] = cvScalar(170, 200, 175);
92  colors[18] = cvScalar(40, 70, 75);
93  colors[19] = cvScalar(220, 70, 190);
94 
95  for (int n = 20; n < nMaxColors; n++)
96  {
97  colors[n] = cvScalar(20 + rand() / (RAND_MAX / 235),
98  20 + rand() / (RAND_MAX / 235),
99  20 + rand() / (RAND_MAX / 235));
100  }
101 
102 
103  screenshotImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
104  segmentationImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
105  segmentedCameraImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
106  screenshotFileName = OLP_SCREENSHOT_PATH;
107  screenshotFileName.append("hyp0000.bmp");
108  segmentationImageFileName = OLP_SCREENSHOT_PATH;
109  segmentationImageFileName.append("segm0000.bmp");
110  segmentedCameraImageFileName = OLP_SCREENSHOT_PATH;
111  segmentedCameraImageFileName.append("segmcam0000.bmp");
112  screenshotCounter = 0;
113 }
114 
116 {
117 #ifdef OLP_SHOW_RESULT_IMAGES
118  cv::destroyWindow("Hypotheses (left)");
119  //cvDestroyWindow("Hypotheses (right)");
120  cv::destroyWindow("Confirmed hypotheses");
121 #endif
122  delete[] colors;
123  cvReleaseImage(&m_pIplImageLeft);
124  cvReleaseImage(&m_pIplImageRight);
125  cvReleaseImage(&m_pOldVisualizationImageIpl);
126  delete screenshotImage;
127  delete segmentationImage;
128  delete segmentedCameraImage;
129 }
130 
131 bool
132 CHypothesisVisualization::VisualizeHypotheses(const CByteImage* pByteImageColorLeft,
133  const CByteImage* pByteImageColorRight,
134  const CObjectHypothesisArray& aHypotheses,
135  const CSIFTFeatureArray& aAllPoints,
136  std::vector<CMSERDescriptor3D*>& aAllMSERs,
137  std::vector<CMSERDescriptor3D*>& aCorrespondingMSERs,
138  bool bConfirmedHypotheses,
139  CByteImage* pResultImageLeft,
140  CByteImage* pResultImageRight,
141  const bool bMakeScreenshot,
142  const bool bIsLeftImage)
143 {
144 
145  //**************************************************************************************************************
146  // visualize the found features and planes
147  //**************************************************************************************************************
148 
149 
150 #ifdef OLP_SHOW_RESULT_IMAGES
151  cvReleaseImage(&m_pOldVisualizationImageIpl);
152  m_pOldVisualizationImageIpl = cvCloneImage(m_pIplImageLeft);
153 #endif
154 
155 
156  // load image with OpenCV
157  for (int j = 0; j < OLP_IMG_HEIGHT; j++)
158  {
159  for (int i = 0; i < OLP_IMG_WIDTH; i++)
160  {
161  ((uchar*)(m_pIplImageLeft->imageData +
162  j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 2] =
163  pByteImageColorLeft->pixels[3 * (OLP_IMG_WIDTH * j + i)];
164  ((uchar*)(m_pIplImageLeft->imageData +
165  j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 1] =
166  pByteImageColorLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1];
167  ((uchar*)(m_pIplImageLeft->imageData +
168  j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 0] =
169  pByteImageColorLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2];
170 
171  ((uchar*)(m_pIplImageRight->imageData +
172  j * m_pIplImageRight->widthStep))[i * m_pIplImageRight->nChannels + 2] =
173  pByteImageColorRight->pixels[3 * (OLP_IMG_WIDTH * j + i)];
174  ((uchar*)(m_pIplImageRight->imageData +
175  j * m_pIplImageRight->widthStep))[i * m_pIplImageRight->nChannels + 1] =
176  pByteImageColorRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1];
177  ((uchar*)(m_pIplImageRight->imageData +
178  j * m_pIplImageRight->widthStep))[i * m_pIplImageRight->nChannels + 0] =
179  pByteImageColorRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2];
180  }
181  }
182 
183 #ifndef OLP_USE_ARMAR3_ARMAR3_4
184 
185  if (bMakeScreenshot)
186  {
187  CByteImage* pScreenshotImageGrey =
188  new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
189  ImageProcessor::ConvertImage(pByteImageColorLeft, pScreenshotImageGrey);
190  ImageProcessor::ConvertImage(pScreenshotImageGrey, screenshotImage);
191  delete pScreenshotImageGrey;
192  }
193 
194 #endif
195 
196 
197  //****************************************************************************
198  // visualize the hypotheses
199  //****************************************************************************
200 
201 
202  const int nNumHypotheses = aHypotheses.GetSize();
203 
204 
205  CvFont cFont1, cFont2, cFont3;
206  cvInitFont(&cFont1, CV_FONT_HERSHEY_SIMPLEX, 0.9, 0.9, 0, 3);
207  cvInitFont(&cFont2, CV_FONT_HERSHEY_SIMPLEX, 0.9, 0.9, 0, 2);
208  cvInitFont(&cFont3, CV_FONT_HERSHEY_SIMPLEX, 0.3, 0.3, 0, 1);
209 
210 
211  // visualize the found hypotheses
212  {
213  CvScalar cColor;
214  char* pcN = new char[4];
215 
216  for (int n = nNumHypotheses - 1; n >= 0; n--)
217  {
218  cColor = colors[aHypotheses[n]->nHypothesisNumber];
219  const int nHypothesisSize =
220  aHypotheses[n]->aNewPoints.size() + aHypotheses[n]->aVisibleConfirmedPoints.size();
221 
222  // project points to 2D
223  Vec2d* pPoints2D = new Vec2d[nHypothesisSize];
224 
225  for (int i = 0; i < (int)aHypotheses[n]->aNewPoints.size(); i++)
226  {
227  calibration->WorldToImageCoordinates(
228  aHypotheses[n]->aNewPoints.at(i)->vPosition, pPoints2D[i], false);
229  }
230 
231  for (int i = 0; i < (int)aHypotheses[n]->aVisibleConfirmedPoints.size(); i++)
232  {
233  calibration->WorldToImageCoordinates(
234  aHypotheses[n]->aVisibleConfirmedPoints.at(i)->vPosition,
235  pPoints2D[aHypotheses[n]->aNewPoints.size() + i],
236  false);
237  }
238 
239  // paint the points into the image
240  for (int i = 0; i < nHypothesisSize; i++)
241  {
242  //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 0, cvScalar(255-cColor.val[0], 255-cColor.val[1], 255-cColor.val[2]));
243  //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 1, cColor);
244  //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 2, cColor);
245 
246  cvCircle(
247  m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 0, cColor);
248 
249  //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x+1, (int)pPoints2D[i].y), 0, cColor);
250  if (!bConfirmedHypotheses)
251  {
252  cvCircle(m_pIplImageLeft,
253  cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y),
254  1,
255  cColor);
256  //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 2, cColor);
257  }
258 
259  if ((!bConfirmedHypotheses) && bMakeScreenshot)
260  {
261  MarkConfirmedPoint(
262  screenshotImage, (int)pPoints2D[i].x, (int)pPoints2D[i].y, cColor);
263  }
264  else if (bMakeScreenshot)
265  {
266  MarkUnconfirmedPoint(
267  screenshotImage, (int)pPoints2D[i].x, (int)pPoints2D[i].y, cColor);
268  }
269 
270  //if (i<(int)aHypotheses[n]->aNewPoints.size())
271  //{
272  //_itoa(((int)aHypotheses[n]->aNewPoints.at(i)->vPosition.z%100), pcZ, 10);
273  //sARMARX_VERBOSE_S << pcZ, "%d", ((int)aHypotheses[n]->aNewPoints.at(i)->vPosition.z/10));
274  //cvPutText(m_pIplImageLeft, pcZ, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), &cFont3, cvScalar(255, 255, 255));
275  //}
276 
277  //if (aHypotheses[n]->eType == CObjectHypothesis::ePlane)
278  //{
279  //for (int j=i+1; j<nHypothesisSize; j++)
280  //{
281  // cvLine(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x,(int)pPoints2D[i].y), cvPoint((int)pPoints2D[j].x,(int)pPoints2D[j].y), cColor, 1);
282  //}
283  //}
284  }
285 
286  for (int i = aHypotheses[n]->aNewPoints.size(); i < nHypothesisSize; i++)
287  {
288  //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 0, cColor);
289  cvCircle(
290  m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 1, cColor);
291  cvCircle(
292  m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 2, cColor);
293 
294  if (bMakeScreenshot)
295  {
296  MarkConfirmedPoint(
297  screenshotImage, (int)pPoints2D[i].x, (int)pPoints2D[i].y, cColor);
298  }
299  }
300 
301  delete[] pPoints2D;
302  }
303 
304  for (int n = nNumHypotheses - 1; n >= 0; n--)
305  {
306  cColor = colors[aHypotheses[n]->nHypothesisNumber];
307  sprintf(pcN, "%d", aHypotheses[n]->nHypothesisNumber);
308  Vec2d vCenter2d;
309  calibration->WorldToImageCoordinates(aHypotheses[n]->vCenter, vCenter2d, false);
310 
311  cvCircle(m_pIplImageLeft, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), 4, cColor);
312  cvCircle(m_pIplImageLeft, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), 7, cColor);
313  cvPutText(m_pIplImageLeft,
314  pcN,
315  cvPoint((int)vCenter2d.x, (int)vCenter2d.y),
316  &cFont1,
317  cvScalar(0, 0, 255));
318  cvPutText(
319  m_pIplImageLeft, pcN, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), &cFont2, cColor);
320  }
321 
322  delete[] pcN;
323  }
324 
325 
326 #ifndef OLP_USE_ARMAR3_ARMAR3_4
327 
328  if (bMakeScreenshot)
329  {
330  COLPTools::SetNumberInFileName(screenshotFileName, screenshotCounter);
331  screenshotImage->SaveToFile(screenshotFileName.c_str());
332 
333  if (nNumHypotheses > 0)
334  {
336  aHypotheses[0], calibration, segmentationImage);
337  }
338  else
339  {
340  ImageProcessor::Zero(segmentationImage);
341  }
342 
343  COLPTools::SetNumberInFileName(segmentationImageFileName, screenshotCounter);
344  segmentationImage->SaveToFile(segmentationImageFileName.c_str());
345 
346  for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
347  {
348  segmentedCameraImage->pixels[3 * i] =
349  pByteImageColorLeft->pixels[3 * i] * segmentationImage->pixels[i] / 255;
350  segmentedCameraImage->pixels[3 * i + 1] =
351  pByteImageColorLeft->pixels[3 * i + 1] * segmentationImage->pixels[i] / 255;
352  segmentedCameraImage->pixels[3 * i + 2] =
353  pByteImageColorLeft->pixels[3 * i + 2] * segmentationImage->pixels[i] / 255;
354  }
355 
356  COLPTools::SetNumberInFileName(segmentedCameraImageFileName, screenshotCounter);
357  segmentedCameraImage->SaveToFile(segmentedCameraImageFileName.c_str());
358 
359  screenshotCounter++;
360  }
361 
362 #endif
363 
364 
365  if (pResultImageLeft)
366  {
367  for (int j = 0; j < OLP_IMG_HEIGHT; j++)
368  {
369  for (int i = 0; i < OLP_IMG_WIDTH; i++)
370  {
371  pResultImageLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 0] =
372  ((uchar*)(m_pIplImageLeft->imageData +
373  j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 2];
374  pResultImageLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1] =
375  ((uchar*)(m_pIplImageLeft->imageData +
376  j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 1];
377  pResultImageLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2] =
378  ((uchar*)(m_pIplImageLeft->imageData +
379  j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 0];
380  }
381  }
382  }
383 
384  if (pResultImageRight)
385  {
386  for (int j = 0; j < OLP_IMG_HEIGHT; j++)
387  {
388  for (int i = 0; i < OLP_IMG_WIDTH; i++)
389  {
390  pResultImageRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 0] =
391  ((uchar*)(m_pOldVisualizationImageIpl->imageData +
392  j * m_pOldVisualizationImageIpl
393  ->widthStep))[i * m_pOldVisualizationImageIpl->nChannels + 2];
394  pResultImageRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1] =
395  ((uchar*)(m_pOldVisualizationImageIpl->imageData +
396  j * m_pOldVisualizationImageIpl
397  ->widthStep))[i * m_pOldVisualizationImageIpl->nChannels + 1];
398  pResultImageRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2] =
399  ((uchar*)(m_pOldVisualizationImageIpl->imageData +
400  j * m_pOldVisualizationImageIpl
401  ->widthStep))[i * m_pOldVisualizationImageIpl->nChannels + 0];
402  }
403  }
404  }
405 
406 #ifdef OLP_SHOW_RESULT_IMAGES
407 
408  // Display the image
409  if (bConfirmedHypotheses)
410  {
411  //cvShowImage("Confirmed hypotheses", m_pIplImageLeft);
412  //cvShowImage("Hypotheses (left)", m_pOldVisualizationImageIpl);
413  }
414  else
415  {
416  //cvShowImage("Hypotheses (left)", m_pIplImageLeft);
417  }
418 
419  //cvWaitKey(OLP_WAITING_TIME_VISUALISATION);
420  //int nKey = cvWaitKey(OLP_WAITING_TIME_VISUALISATION);
421  //ARMARX_VERBOSE_S << "Key: %d\n", nKey);
422  //if (nKey != -1)
423  //{
424  //#if defined OLP_HUMAN_PUSHES
425  // ARMARX_VERBOSE_S << "\n\n\n --- Waiting for push - press a key when done ---\n\n\n\n");
426  // cvWaitKey(30000);
427  //#endif
428  //return false;
429  //}
430  //else return true;
431 #endif
432 
433  return true;
434 }
435 
436 void
438 {
439 #ifdef OLP_SHOW_RESULT_IMAGES
440 
441  // Display the image
442  if (bConfirmedHypotheses)
443  {
444  //cvShowImage("Confirmed hypotheses", m_pIplImageLeft);
445  //cvShowImage("Hypotheses (left)", m_pOldVisualizationImageIpl);
446  }
447  else
448  {
449  //cvShowImage("Hypotheses (left)", m_pIplImageLeft);
450  }
451 
452  //cvWaitKey(OLP_WAITING_TIME_VISUALISATION);
453 #endif
454 }
455 
456 void
457 CHypothesisVisualization::MarkConfirmedPoint(CByteImage* pImage, int x, int y, CvScalar cColor)
458 {
459  if (x < 2 || x > OLP_IMG_WIDTH - 3 || y < 3 || y > OLP_IMG_HEIGHT - 3)
460  {
461  return;
462  }
463 
464  pImage->pixels[(int)(3 * ((y - 2) * pImage->width + x) + 0)] = (char)cColor.val[2];
465  pImage->pixels[(int)(3 * ((y - 2) * pImage->width + x) + 1)] = (char)cColor.val[1];
466  pImage->pixels[(int)(3 * ((y - 2) * pImage->width + x) + 2)] = (char)cColor.val[0];
467 
468  pImage->pixels[(int)(3 * ((y - 1) * pImage->width + x) + 0)] = (char)cColor.val[2];
469  pImage->pixels[(int)(3 * ((y - 1) * pImage->width + x) + 1)] = (char)cColor.val[1];
470  pImage->pixels[(int)(3 * ((y - 1) * pImage->width + x) + 2)] = (char)cColor.val[0];
471 
472  pImage->pixels[(int)(3 * (y * pImage->width + x) + 0)] = (char)cColor.val[2];
473  pImage->pixels[(int)(3 * (y * pImage->width + x) + 1)] = (char)cColor.val[1];
474  pImage->pixels[(int)(3 * (y * pImage->width + x) + 2)] = (char)cColor.val[0];
475 
476  pImage->pixels[(int)(3 * ((y + 1) * pImage->width + x) + 0)] = (char)cColor.val[2];
477  pImage->pixels[(int)(3 * ((y + 1) * pImage->width + x) + 1)] = (char)cColor.val[1];
478  pImage->pixels[(int)(3 * ((y + 1) * pImage->width + x) + 2)] = (char)cColor.val[0];
479 
480  pImage->pixels[(int)(3 * ((y + 2) * pImage->width + x) + 0)] = (char)cColor.val[2];
481  pImage->pixels[(int)(3 * ((y + 2) * pImage->width + x) + 1)] = (char)cColor.val[1];
482  pImage->pixels[(int)(3 * ((y + 2) * pImage->width + x) + 2)] = (char)cColor.val[0];
483 
484  pImage->pixels[(int)(3 * (y * pImage->width + x - 2) + 0)] = (char)cColor.val[2];
485  pImage->pixels[(int)(3 * (y * pImage->width + x - 2) + 1)] = (char)cColor.val[1];
486  pImage->pixels[(int)(3 * (y * pImage->width + x - 2) + 2)] = (char)cColor.val[0];
487 
488  pImage->pixels[(int)(3 * (y * pImage->width + x - 1) + 0)] = (char)cColor.val[2];
489  pImage->pixels[(int)(3 * (y * pImage->width + x - 1) + 1)] = (char)cColor.val[1];
490  pImage->pixels[(int)(3 * (y * pImage->width + x - 1) + 2)] = (char)cColor.val[0];
491 
492  pImage->pixels[(int)(3 * (y * pImage->width + x + 1) + 0)] = (char)cColor.val[2];
493  pImage->pixels[(int)(3 * (y * pImage->width + x + 1) + 1)] = (char)cColor.val[1];
494  pImage->pixels[(int)(3 * (y * pImage->width + x + 1) + 2)] = (char)cColor.val[0];
495 
496  pImage->pixels[(int)(3 * (y * pImage->width + x + 2) + 0)] = (char)cColor.val[2];
497  pImage->pixels[(int)(3 * (y * pImage->width + x + 2) + 1)] = (char)cColor.val[1];
498  pImage->pixels[(int)(3 * (y * pImage->width + x + 2) + 2)] = (char)cColor.val[0];
499 }
500 
501 void
502 CHypothesisVisualization::MarkUnconfirmedPoint(CByteImage* pImage, int x, int y, CvScalar cColor)
503 {
504  if (x < 2 || x > OLP_IMG_WIDTH - 3 || y < 3 || y > OLP_IMG_HEIGHT - 3)
505  {
506  return;
507  }
508 
509  pImage->pixels[(int)(3 * (y * pImage->width + x) + 0)] = (char)cColor.val[2];
510  pImage->pixels[(int)(3 * (y * pImage->width + x) + 1)] = (char)cColor.val[1];
511  pImage->pixels[(int)(3 * (y * pImage->width + x) + 2)] = (char)cColor.val[0];
512 }
OLP_IMG_HEIGHT
#define OLP_IMG_HEIGHT
Definition: ObjectLearningByPushingDefinitions.h:69
OLP_IMG_WIDTH
#define OLP_IMG_WIDTH
Definition: ObjectLearningByPushingDefinitions.h:68
CSIFTFeatureArray
CDynamicArrayTemplate< CSIFTFeatureEntry * > CSIFTFeatureArray
Definition: ObjectHypothesis.h:164
COLPTools::SetNumberInFileName
void SetNumberInFileName(std::string &sFileName, int nNumber, int nNumDigits)
Definition: OLPTools.cpp:1464
magic_enum::detail::n
constexpr auto n() noexcept
Definition: magic_enum.hpp:418
OLPTools.h
CHypothesisVisualization::CHypothesisVisualization
CHypothesisVisualization(CCalibration *calibration)
Definition: HypothesisVisualization.cpp:46
CHypothesisVisualization::~CHypothesisVisualization
~CHypothesisVisualization(void)
Definition: HypothesisVisualization.cpp:115
COLPTools::CreateSegmentationProbabilityMap
void CreateSegmentationProbabilityMap(const CObjectHypothesis *pHypothesis, const CCalibration *calibration, CByteImage *&pProbabilityImage)
Definition: OLPTools.cpp:1402
CHypothesisVisualization::RefreshVisualization
void RefreshVisualization(bool bConfirmedHypotheses)
Definition: HypothesisVisualization.cpp:437
GfxTL::Vec2d
VectorXD< 2, double > Vec2d
Definition: VectorXD.h:736
HypothesisVisualization.h
CObjectHypothesisArray
CDynamicArrayTemplate< CObjectHypothesis * > CObjectHypothesisArray
Definition: ObjectHypothesis.h:362
OLP_SCREENSHOT_PATH
#define OLP_SCREENSHOT_PATH
Definition: ObjectLearningByPushingDefinitions.h:221
CHypothesisVisualization::VisualizeHypotheses
bool VisualizeHypotheses(const CByteImage *pByteImageColorLeft, const CByteImage *pByteImageColorRight, const CObjectHypothesisArray &aHypotheses, const CSIFTFeatureArray &aAllPoints, std::vector< CMSERDescriptor3D * > &aAllMSERs, std::vector< CMSERDescriptor3D * > &aCorrespondingMSERs, bool bConfirmedHypotheses, CByteImage *pResultImageLeft=NULL, CByteImage *pResultImageRight=NULL, const bool bMakeScreenshot=false, const bool bIsLeftImage=true)
Definition: HypothesisVisualization.cpp:132
armarx
This file offers overloads of toIce() and fromIce() functions for STL container types.
Definition: ArmarXTimeserver.cpp:27