HypothesisVisualization.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ArmarX.
3  *
4  * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5  *
6  * ArmarX is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * ArmarX is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program. If not, see <http://www.gnu.org/licenses/>.
17  *
18  * @package
19  * @author
20  * @date
21  * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
22  * GNU General Public License
23  */
24 
#include "HypothesisVisualization.h"

#include "OLPTools.h"

// IVT
#include <Image/ByteImage.h>
#include <Image/ImageProcessor.h>
#include <Image/IplImageAdaptor.h>
#include <Calibration/Calibration.h>

// OpenCV
//#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc_c.h>
38 
39 //#include <cstdlib>
40 //#include <cstdio>
41 
42 //#include <ArmarXCore/core/logging/Logging.h>
43 
44 
45 
46 
48 {
49 #ifdef OLP_SHOW_RESULT_IMAGES
50  // create windows
51  //cvNamedWindow("Confirmed hypotheses", CV_WINDOW_AUTOSIZE);
52  //cvNamedWindow("Hypotheses (left)", CV_WINDOW_AUTOSIZE);
53  //cvNamedWindow("Hypotheses (right)", CV_WINDOW_AUTOSIZE);
54 #endif
55 
56  // image for visualisation
57  m_pIplImageLeft = cvCreateImage(cvSize(OLP_IMG_WIDTH, OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
58  m_pIplImageRight = cvCreateImage(cvSize(OLP_IMG_WIDTH, OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
59  m_pOldVisualizationImageIpl = cvCloneImage(m_pIplImageLeft);
60 
61  // set stereo calibration
62  this->calibration = calibration;
63 
64  // disparity stuff
65  //cvNamedWindow("Disparity", CV_WINDOW_AUTOSIZE);
66  //cvNamedWindow("Disparity edges", CV_WINDOW_AUTOSIZE);
67  //cvNamedWindow("Combined edges", CV_WINDOW_AUTOSIZE);
68  //m_pIplImageLeftRectified = cvCreateImage(cvSize(OLP_IMG_WIDTH,OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
69  //m_pIplImageRightRectified = cvCreateImage(cvSize(OLP_IMG_WIDTH,OLP_IMG_HEIGHT), IPL_DEPTH_8U, 3);
70  //m_pIplImageDisparity = cvCreateImage(cvSize(OLP_IMG_WIDTH,OLP_IMG_HEIGHT), IPL_DEPTH_8U, 1);
71 
72  // define colors
73  int nMaxColors = 1000;
74  colors = new CvScalar[nMaxColors];
75  colors[0] = cvScalar(255, 20, 20);
76  colors[1] = cvScalar(0, 0, 255);
77  colors[2] = cvScalar(0, 240, 255);
78  colors[3] = cvScalar(0, 255, 0);
79  colors[4] = cvScalar(255, 255, 0);
80  colors[5] = cvScalar(255, 0, 255);
81  colors[6] = cvScalar(45, 200, 105);
82  colors[7] = cvScalar(180, 240, 150);
83  colors[8] = cvScalar(160, 30, 55);
84  colors[9] = cvScalar(230, 130, 70);
85  colors[10] = cvScalar(70, 190, 210);
86  colors[11] = cvScalar(75, 160, 110);
87  colors[12] = cvScalar(150, 210, 155);
88  colors[13] = cvScalar(150, 30, 180);
89  colors[14] = cvScalar(210, 80, 55);
90  colors[15] = cvScalar(120, 120, 120);
91  colors[16] = cvScalar(170, 70, 75);
92  colors[17] = cvScalar(170, 200, 175);
93  colors[18] = cvScalar(40, 70, 75);
94  colors[19] = cvScalar(220, 70, 190);
95 
96  for (int n = 20; n < nMaxColors; n++)
97  {
98  colors[n] = cvScalar(20 + rand() / (RAND_MAX / 235), 20 + rand() / (RAND_MAX / 235), 20 + rand() / (RAND_MAX / 235));
99  }
100 
101 
102  screenshotImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
103  segmentationImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
104  segmentedCameraImage = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eRGB24);
105  screenshotFileName = OLP_SCREENSHOT_PATH;
106  screenshotFileName.append("hyp0000.bmp");
107  segmentationImageFileName = OLP_SCREENSHOT_PATH;
108  segmentationImageFileName.append("segm0000.bmp");
109  segmentedCameraImageFileName = OLP_SCREENSHOT_PATH;
110  segmentedCameraImageFileName.append("segmcam0000.bmp");
111  screenshotCounter = 0;
112 }
113 
114 
115 
116 
118 {
119 #ifdef OLP_SHOW_RESULT_IMAGES
120  cv::destroyWindow("Hypotheses (left)");
121  //cvDestroyWindow("Hypotheses (right)");
122  cv::destroyWindow("Confirmed hypotheses");
123 #endif
124  delete[] colors;
125  cvReleaseImage(&m_pIplImageLeft);
126  cvReleaseImage(&m_pIplImageRight);
127  cvReleaseImage(&m_pOldVisualizationImageIpl);
128  delete screenshotImage;
129  delete segmentationImage;
130  delete segmentedCameraImage;
131 }
132 
133 
134 
135 
/**
 * Draws the given object hypotheses into a copy of the left camera image:
 * every hypothesis point is painted in the hypothesis' color, and each
 * hypothesis gets its number printed at its projected center. Optionally
 * writes screenshot, segmentation-probability and segmented-camera images
 * to disk, and copies the drawn images into the provided result images.
 *
 * @param pByteImageColorLeft   left camera image (RGB24, camera resolution)
 * @param pByteImageColorRight  right camera image (RGB24, camera resolution)
 * @param aHypotheses           hypotheses to visualize
 * @param aAllPoints            unused in this implementation
 * @param aAllMSERs             unused in this implementation
 * @param aCorrespondingMSERs   unused in this implementation
 * @param bConfirmedHypotheses  true: draw confirmed-style (single pixels);
 *                              false: draw slightly larger unconfirmed marks
 * @param pResultImageLeft      if non-NULL, receives the freshly drawn image
 * @param pResultImageRight     if non-NULL, receives the PREVIOUS visualization
 *                              image (kept in m_pOldVisualizationImageIpl)
 * @param bMakeScreenshot       if true, save screenshot/segmentation files
 * @param bIsLeftImage          unused in this implementation
 * @return always true
 */
bool CHypothesisVisualization::VisualizeHypotheses(const CByteImage* pByteImageColorLeft, const CByteImage* pByteImageColorRight, const CObjectHypothesisArray& aHypotheses,
        const CSIFTFeatureArray& aAllPoints, std::vector<CMSERDescriptor3D*>& aAllMSERs, std::vector<CMSERDescriptor3D*>& aCorrespondingMSERs,
        bool bConfirmedHypotheses, CByteImage* pResultImageLeft, CByteImage* pResultImageRight, const bool bMakeScreenshot, const bool bIsLeftImage)
{

    //**************************************************************************************************************
    // visualize the found features and planes
    //**************************************************************************************************************


#ifdef OLP_SHOW_RESULT_IMAGES
    // keep the previous visualization: it is handed out via pResultImageRight below
    cvReleaseImage(&m_pOldVisualizationImageIpl);
    m_pOldVisualizationImageIpl = cvCloneImage(m_pIplImageLeft);
#endif


    // copy the IVT RGB images into the OpenCV images (note the RGB -> BGR channel swap)
    for (int j = 0; j < OLP_IMG_HEIGHT; j++)
    {
        for (int i = 0; i < OLP_IMG_WIDTH; i++)
        {
            ((uchar*)(m_pIplImageLeft->imageData + j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 2] = pByteImageColorLeft->pixels[3 * (OLP_IMG_WIDTH * j + i)];
            ((uchar*)(m_pIplImageLeft->imageData + j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 1] = pByteImageColorLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1];
            ((uchar*)(m_pIplImageLeft->imageData + j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 0] = pByteImageColorLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2];

            ((uchar*)(m_pIplImageRight->imageData + j * m_pIplImageRight->widthStep))[i * m_pIplImageRight->nChannels + 2] = pByteImageColorRight->pixels[3 * (OLP_IMG_WIDTH * j + i)];
            ((uchar*)(m_pIplImageRight->imageData + j * m_pIplImageRight->widthStep))[i * m_pIplImageRight->nChannels + 1] = pByteImageColorRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1];
            ((uchar*)(m_pIplImageRight->imageData + j * m_pIplImageRight->widthStep))[i * m_pIplImageRight->nChannels + 0] = pByteImageColorRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2];

        }
    }

#ifndef OLP_USE_ARMAR3_ARMAR3_4

    // screenshot base image: greyscale version of the left camera image,
    // converted back to RGB so colored marks can be drawn on top
    if (bMakeScreenshot)
    {
        CByteImage* pScreenshotImageGrey = new CByteImage(OLP_IMG_WIDTH, OLP_IMG_HEIGHT, CByteImage::eGrayScale);
        ImageProcessor::ConvertImage(pByteImageColorLeft, pScreenshotImageGrey);
        ImageProcessor::ConvertImage(pScreenshotImageGrey, screenshotImage);
        delete pScreenshotImageGrey;
    }

#endif




    //****************************************************************************
    // visualize the hypotheses
    //****************************************************************************


    const int nNumHypotheses = aHypotheses.GetSize();


    // cFont1/cFont2 are used for the hypothesis numbers (outline + fill), cFont3 is unused here
    CvFont cFont1, cFont2, cFont3;
    cvInitFont(&cFont1, CV_FONT_HERSHEY_SIMPLEX, 0.9, 0.9, 0, 3);
    cvInitFont(&cFont2, CV_FONT_HERSHEY_SIMPLEX, 0.9, 0.9, 0, 2);
    cvInitFont(&cFont3, CV_FONT_HERSHEY_SIMPLEX, 0.3, 0.3, 0, 1);


    // visualize the found hypotheses
    {
        CvScalar cColor;
        // buffer for the printed hypothesis number; assumes numbers stay below
        // 1000 (the colors table has 1000 entries) so "%d" fits in 4 chars -- TODO confirm
        char* pcN = new char[4];

        // iterate back-to-front so hypothesis 0 (the best one) is drawn last, i.e. on top
        for (int n = nNumHypotheses - 1; n >= 0; n--)
        {
            cColor = colors[aHypotheses[n]->nHypothesisNumber];
            const int nHypothesisSize = aHypotheses[n]->aNewPoints.size() + aHypotheses[n]->aVisibleConfirmedPoints.size();

            // project points to 2D: first the new points, then the visible confirmed points
            Vec2d* pPoints2D = new Vec2d[nHypothesisSize];

            for (int i = 0; i < (int)aHypotheses[n]->aNewPoints.size(); i++)
            {
                calibration->WorldToImageCoordinates(aHypotheses[n]->aNewPoints.at(i)->vPosition, pPoints2D[i], false);
            }

            for (int i = 0; i < (int)aHypotheses[n]->aVisibleConfirmedPoints.size(); i++)
            {
                calibration->WorldToImageCoordinates(aHypotheses[n]->aVisibleConfirmedPoints.at(i)->vPosition, pPoints2D[aHypotheses[n]->aNewPoints.size() + i], false);
            }

            // paint the points into the image
            for (int i = 0; i < nHypothesisSize; i++)
            {
                //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 0, cvScalar(255-cColor.val[0], 255-cColor.val[1], 255-cColor.val[2]));
                //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 1, cColor);
                //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 2, cColor);

                // radius 0: single pixel
                cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 0, cColor);

                //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x+1, (int)pPoints2D[i].y), 0, cColor);
                // unconfirmed hypotheses get a slightly bigger mark
                if (!bConfirmedHypotheses)
                {
                    cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 1, cColor);
                    //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 2, cColor);
                }

                // NOTE(review): the confirmed/unconfirmed marking below looks inverted
                // (MarkConfirmedPoint is called when !bConfirmedHypotheses) -- verify intent
                if ((!bConfirmedHypotheses) && bMakeScreenshot)
                {
                    MarkConfirmedPoint(screenshotImage, (int)pPoints2D[i].x, (int)pPoints2D[i].y, cColor);
                }
                else if (bMakeScreenshot)
                {
                    MarkUnconfirmedPoint(screenshotImage, (int)pPoints2D[i].x, (int)pPoints2D[i].y, cColor);
                }

                //if (i<(int)aHypotheses[n]->aNewPoints.size())
                //{
                //_itoa(((int)aHypotheses[n]->aNewPoints.at(i)->vPosition.z%100), pcZ, 10);
                //sARMARX_VERBOSE_S << pcZ, "%d", ((int)aHypotheses[n]->aNewPoints.at(i)->vPosition.z/10));
                //cvPutText(m_pIplImageLeft, pcZ, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), &cFont3, cvScalar(255, 255, 255));
                //}

                //if (aHypotheses[n]->eType == CObjectHypothesis::ePlane)
                //{
                //for (int j=i+1; j<nHypothesisSize; j++)
                //{
                //  cvLine(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x,(int)pPoints2D[i].y), cvPoint((int)pPoints2D[j].x,(int)pPoints2D[j].y), cColor, 1);
                //}
                //}
            }

            // the visible confirmed points (second part of pPoints2D) additionally
            // get 1- and 2-pixel circles so they stand out
            for (int i = aHypotheses[n]->aNewPoints.size(); i < nHypothesisSize; i++)
            {
                //cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 0, cColor);
                cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 1, cColor);
                cvCircle(m_pIplImageLeft, cvPoint((int)pPoints2D[i].x, (int)pPoints2D[i].y), 2, cColor);

                if (bMakeScreenshot)
                {
                    MarkConfirmedPoint(screenshotImage, (int)pPoints2D[i].x, (int)pPoints2D[i].y, cColor);
                }
            }

            delete[] pPoints2D;
        }

        // second pass: draw the hypothesis numbers on top of all points
        for (int n = nNumHypotheses - 1; n >= 0; n--)
        {
            cColor = colors[aHypotheses[n]->nHypothesisNumber];
            sprintf(pcN, "%d", aHypotheses[n]->nHypothesisNumber);
            Vec2d vCenter2d;
            calibration->WorldToImageCoordinates(aHypotheses[n]->vCenter, vCenter2d, false);

            // double circle around the center, number drawn with outline (cFont1) and fill (cFont2)
            cvCircle(m_pIplImageLeft, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), 4, cColor);
            cvCircle(m_pIplImageLeft, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), 7, cColor);
            cvPutText(m_pIplImageLeft, pcN, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), &cFont1, cvScalar(0, 0, 255));
            cvPutText(m_pIplImageLeft, pcN, cvPoint((int)vCenter2d.x, (int)vCenter2d.y), &cFont2, cColor);
        }

        delete[] pcN;
    }





#ifndef OLP_USE_ARMAR3_ARMAR3_4

    // write the screenshot, segmentation probability map and segmented camera
    // image to disk; the running counter is patched into all three file names
    if (bMakeScreenshot)
    {
        COLPTools::SetNumberInFileName(screenshotFileName, screenshotCounter);
        screenshotImage->SaveToFile(screenshotFileName.c_str());

        // segmentation map is computed for the first (best) hypothesis only
        if (nNumHypotheses > 0)
        {
            COLPTools::CreateSegmentationProbabilityMap(aHypotheses[0], calibration, segmentationImage);
        }
        else
        {
            ImageProcessor::Zero(segmentationImage);
        }

        COLPTools::SetNumberInFileName(segmentationImageFileName, screenshotCounter);
        segmentationImage->SaveToFile(segmentationImageFileName.c_str());

        // modulate the camera image with the segmentation probability (0..255)
        for (int i = 0; i < OLP_IMG_WIDTH * OLP_IMG_HEIGHT; i++)
        {
            segmentedCameraImage->pixels[3 * i] = pByteImageColorLeft->pixels[3 * i] * segmentationImage->pixels[i] / 255;
            segmentedCameraImage->pixels[3 * i + 1] = pByteImageColorLeft->pixels[3 * i + 1] * segmentationImage->pixels[i] / 255;
            segmentedCameraImage->pixels[3 * i + 2] = pByteImageColorLeft->pixels[3 * i + 2] * segmentationImage->pixels[i] / 255;
        }

        COLPTools::SetNumberInFileName(segmentedCameraImageFileName, screenshotCounter);
        segmentedCameraImage->SaveToFile(segmentedCameraImageFileName.c_str());

        screenshotCounter++;
    }

#endif



    // copy the freshly drawn image back to IVT format (BGR -> RGB swap)
    if (pResultImageLeft)
    {
        for (int j = 0; j < OLP_IMG_HEIGHT; j++)
        {
            for (int i = 0; i < OLP_IMG_WIDTH; i++)
            {
                pResultImageLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 0] = ((uchar*)(m_pIplImageLeft->imageData + j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 2];
                pResultImageLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1] = ((uchar*)(m_pIplImageLeft->imageData + j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 1];
                pResultImageLeft->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2] = ((uchar*)(m_pIplImageLeft->imageData + j * m_pIplImageLeft->widthStep))[i * m_pIplImageLeft->nChannels + 0];
            }
        }
    }

    // the "right" result image actually receives the PREVIOUS visualization
    if (pResultImageRight)
    {
        for (int j = 0; j < OLP_IMG_HEIGHT; j++)
        {
            for (int i = 0; i < OLP_IMG_WIDTH; i++)
            {
                pResultImageRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 0] = ((uchar*)(m_pOldVisualizationImageIpl->imageData + j * m_pOldVisualizationImageIpl->widthStep))[i * m_pOldVisualizationImageIpl->nChannels + 2];
                pResultImageRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 1] = ((uchar*)(m_pOldVisualizationImageIpl->imageData + j * m_pOldVisualizationImageIpl->widthStep))[i * m_pOldVisualizationImageIpl->nChannels + 1];
                pResultImageRight->pixels[3 * (OLP_IMG_WIDTH * j + i) + 2] = ((uchar*)(m_pOldVisualizationImageIpl->imageData + j * m_pOldVisualizationImageIpl->widthStep))[i * m_pOldVisualizationImageIpl->nChannels + 0];
            }
        }
    }

#ifdef OLP_SHOW_RESULT_IMAGES

    // Display the image (on-screen display currently disabled)
    if (bConfirmedHypotheses)
    {
        //cvShowImage("Confirmed hypotheses", m_pIplImageLeft);
        //cvShowImage("Hypotheses (left)", m_pOldVisualizationImageIpl);
    }
    else
    {
        //cvShowImage("Hypotheses (left)", m_pIplImageLeft);
    }

    //cvWaitKey(OLP_WAITING_TIME_VISUALISATION);
    //int nKey = cvWaitKey(OLP_WAITING_TIME_VISUALISATION);
    //ARMARX_VERBOSE_S << "Key: %d\n", nKey);
    //if (nKey != -1)
    //{
    //#if defined OLP_HUMAN_PUSHES
    //  ARMARX_VERBOSE_S << "\n\n\n --- Waiting for push - press a key when done ---\n\n\n\n");
    //  cvWaitKey(30000);
    //#endif
    //return false;
    //}
    //else return true;
#endif

    return true;
}
387 
388 void CHypothesisVisualization::RefreshVisualization(bool bConfirmedHypotheses)
389 {
390 #ifdef OLP_SHOW_RESULT_IMAGES
391 
392  // Display the image
393  if (bConfirmedHypotheses)
394  {
395  //cvShowImage("Confirmed hypotheses", m_pIplImageLeft);
396  //cvShowImage("Hypotheses (left)", m_pOldVisualizationImageIpl);
397  }
398  else
399  {
400  //cvShowImage("Hypotheses (left)", m_pIplImageLeft);
401  }
402 
403  //cvWaitKey(OLP_WAITING_TIME_VISUALISATION);
404 #endif
405 }
406 
407 
408 
409 
410 void CHypothesisVisualization::MarkConfirmedPoint(CByteImage* pImage, int x, int y, CvScalar cColor)
411 {
412  if (x < 2 || x > OLP_IMG_WIDTH - 3 || y < 3 || y > OLP_IMG_HEIGHT - 3)
413  {
414  return;
415  }
416 
417  pImage->pixels[(int)(3 * ((y - 2)*pImage->width + x) + 0)] = (char)cColor.val[2];
418  pImage->pixels[(int)(3 * ((y - 2)*pImage->width + x) + 1)] = (char)cColor.val[1];
419  pImage->pixels[(int)(3 * ((y - 2)*pImage->width + x) + 2)] = (char)cColor.val[0];
420 
421  pImage->pixels[(int)(3 * ((y - 1)*pImage->width + x) + 0)] = (char)cColor.val[2];
422  pImage->pixels[(int)(3 * ((y - 1)*pImage->width + x) + 1)] = (char)cColor.val[1];
423  pImage->pixels[(int)(3 * ((y - 1)*pImage->width + x) + 2)] = (char)cColor.val[0];
424 
425  pImage->pixels[(int)(3 * (y * pImage->width + x) + 0)] = (char)cColor.val[2];
426  pImage->pixels[(int)(3 * (y * pImage->width + x) + 1)] = (char)cColor.val[1];
427  pImage->pixels[(int)(3 * (y * pImage->width + x) + 2)] = (char)cColor.val[0];
428 
429  pImage->pixels[(int)(3 * ((y + 1)*pImage->width + x) + 0)] = (char)cColor.val[2];
430  pImage->pixels[(int)(3 * ((y + 1)*pImage->width + x) + 1)] = (char)cColor.val[1];
431  pImage->pixels[(int)(3 * ((y + 1)*pImage->width + x) + 2)] = (char)cColor.val[0];
432 
433  pImage->pixels[(int)(3 * ((y + 2)*pImage->width + x) + 0)] = (char)cColor.val[2];
434  pImage->pixels[(int)(3 * ((y + 2)*pImage->width + x) + 1)] = (char)cColor.val[1];
435  pImage->pixels[(int)(3 * ((y + 2)*pImage->width + x) + 2)] = (char)cColor.val[0];
436 
437  pImage->pixels[(int)(3 * (y * pImage->width + x - 2) + 0)] = (char)cColor.val[2];
438  pImage->pixels[(int)(3 * (y * pImage->width + x - 2) + 1)] = (char)cColor.val[1];
439  pImage->pixels[(int)(3 * (y * pImage->width + x - 2) + 2)] = (char)cColor.val[0];
440 
441  pImage->pixels[(int)(3 * (y * pImage->width + x - 1) + 0)] = (char)cColor.val[2];
442  pImage->pixels[(int)(3 * (y * pImage->width + x - 1) + 1)] = (char)cColor.val[1];
443  pImage->pixels[(int)(3 * (y * pImage->width + x - 1) + 2)] = (char)cColor.val[0];
444 
445  pImage->pixels[(int)(3 * (y * pImage->width + x + 1) + 0)] = (char)cColor.val[2];
446  pImage->pixels[(int)(3 * (y * pImage->width + x + 1) + 1)] = (char)cColor.val[1];
447  pImage->pixels[(int)(3 * (y * pImage->width + x + 1) + 2)] = (char)cColor.val[0];
448 
449  pImage->pixels[(int)(3 * (y * pImage->width + x + 2) + 0)] = (char)cColor.val[2];
450  pImage->pixels[(int)(3 * (y * pImage->width + x + 2) + 1)] = (char)cColor.val[1];
451  pImage->pixels[(int)(3 * (y * pImage->width + x + 2) + 2)] = (char)cColor.val[0];
452 }
453 
454 
455 
456 void CHypothesisVisualization::MarkUnconfirmedPoint(CByteImage* pImage, int x, int y, CvScalar cColor)
457 {
458  if (x < 2 || x > OLP_IMG_WIDTH - 3 || y < 3 || y > OLP_IMG_HEIGHT - 3)
459  {
460  return;
461  }
462 
463  pImage->pixels[(int)(3 * (y * pImage->width + x) + 0)] = (char)cColor.val[2];
464  pImage->pixels[(int)(3 * (y * pImage->width + x) + 1)] = (char)cColor.val[1];
465  pImage->pixels[(int)(3 * (y * pImage->width + x) + 2)] = (char)cColor.val[0];
466 
467 }
468 
OLP_IMG_HEIGHT
#define OLP_IMG_HEIGHT
Definition: ObjectLearningByPushingDefinitions.h:69
OLP_IMG_WIDTH
#define OLP_IMG_WIDTH
Definition: ObjectLearningByPushingDefinitions.h:68
CSIFTFeatureArray
CDynamicArrayTemplate< CSIFTFeatureEntry * > CSIFTFeatureArray
Definition: ObjectHypothesis.h:168
COLPTools::SetNumberInFileName
void SetNumberInFileName(std::string &sFileName, int nNumber, int nNumDigits)
Definition: OLPTools.cpp:1378
GfxTL::Vec2d
VectorXD< 2, double > Vec2d
Definition: VectorXD.h:694
OLPTools.h
CHypothesisVisualization::CHypothesisVisualization
CHypothesisVisualization(CCalibration *calibration)
Definition: HypothesisVisualization.cpp:47
CHypothesisVisualization::~CHypothesisVisualization
~CHypothesisVisualization(void)
Definition: HypothesisVisualization.cpp:117
COLPTools::CreateSegmentationProbabilityMap
void CreateSegmentationProbabilityMap(const CObjectHypothesis *pHypothesis, const CCalibration *calibration, CByteImage *&pProbabilityImage)
Definition: OLPTools.cpp:1319
CHypothesisVisualization::RefreshVisualization
void RefreshVisualization(bool bConfirmedHypotheses)
Definition: HypothesisVisualization.cpp:388
HypothesisVisualization.h
CObjectHypothesisArray
CDynamicArrayTemplate< CObjectHypothesis * > CObjectHypothesisArray
Definition: ObjectHypothesis.h:359
OLP_SCREENSHOT_PATH
#define OLP_SCREENSHOT_PATH
Definition: ObjectLearningByPushingDefinitions.h:206
CHypothesisVisualization::VisualizeHypotheses
bool VisualizeHypotheses(const CByteImage *pByteImageColorLeft, const CByteImage *pByteImageColorRight, const CObjectHypothesisArray &aHypotheses, const CSIFTFeatureArray &aAllPoints, std::vector< CMSERDescriptor3D * > &aAllMSERs, std::vector< CMSERDescriptor3D * > &aCorrespondingMSERs, bool bConfirmedHypotheses, CByteImage *pResultImageLeft=NULL, CByteImage *pResultImageRight=NULL, const bool bMakeScreenshot=false, const bool bIsLeftImage=true)
Definition: HypothesisVisualization.cpp:136