HypothesisValidationRGBD.cpp
/*
 * This file is part of ArmarX.
 *
 * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
 *
 * ArmarX is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ArmarX is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * @package
 * @author
 * @date
 * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
 *            GNU General Public License
 */
#include "HypothesisValidationRGBD.h"

#include "ColorICP.h"
#include "OLPTools.h"
#include "ObjectRecognition.h"


// IVT
#include <cstdlib>

#include <ArmarXCore/core/logging/Logging.h>

#include <Calibration/Calibration.h>
#include <Image/ByteImage.h>
#include <Math/Math3d.h>

void
CHypothesisValidationRGBD::ValidateInitialHypotheses(
    const std::vector<CHypothesisPoint*>& aOldDepthMapPoints,
    const std::vector<CHypothesisPoint*>& aNewDepthMapPoints,
    const CByteImage* pForegroundImage,
    const CByteImage* pHSVImage,
    const CCalibration* calibration,
    const Vec3d upwardsVector,
    CObjectHypothesisArray* pObjectHypotheses,
    CObjectHypothesisArray* pConfirmedHypotheses)
{
    // calculate foreground points
    std::vector<CHypothesisPoint*> aForegroundPoints;
    COLPTools::FilterForegroundPoints(
        aNewDepthMapPoints, pForegroundImage, calibration, aForegroundPoints);
    ARMARX_VERBOSE_S << aForegroundPoints.size() << " foreground points";


    // if something changed in the image, determine transformations and validate hypotheses
    if (aForegroundPoints.size() > 0)
    {
        // create hypotheses from points that changed
        GenerateAndAddForegroundHypotheses(
            aOldDepthMapPoints, pForegroundImage, calibration, pObjectHypotheses);

        // set up the point cloud registration
        CPointCloudRegistration* pPointCloudRegistration = new CPointCloudRegistration();
        pPointCloudRegistration->SetScenePointcloud(aNewDepthMapPoints);

        // validate all hypotheses
        for (int i = 0; i < pObjectHypotheses->GetSize(); i++)
        {
            // debug only for paper!
            //if ((*pObjectHypotheses)[i]->nHypothesisNumber > 40) continue;

            // check if the hypothesis is of type RGBD
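            // note: the trailing "&& false" makes the following condition always false, so the
            // type filter is effectively disabled and every hypothesis is checked for motion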
            if (((*pObjectHypotheses)[i]->eType != CObjectHypothesis::eRGBD) &&
                ((*pObjectHypotheses)[i]->eType != CObjectHypothesis::eSingleColored) && false)
            {
                ARMARX_VERBOSE_S << "ValidateInitialHypothesis: Wrong hypothesis type (not RGBD or "
                                    "SingleColored). Not checking it for motion.";
            }
            else
            {
                // check hypothesis only if it lies in a region that changed
                float fForegroundRatio;
                int nForegroundPoints;
                COLPTools::CalculateForegroundRatioOfHypothesis((*pObjectHypotheses)[i],
                                                                pForegroundImage,
                                                                calibration,
                                                                fForegroundRatio,
                                                                nForegroundPoints);

                if (fForegroundRatio > 0.3f) // || 100*nForegroundPoints>OLP_IMG_WIDTH*OLP_IMG_HEIGHT)
                {
                    ARMARX_VERBOSE_S << "Estimating transformation of hypothesis "
                                     << (*pObjectHypotheses)[i]->nHypothesisNumber << " ("
                                     << 100 * fForegroundRatio << " % change, "
                                     << (*pObjectHypotheses)[i]->aNewPoints.size() << " points).";

                    CObjectHypothesis* pConfirmedHypothesis;
                    bool bValidated = ValidateHypothesis(pPointCloudRegistration,
                                                         aNewDepthMapPoints,
                                                         aForegroundPoints,
                                                         pForegroundImage,
                                                         pHSVImage,
                                                         calibration,
                                                         upwardsVector,
                                                         (*pObjectHypotheses)[i],
                                                         pConfirmedHypothesis,
                                                         OLP_EFFORT_POINTCLOUD_MATCHING);
                    //bool bValidated = ValidateHypothesis(pPointCloudRegistration, aNewDepthMapPoints, aForegroundPoints, pForegroundImage, pHSVImage, calibration, upwardsVector, (*pObjectHypotheses)[i], pConfirmedHypothesis, OLP_EFFORT_POINTCLOUD_MATCHING);

                    if (bValidated)
                    {
                        pConfirmedHypotheses->AddElement(pConfirmedHypothesis);
                    }
                }
                else
                {
                    ARMARX_VERBOSE_S << "The image region containing hypothesis " << i
                                     << " did not change (" << 100 * fForegroundRatio << " %).";
                }
            }
        }

        delete pPointCloudRegistration;
    }
    else
    {
        ARMARX_VERBOSE_S << "No change in image detected!";
    }
}

void
CHypothesisValidationRGBD::RevalidateHypotheses(
    const std::vector<CHypothesisPoint*>& aNewDepthMapPoints,
    const CByteImage* pForegroundImage,
    const CByteImage* pHSVImage,
    const CCalibration* calibration,
    const Vec3d upwardsVector,
    CObjectHypothesisArray* pObjectHypotheses)
{
    // calculate foreground points
    std::vector<CHypothesisPoint*> aForegroundPoints;
    COLPTools::FilterForegroundPoints(
        aNewDepthMapPoints, pForegroundImage, calibration, aForegroundPoints);
    ARMARX_VERBOSE_S << aForegroundPoints.size() << " foreground points";


    // if something changed in the image, determine transformations and revalidate hypotheses
    if (aForegroundPoints.size() > 0)
    {
        // set up the point cloud registration
        CPointCloudRegistration* pPointCloudRegistration = new CPointCloudRegistration();
        pPointCloudRegistration->SetScenePointcloud(aNewDepthMapPoints);

        // validate all hypotheses
        for (int i = 0; i < pObjectHypotheses->GetSize(); i++)
        {
            // check the hypothesis regardless of whether it lies in a region that changed
            CObjectHypothesis* pObjectHypothesis = (*pObjectHypotheses)[i];
            ARMARX_VERBOSE_S << "Estimating transformation of hypothesis "
                             << pObjectHypothesis->nHypothesisNumber << " ("
                             << (*pObjectHypotheses)[i]->aConfirmedPoints.size() << " + "
                             << (*pObjectHypotheses)[i]->aNewPoints.size() << " points).";
            CObjectHypothesis* pConfirmedHypothesis;
            bool bValidated = ValidateHypothesis(pPointCloudRegistration,
                                                 aNewDepthMapPoints,
                                                 aForegroundPoints,
                                                 pForegroundImage,
                                                 pHSVImage,
                                                 calibration,
                                                 upwardsVector,
                                                 pObjectHypothesis,
                                                 pConfirmedHypothesis);

            if (bValidated)
            {
                (*pObjectHypotheses)[i] = pConfirmedHypothesis;
                delete pObjectHypothesis;
            }
            else
            {
                // just keep the hypothesis
            }
        }

        delete pPointCloudRegistration;
    }
    else
    {
        ARMARX_VERBOSE_S << "No change in image detected!";
    }
}

void
CHypothesisValidationRGBD::GenerateAndAddForegroundHypotheses(
    const std::vector<CHypothesisPoint*>& aOldDepthMapPoints,
    const CByteImage* pForegroundImage,
    const CCalibration* calibration,
    CObjectHypothesisArray* pObjectHypotheses)
{
    std::vector<CHypothesisPoint*> aForegroundPointsFromOldPoints;
    COLPTools::FilterForegroundPoints(
        aOldDepthMapPoints, pForegroundImage, calibration, aForegroundPointsFromOldPoints);
    ARMARX_VERBOSE_S << aForegroundPointsFromOldPoints.size()
                     << " points for foreground hypotheses";

    // cluster the points and create foreground hypotheses
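    // X-means is asked for between 2 and 5 clusters; every cluster that is large enough
    // becomes a new foreground hypothesis in the loop below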
    std::vector<std::vector<CHypothesisPoint*>> aForegroundPointsFromOldPointsClusters;
    COLPTools::ClusterXMeans(aForegroundPointsFromOldPoints,
                             2,
                             5,
                             OLP_CLUSTERING_FACTOR_FOREGROUND_HYPOTHESES,
                             aForegroundPointsFromOldPointsClusters);
    ARMARX_VERBOSE_S << aForegroundPointsFromOldPointsClusters.size() << " foreground hypotheses";

    for (size_t i = 0; i < aForegroundPointsFromOldPointsClusters.size(); i++)
    {
        COLPTools::RemoveOutliers(aForegroundPointsFromOldPointsClusters.at(i), 1.7f);

        if (aForegroundPointsFromOldPointsClusters.at(i).size() > OLP_MIN_NUM_FEATURES)
        {
            CObjectHypothesis* pForegroundHypo = new CObjectHypothesis();
            Vec3d vCenter = {0, 0, 0};

            for (size_t j = 0; j < aForegroundPointsFromOldPointsClusters.at(i).size(); j++)
            {
                pForegroundHypo->aNewPoints.push_back(
                    aForegroundPointsFromOldPointsClusters.at(i).at(j)->GetCopy());
                Math3d::AddToVec(vCenter,
                                 aForegroundPointsFromOldPointsClusters.at(i).at(j)->vPosition);
            }

            Math3d::MulVecScalar(vCenter,
                                 1.0 / aForegroundPointsFromOldPointsClusters.at(i).size(),
                                 pForegroundHypo->vCenter);
            pForegroundHypo->nHypothesisNumber = pObjectHypotheses->GetSize();
            pForegroundHypo->eType = CObjectHypothesis::eRGBD;
            pObjectHypotheses->AddElement(pForegroundHypo);
        }
    }
}

bool
CHypothesisValidationRGBD::ValidateHypothesis(
    CPointCloudRegistration* pPointCloudRegistration,
    const std::vector<CHypothesisPoint*>& aNewDepthMapPoints,
    const std::vector<CHypothesisPoint*>& aForegroundPoints,
    const CByteImage* pForegroundImage,
    const CByteImage* pHSVImage,
    const CCalibration* calibration,
    const Vec3d upwardsVector,
    CObjectHypothesis* pObjectHypothesis,
    CObjectHypothesis*& pConfirmedHypothesis,
    const int nEffortLevel)
{
    Mat3d mRot = Math3d::unit_mat;
    Vec3d vTrans = Math3d::zero_vec;
    float errorAtOriginalPosition = FLT_MAX;

    // check if the hypothesis moved
    if (pPointCloudRegistration->CheckObjectMatchAtOriginalPosition(
            pObjectHypothesis, errorAtOriginalPosition, nEffortLevel))
    {
        ARMARX_VERBOSE_S << "CHypothesisValidationRGBD: Initial check at old object position: the "
                            "transformation shows no motion.";
        pObjectHypothesis->aHypothesisHasMoved.push_back(false);
        return false;
    }

    // get possible object locations based on color region similarity
    std::vector<Vec3d> aPossibleLocations;
    pPointCloudRegistration->FindPossibleObjectLocations(pObjectHypothesis,
                                                         pHSVImage,
                                                         aNewDepthMapPoints,
                                                         calibration,
                                                         aPossibleLocations);

    // estimate the transformation of the hypothesis
    std::vector<float> aPointMatchDistances;
    std::vector<CColorICP::CPointXYZRGBI> aNearestNeighbors;
    pPointCloudRegistration->EstimateTransformation(pObjectHypothesis,
                                                    mRot,
                                                    vTrans,
                                                    upwardsVector,
                                                    nEffortLevel,
                                                    &aPossibleLocations,
                                                    &aNearestNeighbors,
                                                    &aPointMatchDistances,
                                                    2 * errorAtOriginalPosition);
    //CPointCloudRegistration::EstimateTransformation(pObjectHypothesis, aForegroundPoints, mRot, vTrans, &aNearestNeighbors, &aPointMatchDistances);
    Math3d::SetVec(pObjectHypothesis->vLastTranslation, vTrans);
    Math3d::SetMat(pObjectHypothesis->mLastRotation, mRot);

    // get the rotation angle
    float fRotationAngle;
    Vec3d vTemp;
    Math3d::GetAxisAndAngle(mRot, vTemp, fRotationAngle);
    fRotationAngle *= 180.0f / M_PI;

    // get the translation of the object center
    Math3d::MulMatVec(mRot, pObjectHypothesis->vCenter, vTrans, vTemp);
    float fDistance = Math3d::Distance(vTemp, pObjectHypothesis->vCenter);

    // accept only transformations with significant motion
    float fMotionMeasure = 2 * fRotationAngle + fDistance; // 0.5 deg rotation <=> 1mm translation
    ARMARX_VERBOSE_S << "Motion measure: " << fMotionMeasure << " (" << fDistance << "mm, "
                     << fRotationAngle << "deg)";

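    // in addition to the combined measure, the second condition below requires an actual
    // translation, so a rotation-only estimate is not accepted as motion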
    if ((fMotionMeasure > OLP_MINIMAL_MOTION_MEASURE) &&
        (2 * fDistance > OLP_MINIMAL_MOTION_MEASURE))
    {
        // debug
        int nNoForeground = 0;
        int nLargeDistance = 0;


        // create transformed hypothesis
        CObjectHypothesis* pTransformedHypo = new CObjectHypothesis();
        pTransformedHypo->nHypothesisNumber = pObjectHypothesis->nHypothesisNumber;
        pTransformedHypo->eType = pObjectHypothesis->eType;
        Math3d::SetVec(pTransformedHypo->vLastTranslation, vTrans);
        Math3d::SetMat(pTransformedHypo->mLastRotation, mRot);
        CHypothesisPoint* pPoint;

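        // the index offsets used below imply that aPointMatchDistances stores the distances
        // of the confirmed points first, then those of the new points, then those of the
        // doubtable points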
        // check for each candidate point whether it lies in the image area that changed;
        // if so, check whether it moved consistently with the estimated transformation
        for (size_t j = 0; j < pObjectHypothesis->aNewPoints.size(); j++)
        {
            pPoint = pObjectHypothesis->aNewPoints.at(j)->GetCopy();

            // check if old and transformed position of the point are in the image region that changed
            bool bIsInForeground =
                COLPTools::PointIsInForeground(pPoint, pForegroundImage, calibration);
            Math3d::MulMatVec(mRot, pPoint->vPosition, vTrans, pPoint->vPosition);
            bIsInForeground &=
                COLPTools::PointIsInForeground(pPoint, pForegroundImage, calibration);

            if (bIsInForeground)
            {
                // check if the point matches well after the transformation
                if (aPointMatchDistances.at(pObjectHypothesis->aConfirmedPoints.size() + j) <
                    OLP_TOLERANCE_CONCURRENT_MOTION)
                {
                    pTransformedHypo->aConfirmedPoints.push_back(pPoint);
                }
                else
                {
                    delete pPoint;
                    nLargeDistance++;
                }
            }
            else
            {
                delete pPoint;
                nNoForeground++;
            }
        }


        // check the confirmed points
        for (size_t j = 0; j < pObjectHypothesis->aConfirmedPoints.size(); j++)
        {
            pPoint = pObjectHypothesis->aConfirmedPoints.at(j)->GetCopy();

            // check if old and transformed position of the point are in the image region that changed
            bool bIsInForeground =
                COLPTools::PointIsInForeground(pPoint, pForegroundImage, calibration);
            Math3d::MulMatVec(mRot, pPoint->vPosition, vTrans, pPoint->vPosition);
            bIsInForeground &=
                COLPTools::PointIsInForeground(pPoint, pForegroundImage, calibration);

            if (bIsInForeground)
            {
                // check if the point matches well after the transformation
                if (aPointMatchDistances.at(j) < OLP_TOLERANCE_CONCURRENT_MOTION)
                {
                    pTransformedHypo->aConfirmedPoints.push_back(pPoint);
                }
                else
                {
                    // interpolate with nearest neighbor
                    CColorICP::CPointXYZRGBI pNeighbor = aNearestNeighbors.at(j);
                    pPoint->vPosition.x = 0.5f * (pPoint->vPosition.x + pNeighbor.x);
                    pPoint->vPosition.y = 0.5f * (pPoint->vPosition.y + pNeighbor.y);
                    pPoint->vPosition.z = 0.5f * (pPoint->vPosition.z + pNeighbor.z);
                    pPoint->fColorR = 0.5f * (pPoint->fColorR + pNeighbor.r);
                    pPoint->fColorG = 0.5f * (pPoint->fColorG + pNeighbor.g);
                    pPoint->fColorB = 0.5f * (pPoint->fColorB + pNeighbor.b);
                    pPoint->fIntensity = 0.5f * (pPoint->fIntensity + pNeighbor.i);

                    pTransformedHypo->aNewPoints.push_back(pPoint);
                    nLargeDistance++;
                }
            }
            else
            {
                if (aPointMatchDistances.at(j) < OLP_TOLERANCE_CONCURRENT_MOTION)
                {
                    pTransformedHypo->aDoubtablePoints.push_back(pPoint);
                    nNoForeground++;
                }
                else
                {
                    delete pPoint;
                    nNoForeground++;
                }
            }
        }

        // check the doubtable points
        for (size_t j = 0; j < pObjectHypothesis->aDoubtablePoints.size(); j++)
        {
            pPoint = pObjectHypothesis->aDoubtablePoints.at(j)->GetCopy();

            // check if old and transformed position of the point are in the image region that changed
            bool bIsInForeground =
                COLPTools::PointIsInForeground(pPoint, pForegroundImage, calibration);
            Math3d::MulMatVec(mRot, pPoint->vPosition, vTrans, pPoint->vPosition);
            bIsInForeground &=
                COLPTools::PointIsInForeground(pPoint, pForegroundImage, calibration);

            if (bIsInForeground)
            {
                // check if the point matches well after the transformation
                if (aPointMatchDistances.at(pObjectHypothesis->aConfirmedPoints.size() +
                                            pObjectHypothesis->aNewPoints.size() + j) <
                    OLP_TOLERANCE_CONCURRENT_MOTION)
                {
                    //pTransformedHypo->aNewPoints.push_back(pPoint);
                    pTransformedHypo->aConfirmedPoints.push_back(pPoint);
                }
                else
                {
                    delete pPoint;
                    nLargeDistance++;
                }
            }
            else
            {
                delete pPoint;
                nNoForeground++;
            }
        }

        ARMARX_VERBOSE_S << "Discarded features: no foreground " << nNoForeground << ", bad match "
                         << nLargeDistance;


        // if the hypothesis still looks good after the transformation, clean up and add new candidate points

        if (pTransformedHypo->aConfirmedPoints.size() > OLP_MIN_NUM_FEATURES)
        {
            // fuse very close points

            int nTooClose = 0;
            std::vector<Vec2d> aPointPositions2D;
            aPointPositions2D.resize(pTransformedHypo->aConfirmedPoints.size());

            for (size_t i = 0; i < pTransformedHypo->aConfirmedPoints.size(); i++)
            {
                calibration->WorldToImageCoordinates(
                    pTransformedHypo->aConfirmedPoints.at(i)->vPosition,
                    aPointPositions2D.at(i),
                    false);
            }

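            // pairwise check in image space: if two confirmed points project to (nearly) the
            // same pixel, point i is replaced by the fusion of both and point j is removed by
            // swapping in the last element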
            for (size_t i = 0; i < pTransformedHypo->aConfirmedPoints.size(); i++)
            {
                const Vec2d vPos2D = aPointPositions2D.at(i);

                for (size_t j = i + 1; j < pTransformedHypo->aConfirmedPoints.size(); j++)
                {
                    if (Math2d::Distance(vPos2D, aPointPositions2D.at(j)) <
                        OLP_DEPTH_MAP_PIXEL_DISTANCE)
                    {
                        CHypothesisPoint* fusedPoint =
                            FuseTwoPoints(pTransformedHypo->aConfirmedPoints.at(i),
                                          pTransformedHypo->aConfirmedPoints.at(j));
                        delete pTransformedHypo->aConfirmedPoints.at(i);
                        pTransformedHypo->aConfirmedPoints.at(i) = fusedPoint;
                        delete pTransformedHypo->aConfirmedPoints.at(j);
                        pTransformedHypo->aConfirmedPoints.at(j) =
                            pTransformedHypo->aConfirmedPoints.at(
                                pTransformedHypo->aConfirmedPoints.size() - 1);
                        pTransformedHypo->aConfirmedPoints.pop_back();
                        aPointPositions2D.at(j) =
                            aPointPositions2D.at(aPointPositions2D.size() - 1);
                        nTooClose++;
                    }
                }
            }

            ARMARX_VERBOSE_S << "Discarded " << nTooClose
                             << " points because they were too close to another one";


            // add new candidate points

            std::vector<CHypothesisPoint*> aNewCandidatePoints;
            std::vector<Vec2d> aForegroundPoints2D, aConfirmedPoints2D, aNewAndDoubtablePoints2D;
            std::vector<Vec3d> aForegroundPoints3D, aConfirmedPoints3D;
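            // the three projection loops below are independent of each other; they are marked
            // as OpenMP sections so that they can run concurrently when executed inside a
            // parallel region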
#pragma omp sections
            {
#pragma omp section
                {
                    for (size_t i = 0; i < aForegroundPoints.size(); i++)
                    {
                        Vec3d point3D = aForegroundPoints.at(i)->vPosition;
                        aForegroundPoints3D.push_back(point3D);
                        Vec2d point2D;
                        calibration->WorldToImageCoordinates(point3D, point2D, false);
                        aForegroundPoints2D.push_back(point2D);
                    }
                }

#pragma omp section
                {
                    for (size_t i = 0; i < pTransformedHypo->aConfirmedPoints.size(); i++)
                    {
                        Vec3d point3D = pTransformedHypo->aConfirmedPoints.at(i)->vPosition;
                        aConfirmedPoints3D.push_back(point3D);
                        Vec2d point2D;
                        calibration->WorldToImageCoordinates(point3D, point2D, false);
                        aConfirmedPoints2D.push_back(point2D);
                    }
                }

#pragma omp section
                {
                    for (size_t i = 0; i < pTransformedHypo->aDoubtablePoints.size(); i++)
                    {
                        Vec2d point2D;
                        calibration->WorldToImageCoordinates(
                            pTransformedHypo->aDoubtablePoints.at(i)->vPosition, point2D, false);
                        aNewAndDoubtablePoints2D.push_back(point2D);
                    }

                    for (size_t i = 0; i < pTransformedHypo->aNewPoints.size(); i++)
                    {
                        Vec2d point2D;
                        calibration->WorldToImageCoordinates(
                            pTransformedHypo->aNewPoints.at(i)->vPosition, point2D, false);
                        aNewAndDoubtablePoints2D.push_back(point2D);
                    }
                }
            }


            Mat3d mRotInv;
            Math3d::Transpose(mRot, mRotInv);

            const float fMinDistanceLimit2D = 0.9f * OLP_DEPTH_MAP_PIXEL_DISTANCE;

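            // each foreground point is examined independently; the push_back into the shared
            // aNewCandidatePoints vector is serialized by the critical section below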
#pragma omp parallel for
            for (size_t j = 0; j < aForegroundPoints.size(); j++)
            {
                // check if the old position of the point was in foreground
                // (a loop-local temporary is used here instead of the shared vTemp, which would
                // be written concurrently inside this parallel loop)
                Vec3d vOldPointPosition, vTempLocal;
                Math3d::SubtractVecVec(aForegroundPoints.at(j)->vPosition, vTrans, vTempLocal);
                Math3d::MulMatVec(mRotInv, vTempLocal, vOldPointPosition);

                if (COLPTools::PointIsInForeground(
                        vOldPointPosition, pForegroundImage, calibration))
                {
                    // add only if it is close to the object, but not too close to the points already in the hypothesis
                    Vec2d vPos2D = aForegroundPoints2D.at(j);
                    Vec3d vPos3D = aForegroundPoints3D.at(j);
                    float fDist2D, fDist3D;
                    float fMinDist2D = FLT_MAX;
                    float fMinDist3D = FLT_MAX;

                    for (size_t k = 0; k < aConfirmedPoints2D.size(); k++)
                    {
                        fDist2D = Math2d::Distance(vPos2D, aConfirmedPoints2D.at(k));

                        if (fDist2D < fMinDist2D)
                        {
                            fMinDist2D = fDist2D;
                        }

                        fDist3D = Math3d::Distance(vPos3D, aConfirmedPoints3D.at(k));

                        if (fDist3D < fMinDist3D)
                        {
                            fMinDist3D = fDist3D;
                        }
                    }
593
596 (fMinDist2D > fMinDistanceLimit2D))
597 {
598 for (size_t k = 0; k < aNewAndDoubtablePoints2D.size(); k++)
599 {
600 fDist2D = Math2d::Distance(vPos2D, aNewAndDoubtablePoints2D.at(k));
601
602 if (fDist2D < fMinDist2D)
603 {
604 fMinDist2D = fDist2D;
605 }
606 }
607
608 if (fMinDist2D > fMinDistanceLimit2D)
609 {
610#pragma omp critical
611 {
612 aNewCandidatePoints.push_back(aForegroundPoints.at(j)->GetCopy());
613 }
614 }
615 }
616 }
617 }

            for (size_t i = 0; i < aNewCandidatePoints.size(); i++)
            {
                // float fDist2;
                // float fMinDist = FLT_MAX;
                // const Vec2d vPos2D = aCandidatePositions2D.at(i);
                // for (size_t j=0; j<i; j++)
                // {
                //     const Vec2d vPos2DComp = aCandidatePositions2D.at(j);
                //     fDist2 = (vPos2D.x-vPos2DComp.x)*(vPos2D.x-vPos2DComp.x) + (vPos2D.y-vPos2DComp.y)*(vPos2D.y-vPos2DComp.y);
                //     if (fDist2<fMinDist) fMinDist = fDist2;
                // }

                //if ((sqrt(fMinDist) > fMinDistanceLimit) || (rand()%3==0))
                //if (fMinDist > 0.98*fMinDistanceLimit*fMinDistanceLimit)
                {
                    pTransformedHypo->aNewPoints.push_back(aNewCandidatePoints.at(i));
                }
            }

            // copy visible points
            pTransformedHypo->aVisibleConfirmedPoints.resize(
                pTransformedHypo->aConfirmedPoints.size());

            for (size_t i = 0; i < pTransformedHypo->aConfirmedPoints.size(); i++)
            {
                pTransformedHypo->aVisibleConfirmedPoints.at(i) =
                    pTransformedHypo->aConfirmedPoints.at(i)->GetCopy();
            }

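            // optionally extend the hypothesis by all scene points that fall into its
            // segmentation mask; the newly appended points are mirrored into
            // aVisibleConfirmedPoints as well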
#ifdef OLP_ADD_POINTS_FROM_SEGMENTED_REGION
            CByteImage* pSegmentationMask = NULL;
            COLPTools::CreateObjectSegmentationMask(
                pTransformedHypo, calibration, pSegmentationMask);
            COLPTools::FilterForegroundPoints(aNewDepthMapPoints,
                                              pSegmentationMask,
                                              calibration,
                                              pTransformedHypo->aConfirmedPoints);

            for (size_t i = pTransformedHypo->aVisibleConfirmedPoints.size();
                 i < pTransformedHypo->aConfirmedPoints.size();
                 i++)
            {
                pTransformedHypo->aVisibleConfirmedPoints.push_back(
                    pTransformedHypo->aConfirmedPoints.at(i)->GetCopy());
            }
#endif


            // update the position of the center of the hypothesis
            Vec3d vNewCenter = {0, 0, 0};

            for (size_t i = 0; i < pTransformedHypo->aVisibleConfirmedPoints.size(); i++)
            {
                vNewCenter.x += pTransformedHypo->aVisibleConfirmedPoints.at(i)->vPosition.x;
                vNewCenter.y += pTransformedHypo->aVisibleConfirmedPoints.at(i)->vPosition.y;
                vNewCenter.z += pTransformedHypo->aVisibleConfirmedPoints.at(i)->vPosition.z;
            }

            if (pTransformedHypo->aVisibleConfirmedPoints.size() > 0)
            {
                pTransformedHypo->vCenter.x =
                    vNewCenter.x / (float)pTransformedHypo->aVisibleConfirmedPoints.size();
                pTransformedHypo->vCenter.y =
                    vNewCenter.y / (float)pTransformedHypo->aVisibleConfirmedPoints.size();
                pTransformedHypo->vCenter.z =
                    vNewCenter.z / (float)pTransformedHypo->aVisibleConfirmedPoints.size();
            }

            // update the maximal extent
            pTransformedHypo->fMaxExtent = 0;
            float fDist;

            for (size_t i = 0; i < pTransformedHypo->aVisibleConfirmedPoints.size(); i++)
            {
                fDist =
                    Math3d::Distance(pTransformedHypo->vCenter,
                                     pTransformedHypo->aVisibleConfirmedPoints.at(i)->vPosition);

                if (fDist > pTransformedHypo->fMaxExtent)
                {
                    pTransformedHypo->fMaxExtent = fDist;
                }
            }


            // return validated hypothesis
            pConfirmedHypothesis = pTransformedHypo;
            pConfirmedHypothesis->aHypothesisHasMoved.push_back(true);
            pObjectHypothesis->aHypothesisHasMoved.push_back(true);
            return true;
        }
        else
        {
            ARMARX_VERBOSE_S
                << "CHypothesisValidationRGBD: not enough points left after transformation";
            delete pTransformedHypo;
            pObjectHypothesis->aHypothesisHasMoved.push_back(false);
            return false;
        }
    }
    else
    {
        ARMARX_VERBOSE_S << "CHypothesisValidationRGBD: the transformation shows no motion.";
        pObjectHypothesis->aHypothesisHasMoved.push_back(false);
        return false;
    }
}

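// FuseTwoPoints merges two hypothesis points by averaging their current and old positions,
// their color channels, intensity and membership probability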
CHypothesisPoint*
CHypothesisValidationRGBD::FuseTwoPoints(CHypothesisPoint* p1, CHypothesisPoint* p2)
{
    CHypothesisPoint* result = p1->GetCopy();
    result->vPosition.x = 0.5f * (p1->vPosition.x + p2->vPosition.x);
    result->vPosition.y = 0.5f * (p1->vPosition.y + p2->vPosition.y);
    result->vPosition.z = 0.5f * (p1->vPosition.z + p2->vPosition.z);

    result->vOldPosition.x = 0.5f * (p1->vOldPosition.x + p2->vOldPosition.x);
    result->vOldPosition.y = 0.5f * (p1->vOldPosition.y + p2->vOldPosition.y);
    result->vOldPosition.z = 0.5f * (p1->vOldPosition.z + p2->vOldPosition.z);

    result->fColorR = 0.5f * (p1->fColorR + p2->fColorR);
    result->fColorG = 0.5f * (p1->fColorG + p2->fColorG);
    result->fColorB = 0.5f * (p1->fColorB + p2->fColorB);
    result->fIntensity = 0.5f * (p1->fIntensity + p2->fIntensity);

    result->fMembershipProbability =
        0.5f * (p1->fMembershipProbability + p2->fMembershipProbability);

    return result;
}