OpenPoseEstimation.cpp
Go to the documentation of this file.
1/*
2 * This file is part of ArmarX.
3 *
4 * ArmarX is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * ArmarX is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * @package VisionX::ArmarXObjects::OpenPoseEstimation
17 * @author Stefan Reither ( stef dot reither at web dot de )
18 * @date 2018
19 * @copyright http://www.gnu.org/licenses/gpl-2.0.txt
20 * GNU General Public License
21 */
22
23#include "OpenPoseEstimation.h"
24
25#include <boost/algorithm/string.hpp>
26#include <boost/make_shared.hpp>
27
30
34
35#include <VisionX/interface/components/OpenPoseEstimationInterface.h>
36#include <VisionX/interface/components/RGBDImageProvider.h>
40
41#include <Image/IplImageAdaptor.h>
42#include <Image/PrimitivesDrawer.h>
43#include <openpose/pose/poseParameters.hpp>
44#include <openpose/pose/poseParametersRender.hpp>
45
46
47// json
48#include <SimoxUtility/json/json.hpp>
49
50using namespace armarx;
51
52#define MAX_LAYER 5
53
namespace
{
    // Compatibility shim: OpenPose 1.4 exposed this helper as `op::intRound`,
    // while OpenPose 1.5.1 renamed it to `op::positiveIntRound`. We keep a
    // local definition as long as several OpenPose versions must be
    // supported; once 1.5.1+ is the baseline, drop this and call
    // `op::positiveIntRound` directly.
    // As the name implies, the +0.5 trick only rounds correctly for
    // non-negative input.
    template <typename T>
    inline int
    positiveIntRound(const T value)
    {
        return static_cast<int>(value + 0.5f);
    }
} // namespace
67
74
// NOTE(review): extraction artifact — the signature line (original line 76)
// is missing; from the body this is the component's init hook reading all
// configuration properties (presumably onInitImageProcessor) — confirm
// against the header.
75void
77{
78 providerName = getProperty<std::string>("ImageProviderName").getValue();
79
80 usingImageProvider(providerName);
81 offeringTopic(getProperty<std::string>("DebugDrawerTopicName").getValue());
82
83 radius = getProperty<int>("DepthMedianRadius").getValue();
84 useDistortionParameters = getProperty<bool>("UseDistortionParameters").getValue();
85 layerName = "OpenPoseEstimation";
86 renderThreshold = getProperty<float>("OP_render_threshold").getValue();
87
88 cameraNodeName = getProperty<std::string>("CameraNodeName").getValue();
89 minimalValidKeypoints = getProperty<int>("MinimalAmountKeypoints").getValue();
90 reportOnlyNearestPerson = getProperty<bool>("ReportOnlyNearestPerson").getValue();
91
92 // Setup workspace-polygon
// The property encodes the polygon as "x1,y1;x2,y2;...".
93 std::vector<Polygon2D::Point> points;
94 std::vector<std::string> pointStrings;
95 std::string input = getProperty<std::string>("WorkspacePolygon").getValue();
96 boost::split(pointStrings, input, boost::is_any_of(";"));
97 for (auto s : pointStrings)
98 {
99 ARMARX_VERBOSE << "WorkspacePolygon: " << s;
100 std::vector<std::string> workSpacePolygonPoint;
101 boost::split(workSpacePolygonPoint, s, boost::is_any_of(","));
102 ARMARX_CHECK_EXPRESSION(workSpacePolygonPoint.size() == 2);
103 Polygon2D::Point point;
104 point.x = std::strtof(workSpacePolygonPoint.at(0).c_str(), nullptr);
105 point.y = std::strtof(workSpacePolygonPoint.at(1).c_str(), nullptr);
106 points.push_back(point);
107 }
108 workspacePolygon = Polygon2D(points);
109
110 mode = getProperty<OpenPoseEstimationMode>("Mode").getValue();
// NOTE(review): the condition line (original line 111) is missing — from the
// branch body and the log message this branch is taken when
// mode == OpenPoseEstimationMode::FromTopic.
112 {
113 usingTopic("2Dposes");
114 poseModel = op::flagsToPoseModel("MPI");
115 ARMARX_INFO << "Switching human model from \""
116 << getProperty<std::string>("OP_model_pose").getValue()
117 << "\" to \"MPI\" because the OpenPoseEstiamtionMode is set to \"FromTopic\".";
118
119 // parse Topic_Dimensions
// Expected format: "<width>x<height>".
120 std::string s = getProperty<std::string>("Topic_Dimensions").getValue();
121 std::vector<std::string> split = armarx::Split(s, "x", true);
122 ARMARX_CHECK_EXPRESSION(split.size() == 2);
123 this->incomingKeypointDimensions.width = armarx::toInt(split.at(0));
124 this->incomingKeypointDimensions.height = armarx::toInt(split.at(1));
125 }
126 else
127 {
128 poseModel = op::flagsToPoseModel(getProperty<std::string>("OP_model_pose").getValue());
129 }
130
// Reset the 2D image-acquisition statistics counters.
131 timeoutCounter2d = 0;
132 readErrorCounter2d = 0;
133 sucessCounter2d = 0;
134}
135
// NOTE(review): extraction artifact — the signature line (original line 137)
// and several interior lines are missing; from the body this is the connect
// hook: it fetches the image provider, allocates image buffers, obtains
// topic proxies, calibration, and the robot state, then optionally starts
// the 2D worker task.
136void
138{
139 visionx::ImageType imageDisplayType = visionx::tools::typeNameToImageType("rgb");
140 imageProviderInfo = getImageProvider(providerName, imageDisplayType);
141
142 numImages = static_cast<unsigned int>(imageProviderInfo.numberImages);
143 if (numImages < 1 || numImages > 2)
144 {
145 ARMARX_FATAL << "invalid number of images. aborting";
146 return;
147 }
// NOTE(review): lines 148-149 are missing — presumably a mode check that
// requires two images (RGB + depth); confirm against the repository.
150 {
151 if (numImages != 2)
152 {
153 ARMARX_FATAL << "invalid number of images. aborting";
154 return;
155 }
156 }
157
// Allocate one CByteImage per provided image, plus dedicated RGB, masked
// RGB, depth, and result buffers.
158 imageBuffer = new CByteImage*[numImages];
159 for (unsigned int i = 0; i < numImages; i++)
160 {
161 imageBuffer[i] = visionx::tools::createByteImage(imageProviderInfo);
162 }
163 rgbImageBuffer = visionx::tools::createByteImage(imageProviderInfo);
164 maskedrgbImageBuffer = visionx::tools::createByteImage(imageProviderInfo);
165 ::ImageProcessor::Zero(maskedrgbImageBuffer);
166 depthImageBuffer = visionx::tools::createByteImage(imageProviderInfo);
167 ::ImageProcessor::Zero(depthImageBuffer);
168 openPoseResultImage = new CByteImage*[1];
169 openPoseResultImage[0] = visionx::tools::createByteImage(imageProviderInfo);
// NOTE(review): line 170 is missing — presumably enableResultImages(...).
171 1, imageProviderInfo.imageFormat.dimension, imageProviderInfo.imageFormat.type);
172
173 offeringTopic(getProperty<std::string>("OpenPoseEstimation2DTopicName").getValue());
174 listener2DPrx = getTopic<OpenPose2DListenerPrx>(
175 getProperty<std::string>("OpenPoseEstimation2DTopicName").getValue());
176 offeringTopic(getProperty<std::string>("OpenPoseEstimation3DTopicName").getValue());
177 listener3DPrx = getTopic<OpenPose3DListenerPrx>(
178 getProperty<std::string>("OpenPoseEstimation3DTopicName").getValue());
179
180 // Calibration
181 stereoCalibration = nullptr;
182 calibration = nullptr;
183 {
184 ARMARX_VERBOSE << "Trying to get StereoCalibrationInterface proxy: "
185 << getIceManager()->getCommunicator()->proxyToString(
186 imageProviderInfo.proxy);
187 visionx::StereoCalibrationInterfacePrx calib =
188 visionx::StereoCalibrationInterfacePrx::checkedCast(imageProviderInfo.proxy);
189 if (!calib)
190 {
// NOTE(review): line 191 is missing — presumably an ARMARX_WARNING stream
// start for the message below.
192 << "Image provider does not provide a stereo calibration - 3D will not work - "
193 << imageProviderInfo.proxy->ice_ids();
194 }
195 else
196 {
197 ARMARX_VERBOSE << "got StereoCalibrationInterface proxy";
198 stereoCalibration = visionx::tools::convert(calib->getStereoCalibration());
199 ARMARX_VERBOSE << "got StereoCalibration";
200 calibration = stereoCalibration->GetLeftCalibration();
201 ARMARX_VERBOSE << "got mono Calibration";
202 }
203 }
204
// NOTE(review): line 205 is missing — presumably
// debugDrawerTopic = getTopic<DebugDrawerInterfacePrx>(...).
206 getProperty<std::string>("DebugDrawerTopicName").getValue());
207 debugDrawerTopic->clearLayer(layerName);
208 ARMARX_VERBOSE << "Trying to get RobotStateComponent proxy";
209 // Trying to access robotStateComponent if it is avaiable
210 robotStateInterface = getProxy<RobotStateComponentInterfacePrx>(
211 getProperty<std::string>("RobotStateComponentName").getValue(), false, "", false);
212 if (robotStateInterface)
213 {
214 localRobot = RemoteRobot::createLocalClone(robotStateInterface);
215 ARMARX_VERBOSE << "RobotStateComponent available";
216 }
217 else
218 {
219 ARMARX_ERROR << "RobotStateCompontent is not avaiable, 3D-Estimation will be deactivated!!";
220 }
221
// NOTE(review): line 222 is missing — presumably a FromTopic-mode check
// guarding the keypoint buffer creation.
223 {
224 imageKeypointBuffer.reset(new ImageKeypointBuffer(imageProviderInfo));
225 }
226
227 if (getProperty<bool>("ActivateOnStartup").getValue())
228 {
229 running2D = true;
230 task2DKeypoints = new RunningTask<OpenPoseEstimation>(this, &OpenPoseEstimation::run);
231 task2DKeypoints->start();
232
233 running3D = true;
234 }
235 else
236 {
237 running2D = false;
238 running3D = false;
239 }
240 ARMARX_INFO << "OpenPoseEstimation connect done";
241}
242
// NOTE(review): extraction artifact — the signature line (original line 244)
// is missing; from the body this is the disconnect hook: stop the worker
// task and release the image buffers.
243void
245{
246 running2D = false;
247 task2DKeypoints->stop();
248 running3D = false;
249
250 delete rgbImageBuffer;
251 delete maskedrgbImageBuffer;
252 delete depthImageBuffer;
// NOTE(review): `delete[] imageBuffer` frees only the pointer array; the
// CByteImage elements created in the connect hook (and
// openPoseResultImage[0]) are not deleted here — looks like a leak; confirm
// they are not released elsewhere.
253 delete[] imageBuffer;
254 delete[] openPoseResultImage;
255 delete stereoCalibration;
256
257 imageKeypointBuffer.reset();
258}
259
260void
264
// NOTE(review): extraction artifact — the signature line (original line 266)
// is missing; from the body this is the per-frame process() callback: wait
// for images, copy them into the local buffers (or the keypoint buffer in
// FromTopic mode) and flag the worker thread via `imageUpdated`.
265void
267{
268 if (running2D)
269 {
270 if (!waitForImages(providerName))
271 {
272 ++timeoutCounter2d;
273 ARMARX_WARNING << "Timeout or error in wait for images"
274 << " (#timeout " << timeoutCounter2d << ", #read error "
275 << readErrorCounter2d << ", #success " << sucessCounter2d << ")";
276 }
277 else
278 {
279 if (static_cast<unsigned int>(getImages(providerName, imageBuffer, imageMetaInfo)) !=
280 numImages)
281 {
282 ++readErrorCounter2d;
283 ARMARX_WARNING << "Unable to transfer or read images"
284 << " (#timeout " << timeoutCounter2d << ", #read error "
285 << readErrorCounter2d << ", #success " << sucessCounter2d << ")";
286 return;
287 }
288 else
289 {
290 ++sucessCounter2d;
// NOTE(review): the condition line (original line 291) is missing — from
// the body this branch handles OpenPoseEstimationMode::FromTopic, feeding
// the rgb/depth pair into imageKeypointBuffer.
292 {
293 imageKeypointBuffer->addRGBImage(imageBuffer[0], imageMetaInfo->timeProvided);
294 if (imageKeypointBuffer->addDepthImage(imageBuffer[1],
295 imageMetaInfo->timeProvided))
296 {
297 timeProvidedImage = imageMetaInfo->timeProvided;
298 imageUpdated = true;
299 }
300 }
301 else
302 {
303 {
304 std::unique_lock lock_rgb(rgbImageBufferMutex);
305 ::ImageProcessor::CopyImage(imageBuffer[0], rgbImageBuffer);
306 }
307
308 if (numImages == 2)
309 {
310 {
311 std::unique_lock lock_depth(depthImageBufferMutex);
312 ::ImageProcessor::CopyImage(imageBuffer[1], depthImageBuffer);
313 }
314 }
315 timeProvidedImage = imageMetaInfo->timeProvided;
316 imageUpdated = true;
317 }
318 }
319 }
320 }
321 else
322 {
// Not running: idle briefly (10 ms) instead of busy-waiting.
323 usleep(10000);
324 }
325}
326
// Worker-thread main loop: waits for a new image (imageUpdated flag),
// produces 2D keypoints (from OpenPose or from the topic buffer), optionally
// lifts them to 3D, filters, reports to the 2D/3D topics, and publishes the
// rendered result image.
// NOTE(review): several interior lines were lost in extraction; hedged
// reconstructions are noted inline — confirm against the repository.
327void
328OpenPoseEstimation::run()
329{
330 // If we receive Keypoints from Topic, then we don't need to setup the OpenPose-Environment
// NOTE(review): the condition line (original line 331) is missing —
// presumably `if (mode != OpenPoseEstimationMode::FromTopic)`.
332 {
333 setupOpenPoseEnvironment();
334 }
335
336 while (running2D && task2DKeypoints->isRunning())
337 {
338 if (!imageUpdated)
339 {
340 usleep(100);
341 continue;
342 }
343
344 imageUpdated = false;
345
346 // 1. If mode is not listening for topic --> create 2D-Keypoints from images
// NOTE(review): lines 347-350 and 353-354 are missing — presumably the mode
// check plus calls to the calculate-2D helpers defined below.
348 {
350 }
351 else if (mode == OpenPoseEstimationMode::FromTopic)
352 {
354 }
355
356 if (running3D && localRobot && numImages > 1)
357 {
358 // 2. Calculate 3D values
// NOTE(review): line 359 is missing — presumably a RemoteRobot
// synchronization call taking the arguments on the next line.
360 localRobot, robotStateInterface, timeProvidedImage);
// NOTE(review): the condition lines (original lines 361-362 and 366) are
// missing — from the called helpers these select depth-image vs.
// stereo-image 3D calculation by mode.
363 {
364 calculate3DFromDepthImage(keypointManager);
365 }
367 {
368 calculate3DFromStereoImage(keypointManager);
369 }
370 else
371 {
372 // ARMARX_ERROR << "Something went really wrong. The mode is in an unexpected state!! Skipping frame...";
373 continue;
374 }
375
376 // 3. Filter based on workspacePolygon
377 {
378 std::unique_lock lock(keypointManagerMutex);
379 std::vector<KeypointObjectPtr>::iterator iter = keypointManager->getData().begin();
380 while (iter != keypointManager->getData().end())
381 {
382 KeypointObjectPtr p = *iter;
383 // filterKeypointsBasedOnWorkspacePolygon(p);
// Drop objects that ended up with no keypoints at all.
384 if (p->size() == 0)
385 {
386 iter = keypointManager->getData().erase(iter);
387 }
388 else
389 {
390 iter++;
391 }
392 }
393 }
394
395 // 4. Filter to nearest objects (optional)
396 if (reportOnlyNearestPerson)
397 {
// NOTE(review): line 398 is missing — presumably the call to
// filterToNearestN / the nearest-person filter helper below.
399 }
400
401 // 5. Visualize and report to topic
// NOTE(review): line 402 is missing — presumably a visualization call.
403
404 ARMARX_VERBOSE << deactivateSpam(0.5) << "Reporting 2Dkeypoints for "
405 << keypointManager->getData().size() << " objects";
406 listener2DPrx->report2DKeypoints(keypointManager->toIce2D(), timeProvidedImage);
407 listener2DPrx->report2DKeypointsNormalized(
408 keypointManager->toIce2D_normalized(imageProviderInfo.imageFormat.dimension.width,
409 imageProviderInfo.imageFormat.dimension.height),
410 timeProvidedImage);
411 ARMARX_VERBOSE << deactivateSpam(0.5) << "Reporting 3Dkeypoints for "
412 << keypointManager->getData().size() << " objects";
413 listener3DPrx->report3DKeypoints(keypointManager->toIce3D(localRobot),
414 timeProvidedImage);
415 }
416 else
417 {
// 3D disabled/unavailable: report 2D results only.
418 ARMARX_VERBOSE << deactivateSpam(0.5) << "Reporting 2Dkeypoints for "
419 << keypointManager->getData().size() << " objects";
420 listener2DPrx->report2DKeypoints(keypointManager->toIce2D(), timeProvidedImage);
421 listener2DPrx->report2DKeypointsNormalized(
422 keypointManager->toIce2D_normalized(imageProviderInfo.imageFormat.dimension.width,
423 imageProviderInfo.imageFormat.dimension.height),
424 timeProvidedImage);
425 }
426
427 // Always provide result image
428 provideResultImages(openPoseResultImage, imageMetaInfo);
429 }
430}
431
// Initializes the OpenPose pipeline objects (scale extractor, input/output
// converters, Caffe pose extractor) from the OP_* component properties and
// locates the model folder on disk.
432void
433OpenPoseEstimation::setupOpenPoseEnvironment()
434{
435 ARMARX_INFO << "setting up open pose environment";
436 TIMING_START(setupOpenPose);
437
438 const op::Point<int> netInputSize =
439 op::flagsToPoint(getProperty<std::string>("OP_net_resolution").getValue(), "-1x368");
440 const op::Point<int> outputSize =
441 op::flagsToPoint(getProperty<std::string>("OP_output_resolution").getValue(), "-1x-1");
442 int scaleNumber = getProperty<int>("OP_scale_number").getValue();
443 double scaleGap = getProperty<double>("OP_scale_gap").getValue();
444 scaleAndSizeExtractor = std::make_shared<op::ScaleAndSizeExtractor>(
445 netInputSize, outputSize, scaleNumber, scaleGap);
446
447 cvMatToOpInput = std::make_shared<op::CvMatToOpInput>(poseModel);
448 cvMatToOpOutput.reset(new op::CvMatToOpOutput());
449 opOutputToCvMat.reset(new op::OpOutputToCvMat());
450
451 std::string modelFolder;
452
453 modelFolder = getProperty<std::string>("OP_model_folder").getValue();
// NOTE(review): line 454 is missing — presumably
// `bool found = ArmarXDataPath::getAbsolutePath(` taking the arguments on
// the next line; confirm against the repository.
455 modelFolder, modelFolder, {"/usr/share/OpenPose/", OPENPOSE_MODELS});
456 if (!found)
457 {
458 ARMARX_ERROR << "Could not find model folder!";
459 return;
460 }
// Ensure a trailing slash so OpenPose can append file names directly.
461 if (!modelFolder.empty() && *modelFolder.rbegin() != '/')
462 {
463 modelFolder += "/";
464 }
465 ARMARX_INFO << "Found model path at: " << modelFolder;
466 int numGpuStart = getProperty<int>("OP_num_gpu_start").getValue();
467 poseExtractorCaffe =
468 std::make_shared<op::PoseExtractorCaffe>(poseModel, modelFolder, numGpuStart);
469 poseExtractorCaffe->initializationOnThread();
470
471 TIMING_END(setupOpenPose);
472}
473
475OpenPoseEstimation::getPoseKeypoints(CByteImage* imageBuffer)
476{
477 // Step 1 - Convert image to cv::Mat
478 IplImage* iplImage = IplImageAdaptor::Adapt(imageBuffer);
479 cv::Mat inputImage = cv::cvarrToMat(iplImage);
480 const op::Point<int> imageSize{inputImage.cols, inputImage.rows};
481
482 // Step 2 - Get desired scale sizes
483 std::vector<double> scaleInputToNetInputs;
484 std::vector<op::Point<int>> netInputSizes;
485 double scaleInputToOutput;
486 op::Point<int> outputResolution;
487 std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution) =
488 scaleAndSizeExtractor->extract(imageSize);
489
490 // Step 3 - Format input image to OpenPose input and output formats
491 const auto netInputArray =
492 cvMatToOpInput->createArray(inputImage, scaleInputToNetInputs, netInputSizes);
493
494 // Step 4 - Estimate poseKeypoints
495 poseExtractorCaffe->forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
496 const PoseKeypoints poseKeypoints = poseExtractorCaffe->getPoseKeypoints();
497
498 return poseKeypoints;
499}
500
501void
502OpenPoseEstimation::Render2DResultImage(const CByteImage& inputImage,
503 const CByteImage& maskedInputImage,
504 KeypointManagerPtr keypointManager,
505 CByteImage& resultImage,
506 op::PoseModel poseModel,
507 float renderThreshold,
508 int brightnessIncrease)
509{
510 IdNameMap m1 = keypointManager->getIdNameMap();
511 IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
512 ARMARX_CHECK_EXPRESSION(m1.size() == poseBodyPartMapping.size() &&
513 std::equal(m1.begin(), m1.end(), poseBodyPartMapping.begin()))
514 << "The idNameMap used in the keypointManager must be equal to the poseBodyPartMapping "
515 "defined by the given poseModel.";
516
517 // Creating openpose-representation of keypoints stored in keypointManager
518 const int objects = keypointManager->getData().size();
519 const int numberKeypoints = m1.size();
520
521 PoseKeypoints opKeypoints({objects, numberKeypoints, 3});
522 int objectNumber = 0;
523 for (KeypointObjectPtr obj : keypointManager->getData())
524 {
525 for (int i = 0; i < numberKeypoints; i++)
526 {
527 const int index = (objectNumber * numberKeypoints + i) * opKeypoints.getSize(2);
528 KeypointPtr p = obj->getNode(static_cast<unsigned int>(i));
529 if (p)
530 {
531 opKeypoints[index] = p->get2D()._x;
532 opKeypoints[index + 1] = p->get2D()._y;
533 opKeypoints[index + 2] = p->getConfidence();
534 }
535 else
536 {
537 opKeypoints[index] = 0.0f;
538 opKeypoints[index + 1] = 0.0f;
539 opKeypoints[index + 2] = 0.0f;
540 }
541 }
542 objectNumber++;
543 }
544 std::cout << opKeypoints.toString() << std::endl;
545
546 // Render image
547 Render2DResultImage(inputImage,
548 maskedInputImage,
549 opKeypoints,
550 resultImage,
551 poseModel,
552 renderThreshold,
553 brightnessIncrease);
554}
555
// Applies the green mask encoded in `maskedInputImage` to `resultImage`:
// for positive brightnessIncrease the masked region's surroundings are
// brightened proportionally to a smoothed mask; for negative values the
// green mask pixels are copied through verbatim.
// NOTE(review): the signature's first line (original line 557) is missing —
// presumably
// `OpenPoseEstimation::VisualizeTransparentImageMask(CByteImage& resultImage,`.
556void
558 const CByteImage& maskedInputImage,
559 int brightnessIncrease,
560 const CByteImage& inputImage)
561{
// Pure green (0, 255, 0) marks masked-out pixels (see maskOutBasedOnDepth).
562 Ice::Byte mask[3] = {0, 255, 0};
563 auto pixelCount = inputImage.width * inputImage.height;
564
565 // first create a smoothed mask
566 CByteImage imageMask(maskedInputImage.width, maskedInputImage.height, CByteImage::eGrayScale);
567 CByteImage smoothedImageMask(&imageMask);
568 ::ImageProcessor::Zero(&imageMask);
569 if (brightnessIncrease > 0)
570 {
571 for (int i = 0; i < pixelCount; i += 1)
572 {
573 if (maskedInputImage.pixels[i * 3] == mask[0] &&
574 maskedInputImage.pixels[i * 3 + 1] == mask[1] &&
575 maskedInputImage.pixels[i * 3 + 2] == mask[2])
576 {
577 imageMask.pixels[i] = 255;
578 }
579 }
// Smoothing yields soft edges so the brightness ramps instead of jumping.
580 ::ImageProcessor::GaussianSmooth5x5(&imageMask, &smoothedImageMask);
581 }
582 // now apply mask to image
583 for (int i = 0; i < pixelCount; i += 1)
584 {
585 if (brightnessIncrease > 0)
586 {
587 float perc = static_cast<float>(smoothedImageMask.pixels[i]) / 255.f;
588 int effectiveBrightnessIncrease = brightnessIncrease * perc;
589 resultImage.pixels[i * 3] =
590 std::min<int>(resultImage.pixels[i * 3] + effectiveBrightnessIncrease, 255);
591 resultImage.pixels[i * 3 + 1] =
592 std::min<int>(resultImage.pixels[i * 3 + 1] + effectiveBrightnessIncrease, 255);
593 resultImage.pixels[i * 3 + 2] =
594 std::min<int>(resultImage.pixels[i * 3 + 2] + effectiveBrightnessIncrease, 255);
595 }
596 // use original mask
597 else if (brightnessIncrease < 0)
598 {
599 if (maskedInputImage.pixels[i * 3] == mask[0] &&
600 maskedInputImage.pixels[i * 3 + 1] == mask[1] &&
601 maskedInputImage.pixels[i * 3 + 2] == mask[2])
602 {
603 resultImage.pixels[i * 3] = maskedInputImage.pixels[i * 3];
604 resultImage.pixels[i * 3 + 1] = maskedInputImage.pixels[i * 3 + 1];
605 resultImage.pixels[i * 3 + 2] = maskedInputImage.pixels[i * 3 + 2];
606 }
607 }
608 }
609}
610
// Draws an OpenPose keypoint array (lines between part pairs, circles on
// keypoints) on top of `inputImage` into `resultImage`, using OpenPose's
// per-model colors and size-dependent line/circle thickness.
// NOTE(review): the parameter line (original line 614) is missing —
// presumably `const PoseKeypoints& keypoints,`, matching the uses below.
611void
612OpenPoseEstimation::Render2DResultImage(const CByteImage& inputImage,
613 const CByteImage& maskedInputImage,
615 CByteImage& resultImage,
616 op::PoseModel poseModel,
617 float renderThreshold,
618 int brightnessIncrease)
619{
620 ARMARX_CHECK_EXPRESSION(inputImage.width == resultImage.width);
621 ARMARX_CHECK_EXPRESSION(inputImage.height == resultImage.height);
622 ::ImageProcessor::CopyImage(&inputImage, &resultImage);
623
624 VisualizeTransparentImageMask(resultImage, maskedInputImage, brightnessIncrease, inputImage);
625
// An empty size vector means no detections: only the mask visualization
// remains in resultImage.
626 if (keypoints.getSize().empty())
627 {
628 return;
629 }
630
631 const std::vector<unsigned int>& posePartPairs = op::getPoseBodyPartPairsRender(poseModel);
632
633 // Get colors for points and lines from openpose
634 std::vector<float> keypointColors = op::getPoseColors(poseModel);
635
636 const auto thicknessCircleRatio = 1.f / 75.f;
637 const auto thicknessLineRatioWRTCircle = 0.75f;
638 const auto area = inputImage.width * inputImage.height;
639 const int numberKeypoints = keypoints.getSize(1);
640
641 for (int person = 0; person < keypoints.getSize(0); person++)
642 {
643 const auto personRectangle = op::getKeypointsRectangle(keypoints, person, 0.1f);
644 if (personRectangle.area() > 0)
645 {
646 // define size-dependent variables
647 const auto ratioAreas = op::fastMin(
648 1.f,
649 op::fastMax(personRectangle.width / static_cast<float>(inputImage.width),
650 personRectangle.height / static_cast<float>(inputImage.height)));
651 const auto thicknessRatio =
652 op::fastMax(positiveIntRound<float>(static_cast<float>(std::sqrt(area)) *
653 thicknessCircleRatio * ratioAreas),
654 2);
655 // Negative thickness in ivt::drawCircle means that a filled circle is to be drawn.
656 const auto thicknessCircle = op::fastMax(1, (ratioAreas > 0.05f ? thicknessRatio : -1));
657 const auto thicknessLine =
658 op::fastMax(1, positiveIntRound(thicknessRatio * thicknessLineRatioWRTCircle));
659 const auto radius = thicknessRatio / 2;
660
661 // Draw Lines
662 for (unsigned int i = 0; i < posePartPairs.size(); i = i + 2)
663 {
// Flat index layout: [person][keypoint][x, y, confidence].
664 const int index1 = (person * numberKeypoints + static_cast<int>(posePartPairs[i])) *
665 keypoints.getSize(2);
666 const int index2 =
667 (person * numberKeypoints + static_cast<int>(posePartPairs[i + 1])) *
668 keypoints.getSize(2);
669
670 float x1 = keypoints[index1 + 0];
671 float y1 = keypoints[index1 + 1];
672 float x2 = keypoints[index2 + 0];
673 float y2 = keypoints[index2 + 1];
674
// (0, 0) marks an undetected keypoint; skip lines touching one.
675 if (!(x1 == 0.0f && y1 == 0.0f) && !(x2 == 0.0f && y2 == 0.0f))
676 {
677 if (keypoints[index1 + 2] > renderThreshold &&
678 keypoints[index2 + 2] > renderThreshold)
679 {
680 unsigned int colorIndex = posePartPairs[i + 1] * 3;
// OpenPose colors are RGB; IVT's DrawLine takes them in reverse order here.
681 ::PrimitivesDrawer::DrawLine(
682 &resultImage,
683 Vec2d({x1, y1}),
684 Vec2d({x2, y2}),
685 static_cast<int>(keypointColors[colorIndex + 2]),
686 static_cast<int>(keypointColors[colorIndex + 1]),
687 static_cast<int>(keypointColors[colorIndex + 0]),
688 thicknessLine);
689 }
690 }
691 }
692
693 // Draw points
694 for (int i = 0; i < numberKeypoints; i++)
695 {
696 const int index = (person * numberKeypoints + i) * keypoints.getSize(2);
697 float x = keypoints[index + 0];
698 float y = keypoints[index + 1];
699
700 if (!(x == 0.0f && y == 0.0f) && keypoints[index + 2] > renderThreshold)
701 {
702 unsigned int colorIndex = static_cast<unsigned int>(i * 3);
703
704 ::PrimitivesDrawer::DrawCircle(&resultImage,
705 x,
706 y,
707 radius,
708 static_cast<int>(keypointColors[colorIndex + 2]),
709 static_cast<int>(keypointColors[colorIndex + 1]),
710 static_cast<int>(keypointColors[colorIndex + 0]),
711 thicknessCircle);
712 }
713 }
714 }
715 }
716}
717
718void
719OpenPoseEstimation::start(const Ice::Current&)
720{
721 if (running2D)
722 {
723 return;
724 }
725 else
726 {
727 ARMARX_INFO << "Starting OpenposeEstimation";
728 running2D = true;
729 task2DKeypoints = new RunningTask<OpenPoseEstimation>(this, &OpenPoseEstimation::run);
730 task2DKeypoints->start();
731 }
732}
733
// Ice-exposed entry point: stop the 2D keypoint worker task (blocking stop).
// No-op when not running.
734void
735OpenPoseEstimation::stop(const Ice::Current&)
736{
737 if (running2D)
738 {
739 ARMARX_INFO << "Stopping OpenposeEstimation";
740 running2D = false;
741 task2DKeypoints->stop(true);
// NOTE(review): line 742 is missing here (extraction artifact) — presumably
// additional cleanup; confirm against the repository.
743 }
744}
745
// NOTE(review): the signature line (original line 747) is missing — from the
// log message this is the 3D-estimation start entry point: enables the 3D
// stage and ensures the 2D worker runs. Idempotent when already running.
746void
748{
749 if (running3D)
750 {
751 return;
752 }
753 else
754 {
755 ARMARX_INFO << "Starting OpenposeEstimation -- 3D";
756
757 running3D = true;
// 3D results depend on the 2D pipeline, so make sure it is running too.
758 start();
759 }
760}
761
// NOTE(review): the signature line (original line 763) is missing — from the
// log message this is the 3D-estimation stop entry point: disables the 3D
// stage and clears all debug-drawer layers this component may have created.
762void
764{
765 if (running3D)
766 {
767 ARMARX_INFO << "Stopping OpenposeEstimation -- 3D";
768 running3D = false;
769
770 // Remove all layers we might have used
771 for (int i = 0; i < MAX_LAYER; i++)
772 {
773 debugDrawerTopic->removeLayer(layerName + std::to_string(i));
774 }
775 }
776}
777
778void
779OpenPoseEstimation::maskOutBasedOnDepth(CByteImage& image, int maxDepth)
780{
781 std::unique_lock lock(depthImageBufferMutex);
782
783 if (maxDepth <= 0)
784 {
785 return;
786 }
787 int pixelCount = depthImageBuffer->width * depthImageBuffer->height;
788 int depthThresholdmm = maxDepth;
789 CByteImage maskImage(depthImageBuffer->width, depthImageBuffer->height, CByteImage::eGrayScale);
790 for (int i = 0; i < pixelCount; i += 1)
791 {
792 int z_value = depthImageBuffer->pixels[i * 3 + 0] +
793 (depthImageBuffer->pixels[i * 3 + 1] << 8) +
794 (depthImageBuffer->pixels[i * 3 + 2] << 16);
795 maskImage.pixels[i] = z_value > depthThresholdmm || z_value == 0 ? 0 : 255;
796 }
797 ::ImageProcessor::Erode(&maskImage, &maskImage, 5);
798 ::ImageProcessor::Dilate(&maskImage, &maskImage, 20);
799 for (int i = 0; i < pixelCount; i += 1)
800 {
801 if (maskImage.pixels[i] == 0)
802 {
803 image.pixels[i * 3] = 0;
804 image.pixels[i * 3 + 1] = 255;
805 image.pixels[i * 3 + 2] = 0;
806 }
807 }
808}
809
// Returns the dominant color of a square window (side `windowSize`) around
// `point` in `image`, computed via a coarse 3x3x3 RGB histogram; black for
// out-of-bounds points.
// NOTE(review): the signature's first line (original line 811) is missing —
// presumably
// `OpenPoseEstimation::getDominantColorOfPatch(const CByteImage& image,`.
810DrawColor24Bit
812 const Vec2d& point,
813 int windowSize) const
814{
815 if (point.x < 0 || point.y < 0 || point.x >= image.width || point.y >= image.height)
816 return DrawColor24Bit{0, 0, 0}; // Black
817 int divisor = 256 / 3; // split into 3*3*3 bins
818 typedef std::tuple<Ice::Byte, Ice::Byte, Ice::Byte> RGBTuple;
819 std::map<RGBTuple, int> histogram;
820 int halfWindowSize = static_cast<int>(windowSize * 0.5);
// Clamp the window to the image borders.
821 int left = std::max<int>(0, static_cast<int>(point.x) - halfWindowSize);
822 int top = std::max<int>(0, static_cast<int>(point.y) - halfWindowSize);
823 int right = std::min<int>(image.width, static_cast<int>(point.x) + halfWindowSize);
824 int bottom = std::min<int>(image.height, static_cast<int>(point.y) + halfWindowSize);
825
826 for (int x = left; x < right; x++)
827 {
828 for (int y = top; y < bottom; y++)
829 {
830 int pixelPos = (y * image.width + x) * 3;
831 auto tuple = std::make_tuple<Ice::Byte, Ice::Byte, Ice::Byte>(
832 static_cast<Ice::Byte>(image.pixels[pixelPos] / divisor),
833 static_cast<Ice::Byte>(image.pixels[pixelPos + 1] / divisor),
834 static_cast<Ice::Byte>(image.pixels[pixelPos + 2] / divisor));
835 if (histogram.count(tuple))
836 {
837 histogram.at(tuple)++;
838 }
839 else
840 {
841 histogram[tuple] = 1;
842 }
843 }
844 }
845
// NOTE(review): if the clamped window is empty (e.g. windowSize == 0), the
// histogram stays empty and `dominantColor` is read uninitialized below —
// verify callers always pass a positive windowSize.
846 float maxHistogramValue = 0;
847 RGBTuple dominantColor;
848 for (auto& pair : histogram)
849 {
850 if (pair.second > maxHistogramValue)
851 {
852 dominantColor = pair.first;
853 maxHistogramValue = pair.second;
854 }
855 }
// Map the winning bin back to a representative RGB value.
856 auto rgb = DrawColor24Bit{static_cast<Ice::Byte>(std::get<0>(dominantColor) * divisor),
857 static_cast<Ice::Byte>(std::get<1>(dominantColor) * divisor),
858 static_cast<Ice::Byte>(std::get<2>(dominantColor) * divisor)};
859 return rgb;
860}
861
862void
863OpenPoseEstimation::onMessage(const Texting::TextMessage& text, const Ice::Current&)
864{
865 using json = nlohmann::json;
866 // parsing message
867 json jsonView = json::parse(text.message);
868
869 // Reading values
870 long timestamp = jsonView["timestamp"].get<long>();
871 json jsonValues = jsonView["values"];
872
873 // Constructing and populating KeypointManager
874 IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
875 KeypointManagerPtr manager = std::make_shared<KeypointManager>(poseBodyPartMapping);
876 KeypointObjectPtr object = std::make_shared<KeypointObject>();
877
878 float adjustingWidth = static_cast<float>(imageProviderInfo.imageFormat.dimension.width) /
879 static_cast<float>(this->incomingKeypointDimensions.width);
880 float adjustingHeight = static_cast<float>(imageProviderInfo.imageFormat.dimension.height) /
881 static_cast<float>(this->incomingKeypointDimensions.height);
882
883 for (json::iterator it = jsonValues.begin(); it != jsonValues.end(); ++it)
884 {
885 std::string jsonLabel = it.key();
886
887 auto pair = MPII_TO_MPI.at(jsonLabel);
888 unsigned int id = pair.first;
889 std::string name = pair.second;
890
891 json jsonPosition = it.value();
892 json jsonPoint = jsonPosition["position"];
893 float x = jsonPoint["y"]; // wrong naming in secondHands output
894 float y = jsonPoint["x"];
895
896 // Adjust Keypoints to fit to image dimensions
897 x *= adjustingWidth;
898 y *= adjustingHeight;
899
900 if (x > 0.0f && y > 0.0f)
901 {
902 KeypointPtr keypoint = std::make_shared<Keypoint>(
903 x,
904 y,
905 id,
906 name,
907 1.0f); // Since to Topic from SecondHands does not provide a confidence, we simply use 1 for every keypoint;
908 object->insertNode(keypoint);
909 }
910 }
911 manager->getData().push_back(object);
912
913 // Inserting into buffer
914 if (imageKeypointBuffer->addKeypoints(manager, timestamp))
915 {
916 timeProvidedImage = timestamp;
917 imageUpdated = true;
918 }
919}
920
// Converts a raw OpenPose keypoint array into a KeypointManager, annotating
// each keypoint with its body-part name and the dominant color of the image
// patch around it; objects with fewer than `minimalValidKeypoints` points
// are discarded.
// NOTE(review): the signature lines (original lines 920-922) are missing —
// presumably return type `KeypointManagerPtr`, the qualified function name,
// and a `const PoseKeypoints& keypoints,` parameter, matching the uses below.
923 const CByteImage& rgbImage) const
924{
925 IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
926 KeypointManagerPtr manager = std::make_shared<KeypointManager>(poseBodyPartMapping);
927
// No detections: clear the debug layer and return an empty manager.
928 if (keypoints.getSize().empty())
929 {
930 debugDrawerTopic->removeLayer(
931 layerName +
932 std::to_string(
933 layerCounter)); // TODO find better way to clear if no persons are visible
934 return manager;
935 }
936
937 int entities = keypoints.getSize().at(0);
938 int points = keypoints.getSize().at(1);
939 ARMARX_CHECK_EXPRESSION(points == static_cast<int>(poseBodyPartMapping.size()) - 1);
940
941 for (int i = 0; i < entities; i++)
942 {
943 KeypointObjectPtr object = std::make_shared<KeypointObject>();
944 for (int id = 0; id < points; id++)
945 {
946 float x = keypoints.at({i, id, 0});
947 float y = keypoints.at({i, id, 1});
948 if (x == 0.0f && y == 0.0f)
949 {
950 continue; // Invalid keypoint from openpose
951 }
952
953 KeypointPtr keypoint =
954 std::make_shared<Keypoint>(x,
955 y,
956 static_cast<unsigned int>(id),
957 poseBodyPartMapping.at(static_cast<unsigned int>(id)),
958 static_cast<float>(keypoints.at({i, id, 2})));
959
// Sample a window of ~2% of the image width around the keypoint.
960 keypoint->setDominantColor(
961 getDominantColorOfPatch(rgbImage, Vec2d{x, y}, rgbImage.width / 50));
962 object->insertNode(keypoint);
963 }
964
965 if (static_cast<int>(object->size()) >= minimalValidKeypoints)
966 {
967 manager->getData().push_back(object);
968 }
969 }
970
971 return manager;
972}
973
// Removes all 3D keypoints of `object` whose global position lies outside
// the configured workspace polygon. Requires a synchronized local robot
// model for the frame transformation; no-op without one.
// NOTE(review): the signature line (original line 975) is missing —
// presumably
// `OpenPoseEstimation::filterKeypointsBasedOnWorkspacePolygon(KeypointObjectPtr object)`
// (a KeypointObjectPtr parameter named `object`, judging from the body).
974void
976{
977 if (!localRobot)
978 {
979 return;
980 }
981
982 for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
983 {
984 KeypointPtr point = object->getNode(i);
985 if (point && point->is3DSet())
986 {
987 FramedPositionPtr pos = point->get3D()->toGlobal(localRobot); // Changing frame of copy
988 if (!workspacePolygon.isInside(pos))
989 {
990 ARMARX_VERBOSE << deactivateSpam(1) << "removing keypoint due out of workspace";
991 object->removeNode(i);
992 }
993 }
994 }
995}
996
// Reduces the current keypointManager content to the single nearest person.
// NOTE(review): the signature line (original line 998) is missing —
// presumably the helper invoked when ReportOnlyNearestPerson is enabled.
997void
999{
1000 this->keypointManager->filterToNearestN(1);
1001}
1002
// Runs OpenPose on the buffered image(s) and fills `keypointManager`,
// rendering the annotated result into openPoseResultImage[0].
// NOTE(review): the signature line (original line 1004) and the mode
// condition lines (1007, 1033) are missing — from the bodies, the first
// branch handles the RGB(+depth mask) case and the second runs OpenPose on
// both stereo images and matches the results.
1003void
1005{
1006 std::unique_lock lock(keypointManagerMutex);
1008 {
// OpenPose call only on the first image.
1009 // OpenPoseCall nur auf erstem Bild
1010 std::unique_lock lock_rgb(rgbImageBufferMutex);
1011 // remove pixels that are far away
1012 ::ImageProcessor::CopyImage(rgbImageBuffer, maskedrgbImageBuffer);
1013 if (numImages > 1)
1014 {
1015 maskOutBasedOnDepth(*maskedrgbImageBuffer, getProperty<int>("MaxDepth").getValue());
1016 }
1017 PoseKeypoints p = getPoseKeypoints(maskedrgbImageBuffer);
1018 keypointManager = generate2DKeypoints(p, maskedrgbImageBuffer);
1019
1020 // Render result image
1021 {
1022 std::unique_lock lock(resultImageBufferMutex);
// NOTE(review): line 1023 is missing — presumably the call
// `Render2DResultImage(` taking the arguments listed below.
1024 *rgbImageBuffer,
1025 *maskedrgbImageBuffer,
1026 p,
1027 *openPoseResultImage[0],
1028 poseModel,
1029 renderThreshold,
1030 getProperty<int>("MaskBrightnessIncrease").getValue());
1031 }
1032 }
1034 {
// OpenPose call on both images (stereo pair).
1035 // OpenPoseCall auf beiden Bildern
1036 std::unique_lock lock_rgb(rgbImageBufferMutex);
1037 std::unique_lock lock_depth(depthImageBufferMutex);
1038 PoseKeypoints left = getPoseKeypoints(rgbImageBuffer);
1039 keypointManager = generate2DKeypoints(left, rgbImageBuffer);
1040 PoseKeypoints right = getPoseKeypoints(depthImageBuffer);
1041 KeypointManagerPtr m2 = generate2DKeypoints(right, depthImageBuffer);
1042 keypointManager->matchWith(m2);
1043
1044 // Render result imageint)
1045 {
1046 std::unique_lock lock(resultImageBufferMutex);
// NOTE(review): line 1047 is missing — presumably the call
// `Render2DResultImage(` taking the arguments listed below.
1048 *rgbImageBuffer,
1049 *rgbImageBuffer,
1050 left,
1051 *openPoseResultImage[0],
1052 poseModel,
1053 renderThreshold,
1054 getProperty<int>("MaskBrightnessIncrease").getValue());
1055 }
1056 }
1057}
1058
// Fetches the rgb/depth/keypoint triple buffered for the current
// `timeProvidedImage` timestamp, copies the images into the local buffers
// and adopts the keypoints; then renders the annotated result image.
// NOTE(review): the signature line (original line 1060) and the declaration
// line 1064 (presumably the ImageKeypointBuffer triple pointer named
// `imageKeypointTriple`) are missing — confirm against the repository.
1059void
1061{
1062 // ImageKeypointBuffer contains a triple for the timestamp saved in this->timeProvidedImage
1063
1065 imageKeypointBuffer->getTripleAtTimestamp(timeProvidedImage, true);
1066 std::unique_lock lock_rgb(rgbImageBufferMutex);
1067 ::ImageProcessor::CopyImage(imageKeypointTriple->rgbImage, rgbImageBuffer);
1068 std::unique_lock lock_depth(depthImageBufferMutex);
1069 ::ImageProcessor::CopyImage(imageKeypointTriple->depthImage, depthImageBuffer);
1070 keypointManager = imageKeypointTriple->keypoints;
1071
1072 // Render result image
1073 {
1074 std::unique_lock lock(resultImageBufferMutex);
// NOTE(review): line 1075 is missing — presumably the call
// `Render2DResultImage(` taking the arguments listed below.
1076 *rgbImageBuffer,
1077 *rgbImageBuffer,
1078 keypointManager,
1079 *openPoseResultImage[0],
1080 poseModel,
1081 renderThreshold,
1082 getProperty<int>("MaskBrightnessIncrease").getValue());
1083 }
1084}
1085
1086void
1088{
1089 if (!localRobot)
1090 {
1091 return;
1092 }
1093
1094 const int depthThreshold = getProperty<int>("MaxDepthDifference").getValue();
1095
1096 for (KeypointObjectPtr object : manager->getData())
1097 {
1098 if (object->size() == 0)
1099 {
1100 continue;
1101 }
1102
1103 std::map<unsigned int, int>
1104 depthStorage; // we do not want to store perspective depth-values in a Keypoint, we transfer them into world-coordiantes (in cameraframe) anyways
1105 std::vector<int> depthsCopy;
1106
1107 // Get camera depth information from image coordinates
1108 for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
1109 {
1110 KeypointPtr point = object->getNode(i);
1111 if (point)
1112 {
1113 int depth = getMedianDepthFromImage(static_cast<int>(point->get2D()._x),
1114 static_cast<int>(point->get2D()._y),
1115 radius);
1116 depthStorage[i] = depth;
1117 depthsCopy.push_back(depth);
1118 }
1119 }
1120
1121 // Find outlier in depth values and set them to median of depth values
1122 std::sort(depthsCopy.begin(), depthsCopy.end());
1123 const int medianDepth = depthsCopy.at(depthsCopy.size() / 2);
1124 for (auto& m : depthStorage)
1125 {
1126 int depth = m.second;
1127 if (depth > medianDepth + depthThreshold || depth < medianDepth - depthThreshold)
1128 {
1129 m.second = medianDepth;
1130 }
1131 }
1132
1133 // Transform pixel + depth into world coordinates
1134 for (unsigned int i = 0; i < op::getPoseBodyPartMapping(poseModel).size(); i++)
1135 {
1136 KeypointPtr point = object->getNode(i);
1137 if (point)
1138 {
1139 Vec2d imagePoint;
1140 imagePoint.x = point->get2D()._x;
1141 imagePoint.y = point->get2D()._y;
1142 Vec3d result;
1143 ARMARX_CHECK_EXPRESSION(calibration);
1144 calibration->ImageToWorldCoordinates(imagePoint,
1145 result,
1146 static_cast<float>(depthStorage.at(i)),
1147 useDistortionParameters);
1148
1149 point->set3D(new FramedPosition(Eigen::Vector3f(result.x, result.y, result.z),
1150 cameraNodeName,
1151 localRobot->getName()));
1152 }
1153 }
1154 }
1155}
1156
1157void
1159{
1160 for (KeypointObjectPtr object : manager->getData())
1161 {
1162 for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
1163 {
1164 KeypointPtr point = object->getNode(i);
1165 if (point)
1166 {
1167 ARMARX_CHECK_EXPRESSION(point->isStereo());
1168 auto pair = point->getStereo2D();
1169 const Vec2d imagePointL = {pair.first._x, pair.first._y};
1170 const Vec2d imagePointR = {pair.second._x, pair.second._y};
1171 ARMARX_DEBUG << "PointL: " << imagePointL.x << " " << imagePointL.y;
1172 ARMARX_DEBUG << "PointR: " << imagePointR.x << " " << imagePointR.y;
1173
1174 Vec3d result;
1175 PointPair3d* rD = new PointPair3d();
1176 stereoCalibration->Calculate3DPoint(
1177 imagePointL, imagePointR, result, false, useDistortionParameters, rD);
1178 ARMARX_DEBUG << "IVT- ConnectionLine: " << rD->p1.x << " " << rD->p1.y << " "
1179 << rD->p1.z << " --- " << rD->p2.x << " " << rD->p2.y << " "
1180 << rD->p2.z;
1181
1182 point->set3D(new FramedPosition(Eigen::Vector3f(result.x, result.y, result.z),
1183 cameraNodeName,
1184 localRobot->getName()));
1185 }
1186 }
1187 }
1188}
1189
// NOTE(review): doxygen-extracted listing — original line 1191 (the qualified
// method signature) was dropped; this method is not in the member index, so
// its name/parameters must be recovered from the original source. Code lines
// are kept byte-identical; only comments were added.
//
// Purpose (as far as visible): publish the current 3D keypoints to the debug
// drawer (lines between body-part pairs, spheres per keypoint, rotating
// through MAX_LAYER layer names) and to ArViz as HumanPoseBody25 objects.
1190 void
1192 {
1193 std::unique_lock lock(keypointManagerMutex);
1194 debugDrawerTopic->removeLayer(layerName + std::to_string(layerCounter));
1195
1196 viz::Layer openPoseArVizLayer = arviz.layer(layerName);
1197 if (keypointManager->getData().empty() || !localRobot)
1198 {
// Commit the (empty) layer so stale ArViz content is cleared.
1199 arviz.commit(openPoseArVizLayer);
1200 return;
1201 }
1202
1203 const std::vector<unsigned int>& posePartPairs = op::getPoseBodyPartPairsRender(poseModel);
1204 const std::vector<float> keypointColors = op::getPoseColors(poseModel);
1205
1206
1207 int human = 1;
1208
// Advance the debug-drawer layer in a ring of MAX_LAYER layers.
1209 layerCounter = (layerCounter == MAX_LAYER - 1) ? 0 : layerCounter + 1;
1210
// NOTE(review): objectIndex is never incremented in the loop below — every
// object is named "object_0_", so visu elements of multiple detected humans
// overwrite each other. Likely a bug; confirm and fix in the source.
1211 int objectIndex = 0;
1212 for (KeypointObjectPtr object : keypointManager->getData())
1213 {
1214 std::string objectName = "object_" + std::to_string(objectIndex) + "_";
1215
1216 // Draw Lines
1217 for (unsigned int i = 0; i < posePartPairs.size(); i = i + 2)
1218 {
1219 KeypointPtr point1 = object->getNode(posePartPairs[i]);
1220 KeypointPtr point2 = object->getNode(posePartPairs[i + 1]);
1221
1222 if (point1 && point2)
1223 {
1224 FramedPositionPtr p1 = point1->get3D()->toGlobal(localRobot);
1225 FramedPositionPtr p2 = point2->get3D()->toGlobal(localRobot);
1226
1227 std::string name = point1->getName() + "_" + point2->getName();
// OpenPose colors are BGR float triples; converted here to RGBA in [0,1].
1228 unsigned int colorIndex = posePartPairs[i + 1] * 3;
1229 DrawColor color({keypointColors[colorIndex + 2] / 255.f,
1230 keypointColors[colorIndex + 1] / 255.f,
1231 keypointColors[colorIndex] / 255.f,
1232 1.0f});
1233 debugDrawerTopic->setLineVisu(layerName + std::to_string(layerCounter),
1234 objectName + name,
1235 new Vector3(p1->x, p1->y, p1->z),
1236 new Vector3(p2->x, p2->y, p2->z),
1237 10,
1238 color);
1239 }
1240 }
1241
1242 Keypoint3DMap kpm;
1243
1244 // Draw Points
1245 for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
1246 {
1247 KeypointPtr point = object->getNode(i);
1248 if (point)
1249 {
1250 FramedPositionPtr p = point->get3D()->toGlobal(localRobot);
1251 auto color24Bit = point->getDominantColor();
// 0.0039215 == 1/255: convert 24-bit BGR to normalized RGBA.
1252 auto color = DrawColor{0.0039215f * color24Bit.b,
1253 0.0039215f * color24Bit.g,
1254 0.0039215f * color24Bit.r,
1255 1.0f};
1256 debugDrawerTopic->setSphereVisu(layerName + std::to_string(layerCounter),
1257 objectName + point->getName(),
1258 new Vector3(p->x, p->y, p->z),
1259 color,
1260 20);
1261 Keypoint3D kp;
1262 kp.label = point->getName();
1263 kp.x = p->x;
1264 kp.y = p->y;
1265 kp.z = p->z;
1266 kp.confidence = point->getConfidence();
1267 kpm[point->getName()] = kp;
1268 }
1269 }
1270
1271 visionx::viz::HumanPoseBody25::addPoseToLayer(
1272 kpm, openPoseArVizLayer, "human" + std::to_string(human));
1273 human++;
1274 }
1275
1276 arviz.commit(openPoseArVizLayer);
1277}
1278
1279int
1281{
1282 std::vector<int> depths;
1283 for (int xoffset = -radius; xoffset < radius; xoffset++)
1284 {
1285 int xo = x + xoffset;
1286 if (xo < 0 || xo > depthImageBuffer->width)
1287 {
1288 continue;
1289 }
1290 for (int yoffset = -radius; yoffset < radius; yoffset++)
1291 {
1292 int yo = y + yoffset;
1293 if (yo < 0 || yo > depthImageBuffer->height)
1294 {
1295 continue;
1296 }
1297
1298 // Check whether (x,y) is in circle:
1299 if (xoffset * xoffset + yoffset * yoffset <= radius * radius)
1300 {
1301 unsigned int pixelPos =
1302 static_cast<unsigned int>(3 * (yo * depthImageBuffer->width + xo));
1303 int z_value = depthImageBuffer->pixels[pixelPos + 0] +
1304 (depthImageBuffer->pixels[pixelPos + 1] << 8) +
1305 (depthImageBuffer->pixels[pixelPos + 2] << 16);
1306 if (z_value > 0)
1307 {
1308 depths.push_back(z_value);
1309 }
1310 }
1311 }
1312 }
1313 std::sort(depths.begin(), depths.end());
1314
1315 return depths.empty() ? 0 : depths[depths.size() / 2];
1316}
std::string timestamp()
uint8_t index
#define MAX_LAYER
static bool getAbsolutePath(const std::string &relativeFilename, std::string &storeAbsoluteFilename, const std::vector< std::string > &additionalSearchPaths={}, bool verbose=true)
std::string getConfigIdentifier()
Retrieve config identifier for this component as set in constructor.
Definition Component.cpp:90
Property< PropertyType > getProperty(const std::string &name)
The FramedPosition class.
Definition FramedPose.h:158
std::shared_ptr< ImageKeypointTriple > ImageKeypointTriplePtr
SpamFilterDataPtr deactivateSpam(float deactivationDurationSec=10.0f, const std::string &identifier="", bool deactivate=true) const
disables the logging for the current line for the given amount of seconds.
Definition Logging.cpp:99
void offeringTopic(const std::string &name)
Registers a topic for retrival after initialization.
TopicProxyType getTopic(const std::string &name)
Returns a proxy of the specified topic.
void usingTopic(const std::string &name, bool orderedPublishing=false)
Registers a proxy for subscription after initialization.
IceManagerPtr getIceManager() const
Returns the IceManager.
Ice::ObjectPrx getProxy(long timeoutMs=0, bool waitForScheduler=true) const
Returns the proxy of this object (optionally it waits for the proxy)
void start3DPoseEstimation(const Ice::Current &=Ice::emptyCurrent) override
void onConnectImageProcessor() override
Implement this method in the ImageProcessor in order execute parts when the component is fully initia...
void stop(const Ice::Current &=Ice::emptyCurrent) override
void onExitImageProcessor() override
Exit the ImapeProcessor component.
DrawColor24Bit getDominantColorOfPatch(const CByteImage &image, const Vec2d &point, int windowSize=10) const
void start(const Ice::Current &=Ice::emptyCurrent) override
void stop3DPoseEstimation(const Ice::Current &=Ice::emptyCurrent) override
void calculate3DFromStereoImage(KeypointManagerPtr manager)
static void VisualizeTransparentImageMask(CByteImage &resultImage, const CByteImage &maskedInputImage, int brightnessIncrease, const CByteImage &inputImage)
int getMedianDepthFromImage(int x, int y, int radius) const
armarx::PropertyDefinitionsPtr createPropertyDefinitions() override
void process() override
Process the vision component.
KeypointManagerPtr generate2DKeypoints(PoseKeypoints &keypoints, const CByteImage &rgbImage) const
static void Render2DResultImage(const CByteImage &inputImage, const CByteImage &maskedInputImage, KeypointManagerPtr keypointManager, CByteImage &resultImage, op::PoseModel poseModel, float renderThreshold, int brightnessIncrease=100)
void onInitImageProcessor() override
Setup the vision component.
void calculate3DFromDepthImage(KeypointManagerPtr manager)
void maskOutBasedOnDepth(CByteImage &image, int maxDepth)
void onMessage(const Texting::TextMessage &text, const Ice::Current &=Ice::emptyCurrent) override
void onDisconnectImageProcessor() override
Implement this method in the ImageProcessor in order execute parts when the component looses network ...
void filterKeypointsBasedOnWorkspacePolygon(KeypointObjectPtr object)
Property< PropertyType > getProperty(const std::string &name)
Property creation and retrieval.
VirtualRobot::RobotPtr createLocalClone()
Clones the structure of this remote robot to a local instance.
static bool synchronizeLocalCloneToTimestamp(VirtualRobot::RobotPtr robot, RobotStateComponentInterfacePrx robotStatePrx, Ice::Long timestamp)
Synchronizes a local robot to a robot state at timestamp.
The Vector3 class.
Definition Pose.h:113
void enableResultImages(int numberImages, ImageDimension imageDimension, ImageType imageType, const std::string &name="")
Enables visualization.
void usingImageProvider(std::string name)
Registers a delayed topic subscription and a delayed provider proxy retrieval which all will be avail...
bool waitForImages(int milliseconds=1000)
Wait for new images.
ImageProviderInfo getImageProvider(std::string name, ImageType destinationImageType=eRgb, bool waitForProxy=false)
Select an ImageProvider.
int getImages(CByteImage **ppImages)
Poll images from provider.
void provideResultImages(CByteImage **images, armarx::MetaInfoSizeBasePtr info=nullptr)
sends result images for visualization
#define ARMARX_CHECK_EXPRESSION(expression)
This macro evaluates the expression and if it turns out to be false it will throw an ExpressionExcept...
#define ARMARX_INFO
The normal logging level.
Definition Logging.h:181
#define ARMARX_FATAL
The logging level for unexpected behaviour, that will lead to a seriously malfunctioning program and ...
Definition Logging.h:199
#define ARMARX_ERROR
The logging level for unexpected behaviour, that must be fixed.
Definition Logging.h:196
#define ARMARX_DEBUG
The logging level for output that is only interesting while debugging.
Definition Logging.h:184
#define ARMARX_WARNING
The logging level for unexpected behaviour, but not a serious problem.
Definition Logging.h:193
#define ARMARX_VERBOSE
The logging level for verbose information.
Definition Logging.h:187
#define TIMING_START(name)
Helper macro to do timing tests.
Definition TimeUtil.h:289
#define TIMING_END(name)
Prints duration.
Definition TimeUtil.h:306
T getValue(nlohmann::json &userConfig, nlohmann::json &defaultConfig, const std::string &entryName)
Definition utils.h:80
This file offers overloads of toIce() and fromIce() functions for STL container types.
std::vector< std::string > split(const std::string &source, const std::string &splitBy, bool trimElements=false, bool removeEmptyElements=false)
std::vector< std::string > Split(const std::string &source, const std::string &splitBy, bool trimElements=false, bool removeEmptyElements=false)
IceUtil::Handle< class PropertyDefinitionContainer > PropertyDefinitionsPtr
PropertyDefinitions smart pointer type.
IceInternal::Handle< FramedPosition > FramedPositionPtr
Definition FramedPose.h:149
std::shared_ptr< KeypointManager > KeypointManagerPtr
const std::map< std::string, std::pair< unsigned int, std::string > > MPII_TO_MPI
Definition Util.h:30
std::shared_ptr< Keypoint > KeypointPtr
int toInt(const std::string &input)
std::shared_ptr< KeypointObject > KeypointObjectPtr
const std::map< unsigned int, std::string > & IdNameMap
ImageType typeNameToImageType(const std::string &imageTypeName)
Converts an image type name as string into an ImageType integer.
CByteImage * createByteImage(const ImageFormatInfo &imageFormat, const ImageType imageType)
Creates a ByteImage for the destination type specified in the given imageProviderInfo.
CByteImage::ImageType convert(const ImageType visionxImageType)
Converts a VisionX image type into an image type of IVT's ByteImage.