#include <VisionX/interface/components/OpenPoseEstimationInterface.h>
#include <VisionX/interface/components/RGBDImageProvider.h>

#include <boost/make_shared.hpp>
#include <boost/algorithm/string.hpp>

#include <openpose/pose/poseParameters.hpp>
#include <openpose/pose/poseParametersRender.hpp>

#include <Image/IplImageAdaptor.h>
#include <Image/PrimitivesDrawer.h>

#include <SimoxUtility/json/json.hpp>
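// Rounds a non-negative value to the nearest integer; same role as the
// helper OpenPose uses internally for its render sizing.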
template <typename T>
inline int positiveIntRound(const T a)
providerName = getProperty<std::string>("ImageProviderName").getValue();
offeringTopic(getProperty<std::string>("DebugDrawerTopicName").getValue());
radius = getProperty<int>("DepthMedianRadius").getValue();
useDistortionParameters = getProperty<bool>("UseDistortionParameters").getValue();
layerName = "OpenPoseEstimation";
renderThreshold = getProperty<float>("OP_render_threshold").getValue();
cameraNodeName = getProperty<std::string>("CameraNodeName").getValue();
minimalValidKeypoints = getProperty<int>("MinimalAmountKeypoints").getValue();
reportOnlyNearestPerson = getProperty<bool>("ReportOnlyNearestPerson").getValue();

std::vector<Polygon2D::Point> points;
std::vector<std::string> pointStrings;
std::string input = getProperty<std::string>("WorkspacePolygon").getValue();
for (const auto& s : pointStrings)
    std::vector<std::string> workSpacePolygonPoint;
    boost::split(workSpacePolygonPoint, s, boost::is_any_of(","));

    point.x = std::strtof(workSpacePolygonPoint.at(0).c_str(), nullptr);
    point.y = std::strtof(workSpacePolygonPoint.at(1).c_str(), nullptr);
    points.push_back(point);
mode = getProperty<OpenPoseEstimationMode>("Mode").getValue();
poseModel = op::flagsToPoseModel("MPI");
ARMARX_INFO << "Switching human model from \"" << getProperty<std::string>("OP_model_pose").getValue()
            << "\" to \"MPI\" because the OpenPoseEstimationMode is set to \"FromTopic\".";
std::string s = getProperty<std::string>("Topic_Dimensions").getValue();
poseModel = op::flagsToPoseModel(getProperty<std::string>("OP_model_pose").getValue());
timeoutCounter2d = 0;
readErrorCounter2d = 0;
numImages = static_cast<unsigned int>(imageProviderInfo.numberImages);
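// One image means plain RGB, two means an RGB-D pair (imageBuffer[0] = RGB,
// imageBuffer[1] = depth); anything else is unsupported.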
if (numImages < 1 || numImages > 2)
imageBuffer = new CByteImage*[numImages];
for (unsigned int i = 0; i < numImages; i++)
::ImageProcessor::Zero(maskedrgbImageBuffer);
::ImageProcessor::Zero(depthImageBuffer);
openPoseResultImage = new CByteImage*[1];
offeringTopic(getProperty<std::string>("OpenPoseEstimation2DTopicName").getValue());
listener2DPrx = getTopic<OpenPose2DListenerPrx>(getProperty<std::string>("OpenPoseEstimation2DTopicName").getValue());
offeringTopic(getProperty<std::string>("OpenPoseEstimation3DTopicName").getValue());
listener3DPrx = getTopic<OpenPose3DListenerPrx>(getProperty<std::string>("OpenPoseEstimation3DTopicName").getValue());
stereoCalibration = nullptr;
calibration = nullptr;

visionx::StereoCalibrationInterfacePrx calib = visionx::StereoCalibrationInterfacePrx::checkedCast(imageProviderInfo.proxy);
ARMARX_WARNING << "Image provider does not provide a stereo calibration - 3D will not work - " << imageProviderInfo.proxy->ice_ids();
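// The left camera's monocular calibration suffices to project depth-image
// pixels back into the camera frame.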
calibration = stereoCalibration->GetLeftCalibration();
debugDrawerTopic = getTopic<armarx::DebugDrawerInterfacePrx>(getProperty<std::string>("DebugDrawerTopicName").getValue());
debugDrawerTopic->clearLayer(layerName);
robotStateInterface = getProxy<RobotStateComponentInterfacePrx>(getProperty<std::string>("RobotStateComponentName").getValue(), false, "", false);
if (robotStateInterface)
ARMARX_ERROR << "RobotStateComponent is not available, 3D estimation will be deactivated!";
if (getProperty<bool>("ActivateOnStartup").getValue())
    task2DKeypoints->start();
task2DKeypoints->stop();

delete rgbImageBuffer;
delete maskedrgbImageBuffer;
delete depthImageBuffer;
delete[] imageBuffer;
delete[] openPoseResultImage;
delete stereoCalibration;

imageKeypointBuffer.reset();
    << " (#timeout " << timeoutCounter2d
    << ", #read error " << readErrorCounter2d
    << ", #success " << sucessCounter2d << ")";
if (static_cast<unsigned int>(getImages(providerName, imageBuffer, imageMetaInfo)) != numImages)
    ++readErrorCounter2d;
    << " (#timeout " << timeoutCounter2d
    << ", #read error " << readErrorCounter2d
    << ", #success " << sucessCounter2d << ")";
imageKeypointBuffer->addRGBImage(imageBuffer[0], imageMetaInfo->timeProvided);
if (imageKeypointBuffer->addDepthImage(imageBuffer[1], imageMetaInfo->timeProvided))
    timeProvidedImage = imageMetaInfo->timeProvided;
std::unique_lock lock_rgb(rgbImageBufferMutex);
::ImageProcessor::CopyImage(imageBuffer[0], rgbImageBuffer);

std::unique_lock lock_depth(depthImageBufferMutex);
::ImageProcessor::CopyImage(imageBuffer[1], depthImageBuffer);

timeProvidedImage = imageMetaInfo->timeProvided;
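// Main processing loop: fetches fresh images, runs the OpenPose forward pass
// while 2D estimation is active, prunes invalid keypoint objects, and
// publishes 2D (and, given a robot model plus depth data, 3D) keypoints.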
void OpenPoseEstimation::run()
setupOpenPoseEnvironment();

while (running2D && task2DKeypoints->isRunning())

imageUpdated = false;

if (running3D && localRobot && numImages > 1)
std::unique_lock lock(keypointManagerMutex);
std::vector<KeypointObjectPtr>::iterator iter = keypointManager->getData().begin();
while (iter != keypointManager->getData().end())
    iter = keypointManager->getData().erase(iter);
if (reportOnlyNearestPerson)

listener2DPrx->report2DKeypoints(keypointManager->toIce2D(), timeProvidedImage);
listener2DPrx->report2DKeypointsNormalized(keypointManager->toIce2D_normalized(imageProviderInfo.imageFormat.dimension.width, imageProviderInfo.imageFormat.dimension.height), timeProvidedImage);
listener3DPrx->report3DKeypoints(keypointManager->toIce3D(localRobot), timeProvidedImage);

listener2DPrx->report2DKeypoints(keypointManager->toIce2D(), timeProvidedImage);
listener2DPrx->report2DKeypointsNormalized(keypointManager->toIce2D_normalized(imageProviderInfo.imageFormat.dimension.width, imageProviderInfo.imageFormat.dimension.height), timeProvidedImage);
void OpenPoseEstimation::setupOpenPoseEnvironment()
const op::Point<int> netInputSize = op::flagsToPoint(getProperty<std::string>("OP_net_resolution").getValue(), "-1x368");
const op::Point<int> outputSize = op::flagsToPoint(getProperty<std::string>("OP_output_resolution").getValue(), "-1x-1");
int scaleNumber = getProperty<int>("OP_scale_number").getValue();
double scaleGap = getProperty<double>("OP_scale_gap").getValue();
scaleAndSizeExtractor = std::make_shared<op::ScaleAndSizeExtractor>(netInputSize, outputSize, scaleNumber, scaleGap);
cvMatToOpInput = std::make_shared<op::CvMatToOpInput>(poseModel);
cvMatToOpOutput.reset(new op::CvMatToOpOutput());
opOutputToCvMat.reset(new op::OpOutputToCvMat());
std::string modelFolder;
modelFolder = getProperty<std::string>("OP_model_folder").getValue();
if (!modelFolder.empty() && *modelFolder.rbegin() != '/')
    modelFolder += "/";
ARMARX_INFO << "Found model path at: " << modelFolder;
int numGpuStart = getProperty<int>("OP_num_gpu_start").getValue();
poseExtractorCaffe = std::make_shared<op::PoseExtractorCaffe>(poseModel, modelFolder, numGpuStart);
poseExtractorCaffe->initializationOnThread();
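// Forward pass via OpenPose's low-level API: adapt the IVT image to OpenCV,
// derive the multi-scale net inputs, run the Caffe extractor, and return the
// resulting keypoint array.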
IplImage* iplImage = IplImageAdaptor::Adapt(imageBuffer);
cv::Mat inputImage = cv::cvarrToMat(iplImage);
const op::Point<int> imageSize {inputImage.cols, inputImage.rows};

std::vector<double> scaleInputToNetInputs;
std::vector<op::Point<int>> netInputSizes;
double scaleInputToOutput;
op::Point<int> outputResolution;
std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution) = scaleAndSizeExtractor->extract(imageSize);

const auto netInputArray = cvMatToOpInput->createArray(inputImage, scaleInputToNetInputs, netInputSizes);
poseExtractorCaffe->forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
const PoseKeypoints poseKeypoints = poseExtractorCaffe->getPoseKeypoints();

return poseKeypoints;
IdNameMap m1 = keypointManager->getIdNameMap();
IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
ARMARX_CHECK_EXPRESSION(m1.size() == poseBodyPartMapping.size() && std::equal(m1.begin(), m1.end(), poseBodyPartMapping.begin()))
    << "The idNameMap used in the keypointManager must be equal to the poseBodyPartMapping defined by the given poseModel.";
const int objects = keypointManager->getData().size();
const int numberKeypoints = m1.size();
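// op::Array stores keypoints flat as [person][bodyPart][x, y, score]; the
// x-value of part i of person n lies at (n * numberKeypoints + i) * getSize(2).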
int objectNumber = 0;
for (int i = 0; i < numberKeypoints; i++)
{
    const int index = (objectNumber * numberKeypoints + i) * opKeypoints.getSize(2);
    KeypointPtr p = obj->getNode(static_cast<unsigned int>(i));
    if (p)
    {
        opKeypoints[index] = p->get2D()._x;
        opKeypoints[index + 1] = p->get2D()._y;
        opKeypoints[index + 2] = p->getConfidence();
    }
    else
    {
        opKeypoints[index] = 0.0f;
        opKeypoints[index + 1] = 0.0f;
        opKeypoints[index + 2] = 0.0f;
    }
}
std::cout << opKeypoints.toString() << std::endl;
Render2DResultImage(inputImage, maskedInputImage, opKeypoints, resultImage, poseModel, renderThreshold, brightnessIncrease);
Ice::Byte mask[3] = {0, 255, 0};
auto pixelCount = inputImage.width * inputImage.height;

CByteImage imageMask(maskedInputImage.width, maskedInputImage.height, CByteImage::eGrayScale);
CByteImage smoothedImageMask(&imageMask);
::ImageProcessor::Zero(&imageMask);
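// Build a binary mask of all pixels painted in the green mask color and blur
// it, so the brightness boost fades out smoothly at the mask border.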
if (brightnessIncrease > 0)
{
    for (int i = 0; i < pixelCount; i += 1)
        if (maskedInputImage.pixels[i * 3] == mask[0] &&
            maskedInputImage.pixels[i * 3 + 1] == mask[1] && maskedInputImage.pixels[i * 3 + 2] == mask[2])
            imageMask.pixels[i] = 255;

    ::ImageProcessor::GaussianSmooth5x5(&imageMask, &smoothedImageMask);
}
for (int i = 0; i < pixelCount; i += 1)
{
    if (brightnessIncrease > 0)
    {
        float perc = static_cast<float>(smoothedImageMask.pixels[i]) / 255.f;
        int effectiveBrightnessIncrease = static_cast<int>(brightnessIncrease * perc);
        resultImage.pixels[i * 3] = std::min<int>(resultImage.pixels[i * 3] + effectiveBrightnessIncrease, 255);
        resultImage.pixels[i * 3 + 1] = std::min<int>(resultImage.pixels[i * 3 + 1] + effectiveBrightnessIncrease, 255);
        resultImage.pixels[i * 3 + 2] = std::min<int>(resultImage.pixels[i * 3 + 2] + effectiveBrightnessIncrease, 255);
    }
    else if (brightnessIncrease < 0)
    {
        if (maskedInputImage.pixels[i * 3] == mask[0] &&
            maskedInputImage.pixels[i * 3 + 1] == mask[1] && maskedInputImage.pixels[i * 3 + 2] == mask[2])
        {
            resultImage.pixels[i * 3] = maskedInputImage.pixels[i * 3];
            resultImage.pixels[i * 3 + 1] = maskedInputImage.pixels[i * 3 + 1];
            resultImage.pixels[i * 3 + 2] = maskedInputImage.pixels[i * 3 + 2];
        }
    }
}
::ImageProcessor::CopyImage(&inputImage, &resultImage);
if (keypoints.getSize().empty())
const std::vector<unsigned int>& posePartPairs = op::getPoseBodyPartPairsRender(poseModel);
std::vector<float> keypointColors = op::getPoseColors(poseModel);

const auto thicknessCircleRatio = 1.f / 75.f;
const auto thicknessLineRatioWRTCircle = 0.75f;
const auto area = inputImage.width * inputImage.height;
const int numberKeypoints = keypoints.getSize(1);
for (int person = 0; person < keypoints.getSize(0); person++)
    const auto personRectangle = op::getKeypointsRectangle(keypoints, person, 0.1f);
    if (personRectangle.area() > 0)
        const auto ratioAreas = op::fastMin(1.f, op::fastMax(personRectangle.width / static_cast<float>(inputImage.width),
                                                             personRectangle.height / static_cast<float>(inputImage.height)));
        const auto thicknessRatio = op::fastMax(positiveIntRound<float>(static_cast<float>(std::sqrt(area)) * thicknessCircleRatio * ratioAreas), 2);
        const auto thicknessCircle = op::fastMax(1, (ratioAreas > 0.05f ? thicknessRatio : -1));
        const auto thicknessLine = op::fastMax(1, positiveIntRound(thicknessRatio * thicknessLineRatioWRTCircle));
        const auto radius = thicknessRatio / 2;
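// Limb pass: posePartPairs lists index pairs of connected body parts; a limb
// is drawn only if both endpoints were detected (non-zero) and exceed the
// render threshold. The sizing heuristics above follow OpenPose's renderer.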
for (unsigned int i = 0; i < posePartPairs.size(); i = i + 2)
    const int index1 = (person * numberKeypoints + static_cast<int>(posePartPairs[i])) * keypoints.getSize(2);
    const int index2 = (person * numberKeypoints + static_cast<int>(posePartPairs[i + 1])) * keypoints.getSize(2);

    float x1 = keypoints[index1 + 0];
    float y1 = keypoints[index1 + 1];
    float x2 = keypoints[index2 + 0];
    float y2 = keypoints[index2 + 1];

    if (!(x1 == 0.0f && y1 == 0.0f) && !(x2 == 0.0f && y2 == 0.0f))
        if (keypoints[index1 + 2] > renderThreshold && keypoints[index2 + 2] > renderThreshold)
            unsigned int colorIndex = posePartPairs[i + 1] * 3;
            ::PrimitivesDrawer::DrawLine(&resultImage,
                                         static_cast<int>(keypointColors[colorIndex + 2]),
                                         static_cast<int>(keypointColors[colorIndex + 1]),
                                         static_cast<int>(keypointColors[colorIndex + 0]),
for (int i = 0; i < numberKeypoints; i++)
    const int index = (person * numberKeypoints + i) * keypoints.getSize(2);
    float x = keypoints[index + 0];
    float y = keypoints[index + 1];

    if (!(x == 0.0f && y == 0.0f) && keypoints[index + 2] > renderThreshold)
        unsigned int colorIndex = static_cast<unsigned int>(i * 3);
        ::PrimitivesDrawer::DrawCircle(&resultImage,
                                       static_cast<int>(keypointColors[colorIndex + 2]),
                                       static_cast<int>(keypointColors[colorIndex + 1]),
                                       static_cast<int>(keypointColors[colorIndex + 0]),
task2DKeypoints->start();

task2DKeypoints->stop(true);
ARMARX_INFO << "Starting OpenPoseEstimation -- 3D";

ARMARX_INFO << "Stopping OpenPoseEstimation -- 3D";
std::unique_lock lock(depthImageBufferMutex);

int pixelCount = depthImageBuffer->width * depthImageBuffer->height;
int depthThresholdmm = maxDepth;
CByteImage maskImage(depthImageBuffer->width, depthImageBuffer->height, CByteImage::eGrayScale);
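// Depth is packed little-endian into the three 8-bit channels (value in mm);
// pixels beyond the threshold or without a reading (z == 0) are masked out.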
for (int i = 0; i < pixelCount; i += 1)
{
    int z_value = depthImageBuffer->pixels[i * 3 + 0]
                  + (depthImageBuffer->pixels[i * 3 + 1] << 8)
                  + (depthImageBuffer->pixels[i * 3 + 2] << 16);
    maskImage.pixels[i] = (z_value > depthThresholdmm || z_value == 0) ? 0 : 255;
}
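// Morphological cleanup: erosion removes speckle, and the generous dilation
// afterwards grows the mask so the person's silhouette is not clipped.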
::ImageProcessor::Erode(&maskImage, &maskImage, 5);
::ImageProcessor::Dilate(&maskImage, &maskImage, 20);
for (int i = 0; i < pixelCount; i += 1)
    if (maskImage.pixels[i] == 0)
    {
        image.pixels[i * 3] = 0;
        image.pixels[i * 3 + 1] = 255;
        image.pixels[i * 3 + 2] = 0;
    }
if (point.x < 0 || point.y < 0 || point.x >= image.width || point.y >= image.height)
    return DrawColor24Bit {0, 0, 0};
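// Dominant-color estimate: quantize each RGB channel coarsely (divisor
// 256 / 3) and vote over a window around the point; the fullest histogram
// bucket wins.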
int divisor = 256 / 3;
typedef std::tuple<Ice::Byte, Ice::Byte, Ice::Byte> RGBTuple;
std::map<RGBTuple, int> histogram;
int halfWindowSize = static_cast<int>(windowSize * 0.5);
int left = std::max<int>(0, static_cast<int>(point.x) - halfWindowSize);
int top = std::max<int>(0, static_cast<int>(point.y) - halfWindowSize);
int right = std::min<int>(image.width, static_cast<int>(point.x) + halfWindowSize);
int bottom = std::min<int>(image.height, static_cast<int>(point.y) + halfWindowSize);
for (int x = left; x < right; x++)
    for (int y = top; y < bottom; y++)
        int pixelPos = (y * image.width + x) * 3;
        auto tuple = std::make_tuple(static_cast<Ice::Byte>(image.pixels[pixelPos] / divisor),
                                     static_cast<Ice::Byte>(image.pixels[pixelPos + 1] / divisor),
                                     static_cast<Ice::Byte>(image.pixels[pixelPos + 2] / divisor));
        if (histogram.count(tuple))
            histogram.at(tuple)++;
        else
            histogram[tuple] = 1;
float maxHistogramValue = 0;
RGBTuple dominantColor;
for (auto& pair : histogram)
{
    if (pair.second > maxHistogramValue)
    {
        dominantColor = pair.first;
        maxHistogramValue = pair.second;
    }
}
auto rgb = DrawColor24Bit {static_cast<Ice::Byte>(std::get<0>(dominantColor) * divisor),
                           static_cast<Ice::Byte>(std::get<1>(dominantColor) * divisor),
                           static_cast<Ice::Byte>(std::get<2>(dominantColor) * divisor)};
using json = nlohmann::json;
json jsonView = json::parse(text.message);
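// Expected message layout (inferred from the parsing below):
// {"timestamp": <long>, "values": {"<label>": {"position": {"x": ..., "y": ...}}}}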
long timestamp = jsonView["timestamp"].get<long>();
json jsonValues = jsonView["values"];
IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
float adjustingWidth = static_cast<float>(imageProviderInfo.imageFormat.dimension.width)
                       / static_cast<float>(this->incomingKeypointDimensions.width);
float adjustingHeight = static_cast<float>(imageProviderInfo.imageFormat.dimension.height)
                        / static_cast<float>(this->incomingKeypointDimensions.height);
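// Keypoints arrive in incomingKeypointDimensions and are rescaled to the
// resolution of the local image provider.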
for (json::iterator it = jsonValues.begin(); it != jsonValues.end(); ++it)
    std::string jsonLabel = it.key();
    unsigned int id = pair.first;
    std::string name = pair.second;
    json jsonPosition = it.value();
    json jsonPoint = jsonPosition["position"];
    float x = jsonPoint["y"];
    float y = jsonPoint["x"];
    y *= adjustingHeight;

    if (x > 0.0f && y > 0.0f)
        KeypointPtr keypoint = std::make_shared<Keypoint>(x,
        object->insertNode(keypoint);

manager->getData().push_back(object);

if (imageKeypointBuffer->addKeypoints(manager, timestamp))
    timeProvidedImage = timestamp;
IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);

if (keypoints.getSize().empty())
    debugDrawerTopic->removeLayer(layerName + std::to_string(layerCounter));

int entities = keypoints.getSize().at(0);
int points = keypoints.getSize().at(1);
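// keypoints.getSize(): [0] = detected people, [1] = body parts per person,
// [2] = values per part (x, y, score).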
for (int i = 0; i < entities; i++)
    for (int id = 0; id < points; id++)
        float x = keypoints.at({i, id, 0});
        float y = keypoints.at({i, id, 1});
        if (x == 0.0f && y == 0.0f)
            continue;

        KeypointPtr keypoint = std::make_shared<Keypoint>(x,
                                                          y,
                                                          static_cast<unsigned int>(id),
                                                          poseBodyPartMapping.at(static_cast<unsigned int>(id)),
                                                          static_cast<float>(keypoints.at({i, id, 2})));
        object->insertNode(keypoint);

    if (static_cast<int>(object->size()) >= minimalValidKeypoints)
        manager->getData().push_back(object);
for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
    if (point && point->is3DSet())
        if (!workspacePolygon.isInside(pos))
            object->removeNode(i);

this->keypointManager->filterToNearestN(1);
std::unique_lock lock(keypointManagerMutex);

std::unique_lock lock_rgb(rgbImageBufferMutex);
::ImageProcessor::CopyImage(rgbImageBuffer, maskedrgbImageBuffer);

std::unique_lock lock(resultImageBufferMutex);
OpenPoseEstimation::Render2DResultImage(*rgbImageBuffer, *maskedrgbImageBuffer, p, *openPoseResultImage[0], poseModel, renderThreshold, getProperty<int>("MaskBrightnessIncrease").getValue());
std::unique_lock lock_rgb(rgbImageBufferMutex);
std::unique_lock lock_depth(depthImageBufferMutex);

keypointManager->matchWith(m2);
std::unique_lock lock(resultImageBufferMutex);
std::unique_lock lock_rgb(rgbImageBufferMutex);
::ImageProcessor::CopyImage(imageKeypointTriple->rgbImage, rgbImageBuffer);
std::unique_lock lock_depth(depthImageBufferMutex);
::ImageProcessor::CopyImage(imageKeypointTriple->depthImage, depthImageBuffer);
keypointManager = imageKeypointTriple->keypoints;

std::unique_lock lock(resultImageBufferMutex);
OpenPoseEstimation::Render2DResultImage(*rgbImageBuffer, *rgbImageBuffer, keypointManager, *openPoseResultImage[0], poseModel, renderThreshold, getProperty<int>("MaskBrightnessIncrease").getValue());
const int depthThreshold = getProperty<int>("MaxDepthDifference").getValue();

if (object->size() == 0)

std::map<unsigned int, int> depthStorage;
std::vector<int> depthsCopy;
for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
    int depth = getMedianDepthFromImage(static_cast<int>(point->get2D()._x), static_cast<int>(point->get2D()._y), radius);
    depthStorage[i] = depth;
    depthsCopy.push_back(depth);
std::sort(depthsCopy.begin(), depthsCopy.end());
const int medianDepth = depthsCopy.at(depthsCopy.size() / 2);
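// Clamp outliers: any keypoint whose depth deviates from the person's median
// by more than MaxDepthDifference is replaced by the median.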
for (auto& m : depthStorage)
{
    int depth = m.second;
    if (depth > medianDepth + depthThreshold || depth < medianDepth - depthThreshold)
        m.second = medianDepth;
}
for (unsigned int i = 0; i < op::getPoseBodyPartMapping(poseModel).size(); i++)
    imagePoint.x = point->get2D()._x;
    imagePoint.y = point->get2D()._y;
    calibration->ImageToWorldCoordinates(imagePoint, result, static_cast<float>(depthStorage.at(i)), useDistortionParameters);
    point->set3D(new FramedPosition(Eigen::Vector3f(result.x, result.y, result.z), cameraNodeName, localRobot->getName()));
for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
    auto pair = point->getStereo2D();
    const Vec2d imagePointL = {pair.first._x, pair.first._y};
    const Vec2d imagePointR = {pair.second._x, pair.second._y};
    ARMARX_DEBUG << "PointL: " << imagePointL.x << " " << imagePointL.y;
    ARMARX_DEBUG << "PointR: " << imagePointR.x << " " << imagePointR.y;
    PointPair3d rD;
    stereoCalibration->Calculate3DPoint(imagePointL, imagePointR, result, false, useDistortionParameters, &rD);
    ARMARX_DEBUG << "IVT ConnectionLine: " << rD.p1.x << " " << rD.p1.y << " " << rD.p1.z
                 << " --- " << rD.p2.x << " " << rD.p2.y << " " << rD.p2.z;
    point->set3D(new FramedPosition(Eigen::Vector3f(result.x, result.y, result.z), cameraNodeName, localRobot->getName()));
std::unique_lock lock(keypointManagerMutex);
debugDrawerTopic->removeLayer(layerName + std::to_string(layerCounter));
if (keypointManager->getData().empty() || !localRobot)

const std::vector<unsigned int>& posePartPairs = op::getPoseBodyPartPairsRender(poseModel);
const std::vector<float> keypointColors = op::getPoseColors(poseModel);

layerCounter = (layerCounter == MAX_LAYER - 1) ? 0 : layerCounter + 1;
int objectIndex = 0;
std::string objectName = "object_" + std::to_string(objectIndex) + "_";
for (unsigned int i = 0; i < posePartPairs.size(); i = i + 2)
    KeypointPtr point1 = object->getNode(posePartPairs[i]);
    KeypointPtr point2 = object->getNode(posePartPairs[i + 1]);

    if (point1 && point2)
        std::string name = point1->getName() + "_" + point2->getName();
        unsigned int colorIndex = posePartPairs[i + 1] * 3;
        DrawColor color({keypointColors[colorIndex + 2] / 255.f, keypointColors[colorIndex + 1] / 255.f, keypointColors[colorIndex] / 255.f, 1.0f});
        debugDrawerTopic->setLineVisu(layerName + std::to_string(layerCounter), objectName + name, new Vector3(p1->x, p1->y, p1->z), new Vector3(p2->x, p2->y, p2->z), 10, color);
for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
    auto color24Bit = point->getDominantColor();
    auto color = DrawColor {0.0039215f * color24Bit.b, 0.0039215f * color24Bit.g, 0.0039215f * color24Bit.r, 1.0f};
    debugDrawerTopic->setSphereVisu(layerName + std::to_string(layerCounter), objectName + point->getName(), new Vector3(p->x, p->y, p->z), color, 20);

    kp.label = point->getName();
    kp.confidence = point->getConfidence();
    kpm[point->getName()] = kp;
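// Returns the median of all depth readings inside a circle of the given
// radius around (x, y), or 0 if there is none.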
std::vector<int> depths;
for (int xoffset = -radius; xoffset < radius; xoffset++)
    int xo = x + xoffset;
    if (xo < 0 || xo >= depthImageBuffer->width)
        continue;

    for (int yoffset = -radius; yoffset < radius; yoffset++)
        int yo = y + yoffset;
        if (yo < 0 || yo >= depthImageBuffer->height)
            continue;
        if (xoffset * xoffset + yoffset * yoffset <= radius * radius)
            unsigned int pixelPos = static_cast<unsigned int>(3 * (yo * depthImageBuffer->width + xo));
            int z_value = depthImageBuffer->pixels[pixelPos + 0]
                          + (depthImageBuffer->pixels[pixelPos + 1] << 8)
                          + (depthImageBuffer->pixels[pixelPos + 2] << 16);

            depths.push_back(z_value);
std::sort(depths.begin(), depths.end());

return depths.empty() ? 0 : depths[depths.size() / 2];