OpenPoseEstimation.cpp
/*
 * This file is part of ArmarX.
 *
 * ArmarX is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ArmarX is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * @package    VisionX::ArmarXObjects::OpenPoseEstimation
 * @author     Stefan Reither ( stef dot reither at web dot de )
 * @date       2018
 * @copyright  http://www.gnu.org/licenses/gpl-2.0.txt
 *             GNU General Public License
 */
22 
23 #include "OpenPoseEstimation.h"
24 #include <VisionX/interface/components/OpenPoseEstimationInterface.h>
25 
28 #include <VisionX/interface/components/RGBDImageProvider.h>
32 
36 
37 #include <boost/make_shared.hpp>
38 #include <boost/algorithm/string.hpp>
39 
40 #include <openpose/pose/poseParameters.hpp>
41 #include <openpose/pose/poseParametersRender.hpp>
42 #include <Image/IplImageAdaptor.h>
43 #include <Image/PrimitivesDrawer.h>
44 
45 
46 // json
47 #include <SimoxUtility/json/json.hpp>
48 
using namespace armarx;

#define MAX_LAYER 5
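// Assumption from usage below: MAX_LAYER is the number of rotating DebugDrawer layers
// ("OpenPoseEstimation0" .. "OpenPoseEstimation4") that visualize3DKeypoints() cycles through
// and stop3DPoseEstimation() clears.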


namespace
{
    // This is a workaround: in OpenPose 1.4 this function was called `intRound`, whereas in
    // OpenPose 1.5.1 it was renamed to `positiveIntRound`. The function is defined here for as
    // long as we support several OpenPose versions. As soon as we move to 1.5.1, we can remove
    // it and use `op::positiveIntRound` instead.
    template<typename T>
    inline int positiveIntRound(const T a)
    {
        return int(a + 0.5f);
    }
}


armarx::PropertyDefinitionsPtr OpenPoseEstimation::createPropertyDefinitions()
{
    return armarx::PropertyDefinitionsPtr(new OpenPoseEstimationPropertyDefinitions(getConfigIdentifier()));
}

void OpenPoseEstimation::onInitImageProcessor()
{
    providerName = getProperty<std::string>("ImageProviderName").getValue();

    usingImageProvider(providerName);
    offeringTopic(getProperty<std::string>("DebugDrawerTopicName").getValue());

    radius = getProperty<int>("DepthMedianRadius").getValue();
    useDistortionParameters = getProperty<bool>("UseDistortionParameters").getValue();
    layerName = "OpenPoseEstimation";
    renderThreshold = getProperty<float>("OP_render_threshold").getValue();

    cameraNodeName = getProperty<std::string>("CameraNodeName").getValue();
    minimalValidKeypoints = getProperty<int>("MinimalAmountKeypoints").getValue();
    reportOnlyNearestPerson = getProperty<bool>("ReportOnlyNearestPerson").getValue();

    // Setup workspace-polygon
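    // The property value is a semicolon-separated list of comma-separated 2D points, e.g. a
    // (hypothetical) value "-10000,-10000;10000,-10000;10000,10000;-10000,10000".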
    std::vector<Polygon2D::Point> points;
    std::vector<std::string> pointStrings;
    std::string input = getProperty<std::string>("WorkspacePolygon").getValue();
    boost::split(pointStrings, input, boost::is_any_of(";"));
    for (auto s : pointStrings)
    {
        ARMARX_VERBOSE << "WorkspacePolygon: " << s;
        std::vector<std::string> workSpacePolygonPoint;
        boost::split(workSpacePolygonPoint, s, boost::is_any_of(","));
        ARMARX_CHECK_EXPRESSION(workSpacePolygonPoint.size() == 2);
        Polygon2D::Point point;
        point.x = std::strtof(workSpacePolygonPoint.at(0).c_str(), nullptr);
        point.y = std::strtof(workSpacePolygonPoint.at(1).c_str(), nullptr);
        points.push_back(point);
    }
    workspacePolygon = Polygon2D(points);

    mode = getProperty<OpenPoseEstimationMode>("Mode").getValue();
    if (mode == OpenPoseEstimationMode::FromTopic)
    {
        usingTopic("2Dposes");
        poseModel = op::flagsToPoseModel("MPI");
        ARMARX_INFO << "Switching human model from \"" << getProperty<std::string>("OP_model_pose").getValue()
                    << "\" to \"MPI\" because the OpenPoseEstimationMode is set to \"FromTopic\".";

        // parse Topic_Dimensions
        std::string s = getProperty<std::string>("Topic_Dimensions").getValue();
        std::vector<std::string> split = armarx::Split(s, "x", true);
        ARMARX_CHECK_EXPRESSION(split.size() == 2);
        this->incomingKeypointDimensions.width = armarx::toInt(split.at(0));
        this->incomingKeypointDimensions.height = armarx::toInt(split.at(1));
    }
    else
    {
        poseModel = op::flagsToPoseModel(getProperty<std::string>("OP_model_pose").getValue());
    }

    timeoutCounter2d = 0;
    readErrorCounter2d = 0;
    sucessCounter2d = 0;
}

void OpenPoseEstimation::onConnectImageProcessor()
{
    visionx::ImageType imageDisplayType = visionx::tools::typeNameToImageType("rgb");
    imageProviderInfo = getImageProvider(providerName, imageDisplayType);

    numImages = static_cast<unsigned int>(imageProviderInfo.numberImages);
    if (numImages < 1 || numImages > 2)
    {
        ARMARX_FATAL << "invalid number of images. aborting";
        return;
    }
    if (mode == OpenPoseEstimationMode::FromStereoImage)
    {
        if (numImages != 2)
        {
            ARMARX_FATAL << "invalid number of images. aborting";
            return;
        }
    }

    imageBuffer = new CByteImage*[numImages];
    for (unsigned int i = 0; i < numImages; i++)
    {
        imageBuffer[i] = visionx::tools::createByteImage(imageProviderInfo);
    }
    rgbImageBuffer = visionx::tools::createByteImage(imageProviderInfo);
    maskedrgbImageBuffer = visionx::tools::createByteImage(imageProviderInfo);
    ::ImageProcessor::Zero(maskedrgbImageBuffer);
    depthImageBuffer = visionx::tools::createByteImage(imageProviderInfo);
    ::ImageProcessor::Zero(depthImageBuffer);
    openPoseResultImage = new CByteImage*[1];
    openPoseResultImage[0] = visionx::tools::createByteImage(imageProviderInfo);
    enableResultImages(1, imageProviderInfo.imageFormat.dimension, imageProviderInfo.imageFormat.type);

    offeringTopic(getProperty<std::string>("OpenPoseEstimation2DTopicName").getValue());
    listener2DPrx = getTopic<OpenPose2DListenerPrx>(getProperty<std::string>("OpenPoseEstimation2DTopicName").getValue());
    offeringTopic(getProperty<std::string>("OpenPoseEstimation3DTopicName").getValue());
    listener3DPrx = getTopic<OpenPose3DListenerPrx>(getProperty<std::string>("OpenPoseEstimation3DTopicName").getValue());

    // Calibration
    stereoCalibration = nullptr;
    calibration = nullptr;
    {
        ARMARX_VERBOSE << "Trying to get StereoCalibrationInterface proxy: " << getIceManager()->getCommunicator()->proxyToString(imageProviderInfo.proxy);
        visionx::StereoCalibrationInterfacePrx calib = visionx::StereoCalibrationInterfacePrx::checkedCast(imageProviderInfo.proxy);
        if (!calib)
        {
            ARMARX_WARNING << "Image provider does not provide a stereo calibration - 3D will not work - " << imageProviderInfo.proxy->ice_ids();
        }
        else
        {
            ARMARX_VERBOSE << "got StereoCalibrationInterface proxy";
            stereoCalibration = visionx::tools::convert(calib->getStereoCalibration());
            ARMARX_VERBOSE << "got StereoCalibration";
            calibration = stereoCalibration->GetLeftCalibration();
            ARMARX_VERBOSE << "got mono Calibration";
        }
    }

    debugDrawerTopic = getTopic<armarx::DebugDrawerInterfacePrx>(getProperty<std::string>("DebugDrawerTopicName").getValue());
    debugDrawerTopic->clearLayer(layerName);
    ARMARX_VERBOSE << "Trying to get RobotStateComponent proxy";
    // Trying to access the RobotStateComponent if it is available
    robotStateInterface = getProxy<RobotStateComponentInterfacePrx>(getProperty<std::string>("RobotStateComponentName").getValue(), false, "", false);
    if (robotStateInterface)
    {
        localRobot = RemoteRobot::createLocalClone(robotStateInterface);
        ARMARX_VERBOSE << "RobotStateComponent available";
    }
    else
    {
        ARMARX_ERROR << "RobotStateComponent is not available, 3D estimation will be deactivated!";
    }

    if (mode == OpenPoseEstimationMode::FromTopic)
    {
        imageKeypointBuffer.reset(new ImageKeypointBuffer(imageProviderInfo));
    }

    if (getProperty<bool>("ActivateOnStartup").getValue())
    {
        running2D = true;
        task2DKeypoints = new RunningTask<OpenPoseEstimation>(this, &OpenPoseEstimation::run);
        task2DKeypoints->start();

        running3D = true;
    }
    else
    {
        running2D = false;
        running3D = false;
    }
    ARMARX_INFO << "OpenPoseEstimation connect done";
}

void OpenPoseEstimation::onDisconnectImageProcessor()
{
    running2D = false;
    task2DKeypoints->stop();
    running3D = false;

    for (unsigned int i = 0; i < numImages; i++)
    {
        delete imageBuffer[i];
    }
    delete[] imageBuffer;
    delete rgbImageBuffer;
    delete maskedrgbImageBuffer;
    delete depthImageBuffer;
    delete openPoseResultImage[0];
    delete[] openPoseResultImage;
    delete stereoCalibration;

    imageKeypointBuffer.reset();
}

void OpenPoseEstimation::onExitImageProcessor()
{
}

void OpenPoseEstimation::process()
{
    if (running2D)
    {
        if (!waitForImages(providerName))
        {
            ++timeoutCounter2d;
            ARMARX_WARNING << "Timeout or error while waiting for images"
                           << " (#timeout " << timeoutCounter2d
                           << ", #read error " << readErrorCounter2d
                           << ", #success " << sucessCounter2d << ")";
        }
        else
        {
            if (static_cast<unsigned int>(getImages(providerName, imageBuffer, imageMetaInfo)) != numImages)
            {
                ++readErrorCounter2d;
                ARMARX_WARNING << "Unable to transfer or read images"
                               << " (#timeout " << timeoutCounter2d
                               << ", #read error " << readErrorCounter2d
                               << ", #success " << sucessCounter2d << ")";
                return;
            }
            else
            {
                ++sucessCounter2d;
                if (mode == OpenPoseEstimationMode::FromTopic)
                {
                    imageKeypointBuffer->addRGBImage(imageBuffer[0], imageMetaInfo->timeProvided);
                    if (imageKeypointBuffer->addDepthImage(imageBuffer[1], imageMetaInfo->timeProvided))
                    {
                        timeProvidedImage = imageMetaInfo->timeProvided;
                        imageUpdated = true;
                    }
                }
                else
                {
                    {
                        std::unique_lock lock_rgb(rgbImageBufferMutex);
                        ::ImageProcessor::CopyImage(imageBuffer[0], rgbImageBuffer);
                    }

                    if (numImages == 2)
                    {
                        std::unique_lock lock_depth(depthImageBufferMutex);
                        ::ImageProcessor::CopyImage(imageBuffer[1], depthImageBuffer);
                    }
                    timeProvidedImage = imageMetaInfo->timeProvided;
                    imageUpdated = true;
                }
            }
        }
    }
    else
    {
        usleep(10000);
    }
}

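// Main estimation loop (executed by task2DKeypoints): wait for a fresh image set, generate 2D
// keypoints (from OpenPose or from the topic), optionally lift them to 3D, filter them, report
// to the listener topics, and provide the result image.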
void OpenPoseEstimation::run()
{
    // If we receive keypoints from the topic, we don't need to set up the OpenPose environment
    if (mode != OpenPoseEstimationMode::FromTopic)
    {
        setupOpenPoseEnvironment();
    }

    while (running2D && task2DKeypoints->isRunning())
    {
        if (!imageUpdated)
        {
            usleep(100);
            continue;
        }

        imageUpdated = false;

        // 1. If the mode is not listening to the topic, create 2D keypoints from the images
        if (mode == OpenPoseEstimationMode::FromDepthImage || mode == OpenPoseEstimationMode::FromStereoImage)
        {
            calculate2DFromOpenPose();
        }
        else if (mode == OpenPoseEstimationMode::FromTopic)
        {
            calculate2DFromTopic();
        }

        if (running3D && localRobot && numImages > 1)
        {
            // 2. Calculate 3D values
            RemoteRobot::synchronizeLocalCloneToTimestamp(localRobot, robotStateInterface, timeProvidedImage);
            if (mode == OpenPoseEstimationMode::FromDepthImage || mode == OpenPoseEstimationMode::FromTopic)
            {
                calculate3DFromDepthImage(keypointManager);
            }
            else if (mode == OpenPoseEstimationMode::FromStereoImage)
            {
                calculate3DFromStereoImage(keypointManager);
            }
            else
            {
                // ARMARX_ERROR << "Something went really wrong. The mode is in an unexpected state!! Skipping frame...";
                continue;
            }

            // 3. Filter based on workspacePolygon
            {
                std::unique_lock lock(keypointManagerMutex);
                std::vector<KeypointObjectPtr>::iterator iter = keypointManager->getData().begin();
                while (iter != keypointManager->getData().end())
                {
                    KeypointObjectPtr p = *iter;
                    // filterKeypointsBasedOnWorkspacePolygon(p);
                    if (p->size() == 0)
                    {
                        iter = keypointManager->getData().erase(iter);
                    }
                    else
                    {
                        iter++;
                    }
                }
            }

            // 4. Filter to nearest objects (optional)
            if (reportOnlyNearestPerson)
            {
                filterToNearest();
            }

            // 5. Visualize and report to topic
            visualize3DKeypoints();

            ARMARX_VERBOSE << deactivateSpam(0.5) << "Reporting 2D keypoints for " << keypointManager->getData().size() << " objects";
            listener2DPrx->report2DKeypoints(keypointManager->toIce2D(), timeProvidedImage);
            listener2DPrx->report2DKeypointsNormalized(keypointManager->toIce2D_normalized(imageProviderInfo.imageFormat.dimension.width, imageProviderInfo.imageFormat.dimension.height), timeProvidedImage);
            ARMARX_VERBOSE << deactivateSpam(0.5) << "Reporting 3D keypoints for " << keypointManager->getData().size() << " objects";
            listener3DPrx->report3DKeypoints(keypointManager->toIce3D(localRobot), timeProvidedImage);
        }
        else
        {
            ARMARX_VERBOSE << deactivateSpam(0.5) << "Reporting 2D keypoints for " << keypointManager->getData().size() << " objects";
            listener2DPrx->report2DKeypoints(keypointManager->toIce2D(), timeProvidedImage);
            listener2DPrx->report2DKeypointsNormalized(keypointManager->toIce2D_normalized(imageProviderInfo.imageFormat.dimension.width, imageProviderInfo.imageFormat.dimension.height), timeProvidedImage);
        }

        // Always provide result image
        provideResultImages(openPoseResultImage, imageMetaInfo);
    }
}

void OpenPoseEstimation::setupOpenPoseEnvironment()
{
    ARMARX_INFO << "setting up open pose environment";
    TIMING_START(setupOpenPose);

    const op::Point<int> netInputSize = op::flagsToPoint(getProperty<std::string>("OP_net_resolution").getValue(), "-1x368");
    const op::Point<int> outputSize = op::flagsToPoint(getProperty<std::string>("OP_output_resolution").getValue(), "-1x-1");
    int scaleNumber = getProperty<int>("OP_scale_number").getValue();
    double scaleGap = getProperty<double>("OP_scale_gap").getValue();
    scaleAndSizeExtractor = std::make_shared<op::ScaleAndSizeExtractor>(netInputSize, outputSize, scaleNumber, scaleGap);

    cvMatToOpInput = std::make_shared<op::CvMatToOpInput>(poseModel);
    cvMatToOpOutput.reset(new op::CvMatToOpOutput());
    opOutputToCvMat.reset(new op::OpOutputToCvMat());

    std::string modelFolder;

    modelFolder = getProperty<std::string>("OP_model_folder").getValue();
    auto found = ArmarXDataPath::getAbsolutePath(modelFolder, modelFolder, {"/usr/share/OpenPose/", OPENPOSE_MODELS});
    if (!found)
    {
        ARMARX_ERROR << "Could not find model folder!";
        return;
    }
    if (!modelFolder.empty() && *modelFolder.rbegin() != '/')
    {
        modelFolder += "/";
    }
    ARMARX_INFO << "Found model path at: " << modelFolder;
    int numGpuStart = getProperty<int>("OP_num_gpu_start").getValue();
    poseExtractorCaffe = std::make_shared<op::PoseExtractorCaffe>(poseModel, modelFolder, numGpuStart);
    poseExtractorCaffe->initializationOnThread();

    TIMING_END(setupOpenPose);
}

OpenPoseEstimation::PoseKeypoints OpenPoseEstimation::getPoseKeypoints(CByteImage* imageBuffer)
{
    // Step 1 - Convert image to cv::Mat
    IplImage* iplImage = IplImageAdaptor::Adapt(imageBuffer);
    cv::Mat inputImage = cv::cvarrToMat(iplImage);
    const op::Point<int> imageSize {inputImage.cols, inputImage.rows};

    // Step 2 - Get desired scale sizes
    std::vector<double> scaleInputToNetInputs;
    std::vector<op::Point<int>> netInputSizes;
    double scaleInputToOutput;
    op::Point<int> outputResolution;
    std::tie(scaleInputToNetInputs, netInputSizes, scaleInputToOutput, outputResolution) = scaleAndSizeExtractor->extract(imageSize);

    // Step 3 - Format input image to OpenPose input and output formats
    const auto netInputArray = cvMatToOpInput->createArray(inputImage, scaleInputToNetInputs, netInputSizes);

    // Step 4 - Estimate poseKeypoints
    poseExtractorCaffe->forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
    const PoseKeypoints poseKeypoints = poseExtractorCaffe->getPoseKeypoints();

    return poseKeypoints;
}


void OpenPoseEstimation::Render2DResultImage(const CByteImage& inputImage, const CByteImage& maskedInputImage, KeypointManagerPtr keypointManager, CByteImage& resultImage, op::PoseModel poseModel, float renderThreshold, int brightnessIncrease)
{
    IdNameMap m1 = keypointManager->getIdNameMap();
    IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
    ARMARX_CHECK_EXPRESSION(m1.size() == poseBodyPartMapping.size() && std::equal(m1.begin(), m1.end(), poseBodyPartMapping.begin()))
            << "The idNameMap used in the keypointManager must be equal to the poseBodyPartMapping defined by the given poseModel.";

    // Creating an OpenPose representation of the keypoints stored in the keypointManager
    const int objects = keypointManager->getData().size();
    const int numberKeypoints = m1.size();

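    // op::Array with dimensions {object, keypoint, channel}, where the three channels per
    // keypoint are x, y, and confidence.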
    PoseKeypoints opKeypoints({objects, numberKeypoints, 3});
    int objectNumber = 0;
    for (KeypointObjectPtr obj : keypointManager->getData())
    {
        for (int i = 0; i < numberKeypoints; i++)
        {
            const int index = (objectNumber * numberKeypoints + i) * opKeypoints.getSize(2);
            KeypointPtr p = obj->getNode(static_cast<unsigned int>(i));
            if (p)
            {
                opKeypoints[index] = p->get2D()._x;
                opKeypoints[index + 1] = p->get2D()._y;
                opKeypoints[index + 2] = p->getConfidence();
            }
            else
            {
                opKeypoints[index] = 0.0f;
                opKeypoints[index + 1] = 0.0f;
                opKeypoints[index + 2] = 0.0f;
            }
        }
        objectNumber++;
    }
    std::cout << opKeypoints.toString() << std::endl;

    // Render image
    Render2DResultImage(inputImage, maskedInputImage, opKeypoints, resultImage, poseModel, renderThreshold, brightnessIncrease);
}


void OpenPoseEstimation::VisualizeTransparentImageMask(CByteImage& resultImage, const CByteImage& maskedInputImage, int brightnessIncrease, const CByteImage& inputImage)
{
    Ice::Byte mask[3] = {0, 255, 0};
    auto pixelCount = inputImage.width * inputImage.height;

    // first create a smoothed mask
    CByteImage imageMask(maskedInputImage.width, maskedInputImage.height, CByteImage::eGrayScale);
    CByteImage smoothedImageMask(&imageMask);
    ::ImageProcessor::Zero(&imageMask);
    if (brightnessIncrease > 0)
    {
        for (int i = 0; i < pixelCount; i += 1)
        {
            if (maskedInputImage.pixels[i * 3] == mask[0] &&
                maskedInputImage.pixels[i * 3 + 1] == mask[1] && maskedInputImage.pixels[i * 3 + 2] == mask[2])
            {
                imageMask.pixels[i] = 255;
            }
        }
        ::ImageProcessor::GaussianSmooth5x5(&imageMask, &smoothedImageMask);
    }
    // now apply mask to image
    for (int i = 0; i < pixelCount; i += 1)
    {
        if (brightnessIncrease > 0)
        {
            float perc = static_cast<float>(smoothedImageMask.pixels[i]) / 255.f;
            int effectiveBrightnessIncrease = brightnessIncrease * perc;
            resultImage.pixels[i * 3] = std::min<int>(resultImage.pixels[i * 3] + effectiveBrightnessIncrease, 255);
            resultImage.pixels[i * 3 + 1] = std::min<int>(resultImage.pixels[i * 3 + 1] + effectiveBrightnessIncrease, 255);
            resultImage.pixels[i * 3 + 2] = std::min<int>(resultImage.pixels[i * 3 + 2] + effectiveBrightnessIncrease, 255);
        }
        // use original mask
        else if (brightnessIncrease < 0)
        {
            if (maskedInputImage.pixels[i * 3] == mask[0] &&
                maskedInputImage.pixels[i * 3 + 1] == mask[1] && maskedInputImage.pixels[i * 3 + 2] == mask[2])
            {
                resultImage.pixels[i * 3] = maskedInputImage.pixels[i * 3];
                resultImage.pixels[i * 3 + 1] = maskedInputImage.pixels[i * 3 + 1];
                resultImage.pixels[i * 3 + 2] = maskedInputImage.pixels[i * 3 + 2];
            }
        }
    }
}

void OpenPoseEstimation::Render2DResultImage(const CByteImage& inputImage, const CByteImage& maskedInputImage, OpenPoseEstimation::PoseKeypoints& keypoints, CByteImage& resultImage, op::PoseModel poseModel, float renderThreshold, int brightnessIncrease)
{
    ARMARX_CHECK_EXPRESSION(inputImage.width == resultImage.width);
    ARMARX_CHECK_EXPRESSION(inputImage.height == resultImage.height);
    ::ImageProcessor::CopyImage(&inputImage, &resultImage);

    VisualizeTransparentImageMask(resultImage, maskedInputImage, brightnessIncrease, inputImage);

    if (keypoints.getSize().empty())
    {
        return;
    }

    const std::vector<unsigned int>& posePartPairs = op::getPoseBodyPartPairsRender(poseModel);

    // Get colors for points and lines from OpenPose
    std::vector<float> keypointColors = op::getPoseColors(poseModel);

    const auto thicknessCircleRatio = 1.f / 75.f;
    const auto thicknessLineRatioWRTCircle = 0.75f;
    const auto area = inputImage.width * inputImage.height;
    const int numberKeypoints = keypoints.getSize(1);

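    // Line thickness and circle radius are scaled with each person's bounding-box size relative
    // to the image, similar to OpenPose's own rendering heuristics.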
    for (int person = 0; person < keypoints.getSize(0); person++)
    {
        const auto personRectangle = op::getKeypointsRectangle(keypoints, person, 0.1f);
        if (personRectangle.area() > 0)
        {
            // define size-dependent variables
            const auto ratioAreas = op::fastMin(1.f, op::fastMax(personRectangle.width / static_cast<float>(inputImage.width),
                                                personRectangle.height / static_cast<float>(inputImage.height)));
            const auto thicknessRatio = op::fastMax(positiveIntRound<float>(static_cast<float>(std::sqrt(area))
                                                    * thicknessCircleRatio * ratioAreas), 2);
            // Negative thickness in ivt::drawCircle means that a filled circle is to be drawn.
            const auto thicknessCircle = op::fastMax(1, (ratioAreas > 0.05f ? thicknessRatio : -1));
            const auto thicknessLine = op::fastMax(1, positiveIntRound(thicknessRatio * thicknessLineRatioWRTCircle));
            const auto radius = thicknessRatio / 2;

            // Draw Lines
            for (unsigned int i = 0; i < posePartPairs.size(); i = i + 2)
            {
                const int index1 = (person * numberKeypoints + static_cast<int>(posePartPairs[i])) * keypoints.getSize(2);
                const int index2 = (person * numberKeypoints + static_cast<int>(posePartPairs[i + 1])) * keypoints.getSize(2);

                float x1 = keypoints[index1 + 0];
                float y1 = keypoints[index1 + 1];
                float x2 = keypoints[index2 + 0];
                float y2 = keypoints[index2 + 1];

                if (!(x1 == 0.0f && y1 == 0.0f) && !(x2 == 0.0f && y2 == 0.0f))
                {
                    if (keypoints[index1 + 2] > renderThreshold && keypoints[index2 + 2] > renderThreshold)
                    {
                        unsigned int colorIndex = posePartPairs[i + 1] * 3;
                        ::PrimitivesDrawer::DrawLine(&resultImage,
                                                     Vec2d({x1, y1}),
                                                     Vec2d({x2, y2}),
                                                     static_cast<int>(keypointColors[colorIndex + 2]),
                                                     static_cast<int>(keypointColors[colorIndex + 1]),
                                                     static_cast<int>(keypointColors[colorIndex + 0]),
                                                     thicknessLine);
                    }
                }
            }

            // Draw points
            for (int i = 0; i < numberKeypoints; i++)
            {
                const int index = (person * numberKeypoints + i) * keypoints.getSize(2);
                float x = keypoints[index + 0];
                float y = keypoints[index + 1];

                if (!(x == 0.0f && y == 0.0f) && keypoints[index + 2] > renderThreshold)
                {
                    unsigned int colorIndex = static_cast<unsigned int>(i * 3);

                    ::PrimitivesDrawer::DrawCircle(&resultImage,
                                                   x,
                                                   y,
                                                   radius,
                                                   static_cast<int>(keypointColors[colorIndex + 2]),
                                                   static_cast<int>(keypointColors[colorIndex + 1]),
                                                   static_cast<int>(keypointColors[colorIndex + 0]),
                                                   thicknessCircle);
                }
            }
        }
    }
}


void OpenPoseEstimation::start(const Ice::Current&)
{
    if (running2D)
    {
        return;
    }
    else
    {
        ARMARX_INFO << "Starting OpenPoseEstimation";
        running2D = true;
        task2DKeypoints = new RunningTask<OpenPoseEstimation>(this, &OpenPoseEstimation::run);
        task2DKeypoints->start();
    }
}

void OpenPoseEstimation::stop(const Ice::Current&)
{
    if (running2D)
    {
        ARMARX_INFO << "Stopping OpenPoseEstimation";
        running2D = false;
        task2DKeypoints->stop(true);
    }
}

void OpenPoseEstimation::start3DPoseEstimation(const Ice::Current&)
{
    if (running3D)
    {
        return;
    }
    else
    {
        ARMARX_INFO << "Starting OpenPoseEstimation -- 3D";

        running3D = true;
        start();
    }
}

void OpenPoseEstimation::stop3DPoseEstimation(const Ice::Current&)
{
    if (running3D)
    {
        ARMARX_INFO << "Stopping OpenPoseEstimation -- 3D";
        running3D = false;

        // Remove all layers we might have used
        for (int i = 0; i < MAX_LAYER; i++)
        {
            debugDrawerTopic->removeLayer(layerName + std::to_string(i));
        }
    }
}


void OpenPoseEstimation::maskOutBasedOnDepth(CByteImage& image, int maxDepth)
{
    std::unique_lock lock(depthImageBufferMutex);

    if (maxDepth <= 0)
    {
        return;
    }
    int pixelCount = depthImageBuffer->width * depthImageBuffer->height;
    int depthThresholdmm = maxDepth;
    CByteImage maskImage(depthImageBuffer->width, depthImageBuffer->height, CByteImage::eGrayScale);
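    // The depth image packs each depth value (in mm, cf. depthThresholdmm) little-endian across
    // its three 8-bit channels (channel 0 = LSB), so the 24-bit value is reassembled before
    // thresholding.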
    for (int i = 0; i < pixelCount; i += 1)
    {
        int z_value = depthImageBuffer->pixels[i * 3 + 0]
                      + (depthImageBuffer->pixels[i * 3 + 1] << 8)
                      + (depthImageBuffer->pixels[i * 3 + 2] << 16);
        maskImage.pixels[i] = z_value > depthThresholdmm || z_value == 0 ? 0 : 255;
    }
    ::ImageProcessor::Erode(&maskImage, &maskImage, 5);
    ::ImageProcessor::Dilate(&maskImage, &maskImage, 20);
    for (int i = 0; i < pixelCount; i += 1)
    {
        if (maskImage.pixels[i] == 0)
        {
            image.pixels[i * 3] = 0;
            image.pixels[i * 3 + 1] = 255;
            image.pixels[i * 3 + 2] = 0;
        }
    }
}

DrawColor24Bit OpenPoseEstimation::getDominantColorOfPatch(const CByteImage& image, const Vec2d& point, int windowSize) const
{
    if (point.x < 0 || point.y < 0 || point.x >= image.width || point.y >= image.height)
    {
        return DrawColor24Bit {0, 0, 0}; // Black
    }
    int divisor = 256 / 3; // split into 3*3*3 bins
    typedef std::tuple<Ice::Byte, Ice::Byte, Ice::Byte> RGBTuple;
    std::map<RGBTuple, int> histogram;
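    // Each channel is coarsely quantized (bin width = 'divisor'); the most frequent quantized
    // color within the window wins and is scaled back to an RGB value on return.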
    int halfWindowSize = static_cast<int>(windowSize * 0.5);
    int left = std::max<int>(0, static_cast<int>(point.x) - halfWindowSize);
    int top = std::max<int>(0, static_cast<int>(point.y) - halfWindowSize);
    int right = std::min<int>(image.width, static_cast<int>(point.x) + halfWindowSize);
    int bottom = std::min<int>(image.height, static_cast<int>(point.y) + halfWindowSize);

    for (int x = left; x < right; x++)
    {
        for (int y = top; y < bottom; y++)
        {
            int pixelPos = (y * image.width + x) * 3;
            auto tuple = std::make_tuple<Ice::Byte, Ice::Byte, Ice::Byte>(static_cast<Ice::Byte>(image.pixels[pixelPos] / divisor),
                         static_cast<Ice::Byte>(image.pixels[pixelPos + 1] / divisor),
                         static_cast<Ice::Byte>(image.pixels[pixelPos + 2] / divisor));
            if (histogram.count(tuple))
            {
                histogram.at(tuple)++;
            }
            else
            {
                histogram[tuple] = 1;
            }
        }
    }

    int maxHistogramValue = 0;
    RGBTuple dominantColor;
    for (auto& pair : histogram)
    {
        if (pair.second > maxHistogramValue)
        {
            dominantColor = pair.first;
            maxHistogramValue = pair.second;
        }
    }
    return DrawColor24Bit {static_cast<Ice::Byte>(std::get<0>(dominantColor) * divisor),
                           static_cast<Ice::Byte>(std::get<1>(dominantColor) * divisor),
                           static_cast<Ice::Byte>(std::get<2>(dominantColor) * divisor)};
}


void OpenPoseEstimation::onMessage(const Texting::TextMessage& text, const Ice::Current&)
{
    using json = nlohmann::json;
    // parsing message
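    // Expected layout (reconstructed from the parsing below):
    //   { "timestamp": <long>, "values": { "<MPII label>": { "position": { "x": ..., "y": ... } }, ... } }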
    json jsonView = json::parse(text.message);

    // Reading values
    long timestamp = jsonView["timestamp"].get<long>();
    json jsonValues = jsonView["values"];

    // Constructing and populating a KeypointManager
    IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
    KeypointManagerPtr manager = std::make_shared<KeypointManager>(poseBodyPartMapping);
    KeypointObjectPtr object = std::make_shared<KeypointObject>();

    float adjustingWidth = static_cast<float>(imageProviderInfo.imageFormat.dimension.width) / static_cast<float>(this->incomingKeypointDimensions.width);
    float adjustingHeight = static_cast<float>(imageProviderInfo.imageFormat.dimension.height) / static_cast<float>(this->incomingKeypointDimensions.height);

    for (json::iterator it = jsonValues.begin(); it != jsonValues.end(); ++it)
    {
        std::string jsonLabel = it.key();

        auto pair = MPII_TO_MPI.at(jsonLabel);
        unsigned int id = pair.first;
        std::string name = pair.second;

        json jsonPosition = it.value();
        json jsonPoint = jsonPosition["position"];
        float x = jsonPoint["y"]; // x and y are swapped in the SecondHands output
        float y = jsonPoint["x"];

        // Adjust keypoints to fit the image dimensions
        x *= adjustingWidth;
        y *= adjustingHeight;

        if (x > 0.0f && y > 0.0f)
        {
            KeypointPtr keypoint = std::make_shared<Keypoint>(x,
                                                              y,
                                                              id,
                                                              name,
                                                              1.0f); // The SecondHands topic does not provide a confidence, so we simply use 1.0 for every keypoint.
            object->insertNode(keypoint);
        }
    }
    manager->getData().push_back(object);

    // Inserting into buffer
    if (imageKeypointBuffer->addKeypoints(manager, timestamp))
    {
        timeProvidedImage = timestamp;
        imageUpdated = true;
    }
}


KeypointManagerPtr OpenPoseEstimation::generate2DKeypoints(PoseKeypoints& keypoints, const CByteImage& rgbImage) const
{
    IdNameMap poseBodyPartMapping = getPoseBodyPartMapping(poseModel);
    KeypointManagerPtr manager = std::make_shared<KeypointManager>(poseBodyPartMapping);

    if (keypoints.getSize().empty())
    {
        debugDrawerTopic->removeLayer(layerName + std::to_string(layerCounter)); // TODO find better way to clear if no persons are visible
        return manager;
    }

    int entities = keypoints.getSize().at(0);
    int points = keypoints.getSize().at(1);
    ARMARX_CHECK_EXPRESSION(points == static_cast<int>(poseBodyPartMapping.size()) - 1);

    for (int i = 0; i < entities; i++)
    {
        KeypointObjectPtr object = std::make_shared<KeypointObject>();
        for (int id = 0; id < points; id++)
        {
            float x = keypoints.at({i, id, 0});
            float y = keypoints.at({i, id, 1});
            if (x == 0.0f && y == 0.0f)
            {
                continue; // Invalid keypoint from OpenPose
            }

            KeypointPtr keypoint = std::make_shared<Keypoint>(x,
                                                              y,
                                                              static_cast<unsigned int>(id),
                                                              poseBodyPartMapping.at(static_cast<unsigned int>(id)),
                                                              static_cast<float>(keypoints.at({i, id, 2})));

            keypoint->setDominantColor(getDominantColorOfPatch(rgbImage, Vec2d {x, y}, rgbImage.width / 50));
            object->insertNode(keypoint);
        }

        if (static_cast<int>(object->size()) >= minimalValidKeypoints)
        {
            manager->getData().push_back(object);
        }
    }

    return manager;
}


void OpenPoseEstimation::filterKeypointsBasedOnWorkspacePolygon(KeypointObjectPtr object)
{
    if (!localRobot)
    {
        return;
    }

    for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
    {
        KeypointPtr point = object->getNode(i);
        if (point && point->is3DSet())
        {
            FramedPositionPtr pos = point->get3D()->toGlobal(localRobot); // Changing frame of a copy
            if (!workspacePolygon.isInside(pos))
            {
                ARMARX_VERBOSE << deactivateSpam(1) << "removing keypoint because it lies outside the workspace";
                object->removeNode(i);
            }
        }
    }
}


void OpenPoseEstimation::filterToNearest()
{
    this->keypointManager->filterToNearestN(1);
}


void OpenPoseEstimation::calculate2DFromOpenPose()
{
    std::unique_lock lock(keypointManagerMutex);
    if (mode == OpenPoseEstimationMode::FromDepthImage)
    {
        // Run OpenPose on the first image only
        std::unique_lock lock_rgb(rgbImageBufferMutex);
        // remove pixels that are far away
        ::ImageProcessor::CopyImage(rgbImageBuffer, maskedrgbImageBuffer);
        if (numImages > 1)
        {
            maskOutBasedOnDepth(*maskedrgbImageBuffer, getProperty<int>("MaxDepth").getValue());
        }
        PoseKeypoints p = getPoseKeypoints(maskedrgbImageBuffer);
        keypointManager = generate2DKeypoints(p, *maskedrgbImageBuffer);

        // Render result image
        {
            std::unique_lock lock(resultImageBufferMutex);
            OpenPoseEstimation::Render2DResultImage(*rgbImageBuffer, *maskedrgbImageBuffer, p, *openPoseResultImage[0], poseModel, renderThreshold, getProperty<int>("MaskBrightnessIncrease").getValue());
        }
    }
    else if (mode == OpenPoseEstimationMode::FromStereoImage)
    {
        // Run OpenPose on both images
        std::unique_lock lock_rgb(rgbImageBufferMutex);
        std::unique_lock lock_depth(depthImageBufferMutex);
        PoseKeypoints left = getPoseKeypoints(rgbImageBuffer);
        keypointManager = generate2DKeypoints(left, *rgbImageBuffer);
        PoseKeypoints right = getPoseKeypoints(depthImageBuffer);
        KeypointManagerPtr m2 = generate2DKeypoints(right, *depthImageBuffer);
        keypointManager->matchWith(m2);

        // Render result image
        {
            std::unique_lock lock(resultImageBufferMutex);
            OpenPoseEstimation::Render2DResultImage(*rgbImageBuffer, *rgbImageBuffer, left, *openPoseResultImage[0], poseModel, renderThreshold, getProperty<int>("MaskBrightnessIncrease").getValue());
        }
    }
}


void OpenPoseEstimation::calculate2DFromTopic()
{
    // The ImageKeypointBuffer contains a triple for the timestamp saved in this->timeProvidedImage
    ImageKeypointBuffer::ImageKeypointTriplePtr imageKeypointTriple = imageKeypointBuffer->getTripleAtTimestamp(timeProvidedImage, true);
    std::unique_lock lock_rgb(rgbImageBufferMutex);
    ::ImageProcessor::CopyImage(imageKeypointTriple->rgbImage, rgbImageBuffer);
    std::unique_lock lock_depth(depthImageBufferMutex);
    ::ImageProcessor::CopyImage(imageKeypointTriple->depthImage, depthImageBuffer);
    keypointManager = imageKeypointTriple->keypoints;

    // Render result image
    {
        std::unique_lock lock(resultImageBufferMutex);
        OpenPoseEstimation::Render2DResultImage(*rgbImageBuffer, *rgbImageBuffer, keypointManager, *openPoseResultImage[0], poseModel, renderThreshold, getProperty<int>("MaskBrightnessIncrease").getValue());
    }
}


void OpenPoseEstimation::calculate3DFromDepthImage(KeypointManagerPtr manager)
{
    if (!localRobot)
    {
        return;
    }

    const int depthThreshold = getProperty<int>("MaxDepthDifference").getValue();

    for (KeypointObjectPtr object : manager->getData())
    {
        if (object->size() == 0)
        {
            continue;
        }

        std::map<unsigned int, int> depthStorage; // We do not want to store perspective depth values in a Keypoint, since we transfer them into world coordinates (in the camera frame) anyway.
        std::vector<int> depthsCopy;

        // Get camera depth information from image coordinates
        for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
        {
            KeypointPtr point = object->getNode(i);
            if (point)
            {
                int depth = getMedianDepthFromImage(static_cast<int>(point->get2D()._x), static_cast<int>(point->get2D()._y), radius);
                depthStorage[i] = depth;
                depthsCopy.push_back(depth);
            }
        }

        // Find outliers in the depth values and set them to the median of all depth values
        std::sort(depthsCopy.begin(), depthsCopy.end());
        const int medianDepth = depthsCopy.at(depthsCopy.size() / 2);
        for (auto& m : depthStorage)
        {
            int depth = m.second;
            if (depth > medianDepth + depthThreshold || depth < medianDepth - depthThreshold)
            {
                m.second = medianDepth;
            }
        }

        // Transform pixel + depth into world coordinates
        for (unsigned int i = 0; i < op::getPoseBodyPartMapping(poseModel).size(); i++)
        {
            KeypointPtr point = object->getNode(i);
            if (point)
            {
                Vec2d imagePoint;
                imagePoint.x = point->get2D()._x;
                imagePoint.y = point->get2D()._y;
                Vec3d result;
                ARMARX_CHECK_EXPRESSION(calibration);
                calibration->ImageToWorldCoordinates(imagePoint, result, static_cast<float>(depthStorage.at(i)), useDistortionParameters);

                point->set3D(new FramedPosition(Eigen::Vector3f(result.x, result.y, result.z), cameraNodeName, localRobot->getName()));
            }
        }
    }
}


void OpenPoseEstimation::calculate3DFromStereoImage(KeypointManagerPtr manager)
{
    for (KeypointObjectPtr object : manager->getData())
    {
        for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
        {
            KeypointPtr point = object->getNode(i);
            if (point)
            {
                ARMARX_CHECK_EXPRESSION(point->isStereo());
                auto pair = point->getStereo2D();
                const Vec2d imagePointL = {pair.first._x, pair.first._y};
                const Vec2d imagePointR = {pair.second._x, pair.second._y};
                ARMARX_DEBUG << "PointL: " << imagePointL.x << " " << imagePointL.y;
                ARMARX_DEBUG << "PointR: " << imagePointR.x << " " << imagePointR.y;

                Vec3d result;
                PointPair3d rD;
                stereoCalibration->Calculate3DPoint(imagePointL, imagePointR, result, false, useDistortionParameters, &rD);
                ARMARX_DEBUG << "IVT - ConnectionLine: " << rD.p1.x << " " << rD.p1.y << " " << rD.p1.z << " --- " << rD.p2.x << " " << rD.p2.y << " " << rD.p2.z;

                point->set3D(new FramedPosition(Eigen::Vector3f(result.x, result.y, result.z), cameraNodeName, localRobot->getName()));
            }
        }
    }
}


void OpenPoseEstimation::visualize3DKeypoints()
{
    std::unique_lock lock(keypointManagerMutex);
    debugDrawerTopic->removeLayer(layerName + std::to_string(layerCounter));

    viz::Layer openPoseArVizLayer = arviz.layer(layerName);
    if (keypointManager->getData().empty() || !localRobot)
    {
        arviz.commit(openPoseArVizLayer);
        return;
    }

    const std::vector<unsigned int>& posePartPairs = op::getPoseBodyPartPairsRender(poseModel);
    const std::vector<float> keypointColors = op::getPoseColors(poseModel);

    int human = 1;

    layerCounter = (layerCounter == MAX_LAYER - 1) ? 0 : layerCounter + 1;
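    // Advance through the MAX_LAYER rotating DebugDrawer layers: the previous layer was just
    // removed above, and this frame's skeleton is drawn onto the next layer in the cycle.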

    int objectIndex = 0;
    for (KeypointObjectPtr object : keypointManager->getData())
    {
        std::string objectName = "object_" + std::to_string(objectIndex) + "_";

        // Draw Lines
        for (unsigned int i = 0; i < posePartPairs.size(); i = i + 2)
        {
            KeypointPtr point1 = object->getNode(posePartPairs[i]);
            KeypointPtr point2 = object->getNode(posePartPairs[i + 1]);

            if (point1 && point2)
            {
                FramedPositionPtr p1 = point1->get3D()->toGlobal(localRobot);
                FramedPositionPtr p2 = point2->get3D()->toGlobal(localRobot);

                std::string name = point1->getName() + "_" + point2->getName();
                unsigned int colorIndex = posePartPairs[i + 1] * 3;
                DrawColor color({keypointColors[colorIndex + 2] / 255.f, keypointColors[colorIndex + 1] / 255.f, keypointColors[colorIndex] / 255.f, 1.0f});
                debugDrawerTopic->setLineVisu(layerName + std::to_string(layerCounter), objectName + name, new Vector3(p1->x, p1->y, p1->z), new Vector3(p2->x, p2->y, p2->z), 10, color);
            }
        }

        Keypoint3DMap kpm;

        // Draw Points
        for (unsigned int i = 0; i < getPoseBodyPartMapping(poseModel).size(); i++)
        {
            KeypointPtr point = object->getNode(i);
            if (point)
            {
                FramedPositionPtr p = point->get3D()->toGlobal(localRobot);
                auto color24Bit = point->getDominantColor();
                auto color = DrawColor {0.0039215f * color24Bit.b, 0.0039215f * color24Bit.g, 0.0039215f * color24Bit.r, 1.0f};
                debugDrawerTopic->setSphereVisu(layerName + std::to_string(layerCounter), objectName + point->getName(), new Vector3(p->x, p->y, p->z), color, 20);
                Keypoint3D kp;
                kp.label = point->getName();
                kp.x = p->x;
                kp.y = p->y;
                kp.z = p->z;
                kp.confidence = point->getConfidence();
                kpm[point->getName()] = kp;
            }
        }

        visionx::viz::HumanPoseBody25::addPoseToLayer(kpm, openPoseArVizLayer, "human" + std::to_string(human));
        human++;
        objectIndex++;
    }

    arviz.commit(openPoseArVizLayer);
}


int OpenPoseEstimation::getMedianDepthFromImage(int x, int y, int radius) const
{
    std::vector<int> depths;
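    // Collect all valid (non-zero) depth samples within a circle of the given radius around
    // (x, y); the median of those samples is robust against single-pixel noise and depth holes.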
    for (int xoffset = -radius; xoffset < radius; xoffset++)
    {
        int xo = x + xoffset;
        if (xo < 0 || xo >= depthImageBuffer->width)
        {
            continue;
        }
        for (int yoffset = -radius; yoffset < radius; yoffset++)
        {
            int yo = y + yoffset;
            if (yo < 0 || yo >= depthImageBuffer->height)
            {
                continue;
            }

            // Check whether (x,y) is in the circle:
            if (xoffset * xoffset + yoffset * yoffset <= radius * radius)
            {
                unsigned int pixelPos = static_cast<unsigned int>(3 * (yo * depthImageBuffer->width + xo));
                int z_value = depthImageBuffer->pixels[pixelPos + 0]
                              + (depthImageBuffer->pixels[pixelPos + 1] << 8)
                              + (depthImageBuffer->pixels[pixelPos + 2] << 16);
                if (z_value > 0)
                {
                    depths.push_back(z_value);
                }
            }
        }
    }
    std::sort(depths.begin(), depths.end());

    return depths.empty() ? 0 : depths[depths.size() / 2];
}