#include <opencv2/core/hal/interface.h>
#include <opencv2/imgproc/imgproc.hpp>

#include <IceUtil/Time.h>

#include <Calibration/Calibration.h>
#include <Image/ImageProcessor.h>

#ifdef INCLUDE_BODY_TRACKING
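// Converts a k4a color image into an IVT CByteImage. The two formats handled
// below are YUY2 (converted via OpenCV) and BGRA32 (converted with a manual
// per-pixel channel swap); all other k4a color formats are rejected.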
k4aToIvtImage(const k4a::image& color_image, ::CByteImage& result)
{
    if (color_image.get_format() == K4A_IMAGE_FORMAT_COLOR_YUY2)
    {
        cv::Mat yuy2_image(color_image.get_height_pixels(),
                           color_image.get_width_pixels(),
                           CV_8UC2, // assumed type argument: YUY2 packs 2 bytes per pixel
                           const_cast<uint8_t*>(color_image.get_buffer()));
        cv::Mat rgb_image;
        cv::cvtColor(yuy2_image, rgb_image, cv::COLOR_YUV2RGB_YUY2);
    else if (color_image.get_format() == K4A_IMAGE_FORMAT_COLOR_BGRA32)
    {
        auto cw = static_cast<unsigned int>(color_image.get_width_pixels());
        auto ch = static_cast<unsigned int>(color_image.get_height_pixels());

        auto color_buffer = color_image.get_buffer();
        auto rgb_buffer_ivt = result.pixels;

        for (unsigned int y = 0; y < ch; ++y)
        {
            for (unsigned int x = 0; x < cw; ++x)
            {
                // BGRA (k4a) -> RGB (IVT): swap the red and blue channels.
                rgb_buffer_ivt[index_ivt] = color_buffer[index_k4a + 2];
                rgb_buffer_ivt[index_ivt + 1] = color_buffer[index_k4a + 1];
                rgb_buffer_ivt[index_ivt + 2] = color_buffer[index_k4a + 0];
            }
        }
    }
    else
    {
        throw std::runtime_error("Unsupported color format in k4a image");
    }
}
#ifdef INCLUDE_BODY_TRACKING
printBodyInformation(k4abt_body_t body)
{
    for (int i = 0; i < static_cast<int>(K4ABT_JOINT_COUNT); i++)
    {
        const k4a_float3_t position = body.skeleton.joints[i].position;
        const k4a_quaternion_t orientation = body.skeleton.joints[i].orientation;
        const k4abt_joint_confidence_level_t confidence_level =
            body.skeleton.joints[i].confidence_level;

            << "Position[mm] (" << position.v[0] << "," << position.v[1] << ","
            << position.v[2] << "); "
            << "Orientation (" << orientation.v[0] << "," << orientation.v[1] << ","
            << orientation.v[2] << "," << orientation.v[3] << "); "
            << "Confidence Level " << confidence_level;
    }
}
k4aImageFormatToString(k4a_image_format_t format)
{
    switch (format)
    {
        case K4A_IMAGE_FORMAT_COLOR_MJPG:
            return "COLOR_MJPG";
        case K4A_IMAGE_FORMAT_COLOR_NV12:
            return "COLOR_NV12";
        case K4A_IMAGE_FORMAT_COLOR_YUY2:
            return "COLOR_YUY2";
        case K4A_IMAGE_FORMAT_COLOR_BGRA32:
            return "COLOR_BGRA32";
        case K4A_IMAGE_FORMAT_DEPTH16:
            return "DEPTH16";
        case K4A_IMAGE_FORMAT_IR16:
            return "IR16";
        case K4A_IMAGE_FORMAT_CUSTOM:
            return "CUSTOM";
    std::string name = "duration " + description + " in [ms]";

    std::lock_guard g{metaInfoMtx};

    std::lock_guard g{debugObserverMtx};
    defineOptionalProperty<k4a_color_resolution_t>(
        "ColorResolution", K4A_COLOR_RESOLUTION_720P, "Resolution of the RGB camera image.")
        .map("0", K4A_COLOR_RESOLUTION_OFF)
        .map("720", K4A_COLOR_RESOLUTION_720P)
        .map("1080", K4A_COLOR_RESOLUTION_1080P)
        .map("1440", K4A_COLOR_RESOLUTION_1440P)
        .map("1536", K4A_COLOR_RESOLUTION_1536P)
        .map("2160", K4A_COLOR_RESOLUTION_2160P)
        .map("3072", K4A_COLOR_RESOLUTION_3072P);
    defineOptionalProperty<k4a_depth_mode_t>(
        "DepthMode", K4A_DEPTH_MODE_NFOV_UNBINNED, "Resolution/mode of the depth camera image.")
        .setCaseInsensitive(true)
        .map("OFF", K4A_DEPTH_MODE_OFF)
        .map("NFOV_2X2BINNED", K4A_DEPTH_MODE_NFOV_2X2BINNED)
        .map("NFOV_UNBINNED", K4A_DEPTH_MODE_NFOV_UNBINNED)
        .map("WFOV_2X2BINNED", K4A_DEPTH_MODE_WFOV_2X2BINNED)
        .map("WFOV_UNBINNED", K4A_DEPTH_MODE_WFOV_UNBINNED)
        .map("PASSIVE_IR", K4A_DEPTH_MODE_PASSIVE_IR);
    defineOptionalProperty<float>(
        "MaxDepth",
        "Max. allowed depth value in mm. Depth values above this "
        "threshold will be set to nan.");
    defineOptionalProperty<float>(
        "CaptureTimeOffset",
        "In Milliseconds. Time offset between capturing the image on "
        "the hardware and receiving the image in this process.",
    defineOptionalProperty<k4a_image_format_t>(
        "ColorFormat", K4A_IMAGE_FORMAT_COLOR_BGRA32, "Color format of the RGB camera image.")
        .map("BGRA", K4A_IMAGE_FORMAT_COLOR_BGRA32)
        .map("YUY2", K4A_IMAGE_FORMAT_COLOR_YUY2);
    defs->optional(enableColorUndistortion,
                   "EnableColorUndistortion",
                   "Undistort the color images using the full 8 radial and tangential distortion "
                   "parameters provided by the Azure Kinect.\n"
                   "This can help for processing tasks which cannot handle radial parameters k3-k6.\n"
                   "Note that this drastically reduces the FPS (to something like 3).");
    defs->optional(externalCalibrationFilePath,
                   "ExternalCalibrationFilePath",
                   "Path to an optional external calibration file, "
                   "which has a camera matrix and distortion"
    defs->optional(mDeviceId, "device_id", "ID of the device.");
    defs->optional(robotName, "robotName");
    defs->optional(bodyCameraFrameName, "bodyCameraFrameName");
    defs->optional(framerate.value,
                   "framerate.image",
                   "The framerate of RGB-D images [frames per second]."
                   "\nNote that the point cloud and body tracking frame rates are controlled by"
                   " the properties 'framerate' (point cloud) and 'framerate.bodyTracking'"
                   " (body tracking), respectively.");
#ifdef INCLUDE_BODY_TRACKING
    defs->optional(bodyTrackingEnabled,
                   "bodyTrackingEnabled",
                   "Whether the Azure Kinect Body Tracking SDK should be enabled or not.");
    defs->optional(bodyTrackingRunAtStart,
                   "bodyTrackingRunAtStart",
                   "Whether the Azure Kinect Body Tracking SDK should directly run when the "
                   "component is started. "
                   "Otherwise it has to be activated by the Ice interface.");
    defs->optional(bodyTrackingModelFilename,
                   "bodyTrackingModelPath",
                   "Path where the .onnx DNN files can be found.");
    defs->optional(bodyTrackingGPUDeviceID, "bodyTrackingGPUDeviceID", "GPU Device ID.");
    defs->optional(bodyTrackingTemporalSmoothingFactor,
                   "bodyTrackingTemporalSmoothingFactor",
                   "Temporal smoothing factor for Azure Kinect body tracking.");

    defs->optional(bodyTrackingDepthMaskMinX, "bodyTrackingDepthMaskMinX");
    defs->optional(bodyTrackingDepthMaskMaxX, "bodyTrackingDepthMaskMaxX");
    defs->optional(bodyTrackingDepthMaskMaxZ, "bodyTrackingDepthMaskMaxZ");
    defs->optional(framerate.bodyTracking.value,
                   "framerate.bodyTracking",
                   "The framerate with which the body tracking is run [frames per second]."
                   "\nNote that the RGB-D image and point cloud frame rates are controlled by"
                   " the properties 'framerate.image' (RGB-D images) and 'framerate'"
                   " (point cloud), respectively.");
    defs->optional(startIMU, "startIMU");

    humanPoseWriter.registerPropertyDefinitions(defs);

    defs->optional(enableHeartbeat, "enableHeartbeat");
    config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    if (framerate.value == 5)
    {
        config.camera_fps = K4A_FRAMES_PER_SECOND_5;
    }
    else if (framerate.value == 15)
    {
        config.camera_fps = K4A_FRAMES_PER_SECOND_15;
    }
    else if (framerate.value == 30)
    {
        config.camera_fps = K4A_FRAMES_PER_SECOND_30;
    }
    else
    {
        throw armarx::LocalException("Invalid image framerate (property 'framerate.image'): ")
            << framerate.value << ". Only framerates 5, 15 and 30 are "
            << "supported by Azure Kinect.";
    }
    framerate.pointCloud.value = getProperty<float>("framerate");
    framerate.pointCloud.update(framerate.value);

#ifdef INCLUDE_BODY_TRACKING
    framerate.bodyTracking.update(framerate.value);
    config.depth_mode = getProperty<k4a_depth_mode_t>("DepthMode");
    config.color_format = getProperty<k4a_image_format_t>("ColorFormat");
    config.color_resolution = getProperty<k4a_color_resolution_t>("ColorResolution");

    config.synchronized_images_only = true;
    ARMARX_INFO << "Depth image size: " << depth_dim.first << "x" << depth_dim.second;
    ARMARX_INFO << "Color image size: " << color_dim.first << "x" << color_dim.second;
    alignedDepthImage =
        k4a::image::create(K4A_IMAGE_FORMAT_DEPTH16,
                           color_dim.first,
                           color_dim.second,
                           color_dim.first * 2 * static_cast<int32_t>(sizeof(uint8_t)));
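    // DEPTH16 stores one uint16_t depth value (in mm) per pixel, hence the
    // stride of width * 2 bytes in the buffer created above.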
    setImageFormat(visionx::ImageDimension(color_dim.first, color_dim.second),
                   visionx::eBayerPatternGr);

        std::make_unique<CByteImage>(color_dim.first, color_dim.second, CByteImage::eRGB24);
    xyzImage = k4a::image::create(K4A_IMAGE_FORMAT_CUSTOM,
                                  color_dim.first,
                                  color_dim.second,
                                  color_dim.first * 3 * static_cast<int32_t>(sizeof(int16_t)));
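    // The CUSTOM-format image receives the computed point cloud: three int16_t
    // values (X, Y, Z in mm) per pixel, matching the stride of width * 3 * sizeof(int16_t).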
    pointcloud = std::make_unique<pcl::PointCloud<CloudPointType>>();

    pointcloudTask->start();
#ifdef INCLUDE_BODY_TRACKING
    if (bodyTrackingEnabled)
    {
        humanPoseWriter.connect(memoryNameSystem());

            this, &AzureKinectPointCloudProvider::runPublishBodyTrackingResults);

        {"Vision", "Camera"},
        "AzureKinectPointCloudProvider");
    std::lock_guard<std::mutex> lock(pointcloudProcMutex);
    ARMARX_DEBUG << "Stopping pointcloud processing thread...";
    const bool WAIT_FOR_JOIN = false;
    pointcloudTask->stop(WAIT_FOR_JOIN);
    pointcloudProcSignal.notify_all();
    ARMARX_DEBUG << "Waiting for pointcloud processing thread to stop...";
    pointcloudTask->waitForStop();
#ifdef INCLUDE_BODY_TRACKING
    if (bodyTrackingIsRunning)
    {
        bodyTrackingPublishTask->stop();
    const uint32_t DEVICE_COUNT = k4a::device::get_installed_count();
    if (DEVICE_COUNT == 0)
    {
        throw armarx::LocalException("No Azure Kinect devices detected!");
    }
    device = k4a::device::open(static_cast<uint32_t>(mDeviceId));
    ARMARX_DEBUG << "Opened device id #" << mDeviceId << " with serial number "
                 << device.get_serialnum() << ".";
    k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);

        << "Color camera calibration:"
        << "cx: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.cx
        << "cy: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.cy
        << "fx: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.fx
        << "fy: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.fy
        << "k1: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.k1
        << "k2: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.k2
        << "p1: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.p1
        << "p2: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.p2
        << "k3: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.k3
        << "k4: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.k4
        << "k5: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.k5
        << "k6: " << k4aCalibration.color_camera_calibration.intrinsics.parameters.param.k6;
    c_left->PrintCameraParameters();
    c_right->PrintCameraParameters();

    transformation = k4a::transformation(k4aCalibration);
#ifdef INCLUDE_BODY_TRACKING
    if (bodyTrackingEnabled)
    {
            bodyTrackingModelFilename);
        ARMARX_CHECK(found) << "Body tracking DNN model could not be found/resolved at `"
                            << bodyTrackingModelFilename << "`.";

        ARMARX_INFO << "Using body tracking DNN model from directory `"
                    << bodyTrackingModelFilename << "`.";

        ARMARX_CHECK(std::filesystem::exists(bodyTrackingModelFilename))
            << "The path `" << bodyTrackingModelFilename << "` does not exist!";
        k4abt_tracker_configuration_t const bodyTrackingConfig{
            .sensor_orientation = K4ABT_SENSOR_ORIENTATION_DEFAULT,
            .processing_mode = K4ABT_TRACKER_PROCESSING_MODE_GPU_CUDA,
            .gpu_device_id = bodyTrackingGPUDeviceID,
            .model_path = bodyTrackingModelFilename.c_str()};
        bodyTracker = k4abt::tracker::create(k4aCalibration, bodyTrackingConfig);
        bodyTracker.set_temporal_smoothing(bodyTrackingTemporalSmoothingFactor);

        if (bodyTrackingRunAtStart and not bodyTrackingIsRunning)
        {
            bodyTrackingIsRunning = true;
            bodyTrackingPublishTask->start();
        }
    device.set_color_control(K4A_COLOR_CONTROL_BRIGHTNESS, K4A_COLOR_CONTROL_MODE_MANUAL, 128);

    device.start_cameras(&config);
    const k4a_calibration_camera_t calibration{k4aCalibration.color_camera_calibration};
    const k4a_calibration_intrinsic_parameters_t::_param param =
        calibration.intrinsics.parameters.param;

    cv::Mat1f camera_matrix(3, 3);
    camera_matrix << param.fx, 0, param.cx, 0, param.fy, param.cy, 0, 0, 1;
    cv::Mat1f new_camera_matrix(3, 3);
    new_camera_matrix << param.fx, 0, param.cx, 0, param.fx, param.cy, 0, 0, 1;
    cv::Mat1f distortion_coeff(1, 8);
    distortion_coeff << param.k1, param.k2, param.p1, param.p2, param.k3, param.k4, param.k5,
        param.k6;

    cv::Mat map1, map2, map3;
    // Note: the R and m1type arguments below are reconstructed (assumed identity
    // rotation and CV_32FC1 maps); only the size argument survived verbatim.
    cv::initUndistortRectifyMap(camera_matrix,
                                distortion_coeff,
                                cv::Mat{},
                                new_camera_matrix,
                                cv::Size{calibration.resolution_width, calibration.resolution_height},
                                CV_32FC1,
                                map1,
                                map2);
    cv::convertMaps(map1, map2, colorDistortionMap, map3, CV_16SC2, true);
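    // The undistortion tables are precomputed once here; cv::convertMaps packs the
    // floating-point maps into the fixed-point CV_16SC2 representation, which makes
    // the per-frame cv::remap of each color image considerably cheaper.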
    const std::chrono::milliseconds TIMEOUT{1000};

    StopWatch sw_get_capture;
    catch (const std::exception&)
    {
        ARMARX_WARNING << "Failed to get capture from device (#" << ++mDiagnostics.num_crashes
                       << "). Restarting camera.";
        StopWatch sw;
        device.stop_cameras();
        device.start_cameras(&config);
        ARMARX_INFO << "Restarting took " << sw.stop() << ".";
    }
    std::lock_guard g{metaInfoMtx};

    const bool mustInitializeTimestampOffset = [&]()
    {
        std::lock_guard g{deviceToRealtimeOffsetMtx};
        return device_to_realtime_offset_.count() == 0;
    }();
    if (mustInitializeTimestampOffset)
    {
            capture.get_ir_image().get_system_timestamp());
    const k4a::image DEPTH_IMAGE = capture.get_depth_image();

    const k4a_image_format_t IMAGE_FORMAT = DEPTH_IMAGE.get_format();
    if (IMAGE_FORMAT != K4A_IMAGE_FORMAT_DEPTH16 && IMAGE_FORMAT != K4A_IMAGE_FORMAT_IR16)
    {
        const char* format_str = ::k4aImageFormatToString(IMAGE_FORMAT);
        std::stringstream error_msg;
        error_msg << "Attempted to colorize a non-depth image with format: " << format_str;
        throw std::logic_error(error_msg.str());
    }
    if (not framerate.pointCloud.skip())
    {
        std::lock_guard lock{pointcloudProcMutex};
        transformation.depth_image_to_color_camera(DEPTH_IMAGE, &alignedDepthImage);

        depthImageReady = true;

        pointcloudProcSignal.notify_one();
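    // Hand-off to the point cloud thread: the aligned depth image was written under
    // pointcloudProcMutex and depthImageReady was set, so the thread blocked on
    // pointcloudProcSignal further below can wake up and consume it.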
#ifdef INCLUDE_BODY_TRACKING
    if (bodyTrackingIsRunning)
    {
        std::scoped_lock lock(bodyTrackingParameterMutex);

        if (not framerate.bodyTracking.skip())
        {
            k4a::image ir_image = capture.get_ir_image();
            std::uint8_t* ir_image_buffer = ir_image.get_buffer();

            k4a::image depth_image = capture.get_depth_image();
            std::uint8_t* depth_image_buffer = depth_image.get_buffer();

                depth_image.get_height_pixels());
            const int stride = ir_image.get_stride_bytes() / ir_image.get_width_pixels();

            for (int x = 0; x < ir_image.get_width_pixels(); x++)
            {
                for (int y = 0; y < ir_image.get_height_pixels(); y++)
                {
                    const int i = (y * ir_image.get_width_pixels() * stride) + (x * stride);
                    const int z = (static_cast<int>(depth_image_buffer[i])) +
                                  (static_cast<int>(depth_image_buffer[i + 1]) << 8);

                    if ((bodyTrackingDepthMaskMinX > 0 and x < bodyTrackingDepthMaskMinX) or
                        (bodyTrackingDepthMaskMaxX > 0 and x > bodyTrackingDepthMaskMaxX) or
                        (bodyTrackingDepthMaskMaxZ > 0 and z > bodyTrackingDepthMaskMaxZ))
                    {
                        depth_image_buffer[i + 1] = 0; // assumed value: the masked pixel is zeroed
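                        // Masking rationale: z is reassembled from the two
                        // little-endian bytes of the 16-bit depth value; pixels
                        // outside the configured x-range or beyond the maximum z
                        // are cleared so the body tracker never sees them.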
            if (not bodyTracker.enqueue_capture(capture))
    const k4a::image COLOR_IMAGE = capture.get_color_image();

    auto real_time = IceUtil::Time::now();
    auto monotonic_time = IceUtil::Time::now(IceUtil::Time::Monotonic);
    auto clock_diff = real_time - monotonic_time;

    auto image_monotonic_time =
        IceUtil::Time::microSeconds(std::chrono::duration_cast<std::chrono::microseconds>(
                                        DEPTH_IMAGE.get_system_timestamp())
                                        .count());
    long offset = long(getProperty<float>("CaptureTimeOffset").getValue() * 1000.0f);

    imagesTime = image_monotonic_time + clock_diff - IceUtil::Time::microSeconds(offset);

    std::lock_guard g{metaInfoMtx};

    std::lock_guard g{debugObserverMtx};
        (real_time - imagesTime).toMilliSecondsDouble());
    if (enableColorUndistortion)
    {
        cv::Mat tmp_rgb_image;

        if (COLOR_IMAGE.get_format() == K4A_IMAGE_FORMAT_COLOR_YUY2)
        {
            cv::Mat yuy2_image(COLOR_IMAGE.get_height_pixels(),
                               COLOR_IMAGE.get_width_pixels(),
                               CV_8UC2, // assumed type argument, as above
                               const_cast<uint8_t*>(COLOR_IMAGE.get_buffer()));
            cv::cvtColor(yuy2_image, tmp_rgb_image, cv::COLOR_YUV2RGB_YUY2);
        }
        else if (COLOR_IMAGE.get_format() == K4A_IMAGE_FORMAT_COLOR_BGRA32)
        {
            // The type and conversion-code arguments are reconstructed (assumed
            // CV_8UC4 / COLOR_BGRA2RGB); buffer and size survived verbatim.
            cv::cvtColor(cv::Mat{cv::Size{COLOR_IMAGE.get_width_pixels(),
                                          COLOR_IMAGE.get_height_pixels()},
                                 CV_8UC4,
                                 (void*)COLOR_IMAGE.get_buffer()},
                         tmp_rgb_image,
                         cv::COLOR_BGRA2RGB);
        }
        else
        {
            throw std::runtime_error("Unsupported color format in k4a image");
        }
        cv::Mat cv_color_image_undistorted(
            COLOR_IMAGE.get_width_pixels(), COLOR_IMAGE.get_height_pixels(), CV_8UC3);

        // The map and interpolation arguments are reconstructed (assumed): only
        // src, dst, and the border mode survived verbatim.
        cv::remap(tmp_rgb_image,
                  cv_color_image_undistorted,
                  colorDistortionMap,
                  cv::Mat{},
                  cv::INTER_LINEAR,
                  cv::BORDER_CONSTANT);
    ::k4aToIvtImage(COLOR_IMAGE, *resultColorImage);

    const int DW = alignedDepthImage.get_width_pixels();
    const int DH = alignedDepthImage.get_height_pixels();

    auto result_depth_buffer = resultDepthImage->pixels;
    const auto* depth_buffer = reinterpret_cast<const uint16_t*>(alignedDepthImage.get_buffer());
    for (int y = 0; y < DH; ++y)
    {
        for (int x = 0; x < DW; ++x)
        {
            uint16_t depth_value = depth_buffer[index_2];
                result_depth_buffer[index],
                result_depth_buffer[index + 1],
                result_depth_buffer[index + 2]);
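            // The (elided) call above packs the 16-bit depth value into the three
            // 8-bit channels of the RGB depth CByteImage so it can be transported
            // through the RGB-only image provider interface.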
    CByteImage* images[2] = {resultColorImage.get(), resultDepthImage.get()};

    std::unique_lock signal_lock{pointcloudProcMutex};
    ARMARX_DEBUG << "Capturing thread waiting for signal...";
    pointcloudProcSignal.wait(signal_lock, [&] { return depthImageProcessed; });
    std::lock_guard g{debugObserverMtx};

    std::lock_guard g{debugObserverMtx};
    Eigen::Vector3f accMean = Eigen::Vector3f::Zero();

    k4a_imu_sample_t imu_sample;
    while (device.get_imu_sample(&imu_sample, std::chrono::milliseconds(0)))
    {
        const Eigen::Vector3f acceleration{imu_sample.acc_sample.xyz.x,
                                           imu_sample.acc_sample.xyz.y,
                                           imu_sample.acc_sample.xyz.z};

        accMean += acceleration;
    }

    accMean /= static_cast<float>(cnt);
    std::lock_guard g{debugObserverMtx};

catch (const std::exception&)
{
        << ++mDiagnostics.num_crashes << ").";
#ifdef INCLUDE_BODY_TRACKING
void
AzureKinectPointCloudProvider::runPublishBodyTrackingResults()
{
    while (bodyTrackingIsRunning)
    {
        const k4abt::frame body_frame = [&]() -> k4abt::frame
        {
            auto result = bodyTracker.pop_result();

        if (body_frame != nullptr)
            auto real_time = IceUtil::Time::now();
            auto monotonic_time = IceUtil::Time::now(IceUtil::Time::Monotonic);
            auto clock_diff = real_time - monotonic_time;

            auto image_monotonic_time = IceUtil::Time::microSeconds(
                std::chrono::duration_cast<std::chrono::microseconds>(
                    body_frame.get_system_timestamp())
                    .count());

            std::lock_guard g{debugObserverMtx};
                imageTime.toMicroSeconds());

            auto real_time = realtimeClock.now();
            auto monotonic_time = monotonicClock.now();
            auto clock_diff = real_time - monotonic_time;

                std::chrono::duration_cast<std::chrono::microseconds>(
                    body_frame.get_system_timestamp())

            auto imageTime = image_monotonic_time + clock_diff;

            std::lock_guard g{debugObserverMtx};
                imageTime.toMicroSeconds());
            std::uint32_t num_bodies = body_frame.get_num_bodies();

            std::lock_guard g{debugObserverMtx};

            std::vector<armarx::armem::human::HumanPose> humanPoses;
            humanPoses.reserve(num_bodies);

            for (std::uint32_t i = 0; i < num_bodies; i++)
            {
                k4abt_body_t body = body_frame.get_body(i);
                printBodyInformation(body);
                for (int i = 0; i < static_cast<int>(K4ABT_JOINT_COUNT); i++)
                {
                    k4a_float3_t position = body.skeleton.joints[i].position;
                    k4a_quaternion_t orientation = body.skeleton.joints[i].orientation;
                    k4abt_joint_confidence_level_t confidence_level =
                        body.skeleton.joints[i].confidence_level;

                        .confidence = static_cast<float>(static_cast<int>(confidence_level)),
                            Eigen::Vector3f{position.v[0], position.v[1], position.v[2]},
                            bodyCameraFrameName,
                        .orientationCamera =
                                .toRotationMatrix(),
                            bodyCameraFrameName,

                humanPoses.push_back(humanPose);
            std::lock_guard g{debugObserverMtx};

            humanPoseWriter.commitHumanPosesInCameraFrame(humanPoses, getName(), timestamp);
        metronome.waitForNextTick();

    while (not pointcloudTask->isStopped())
    {
        std::unique_lock signal_lock{pointcloudProcMutex};
        ARMARX_DEBUG << "Pointcloud thread waiting for signal...";
        pointcloudProcSignal.wait(
            signal_lock, [&] { return pointcloudTask->isStopped() or depthImageReady; });
        depthImageReady = false;
        depthImageProcessed = false;
        transformation.depth_image_to_point_cloud(
            alignedDepthImage, K4A_CALIBRATION_TYPE_COLOR, &xyzImage);
        ARMARX_DEBUG << "Transforming depth image to point cloud took "
        pointcloud->width = static_cast<uint32_t>(xyzImage.get_width_pixels());
        pointcloud->height = static_cast<uint32_t>(xyzImage.get_height_pixels());
        pointcloud->is_dense = false;
        pointcloud->points.resize(pointcloud->width * pointcloud->height);

        auto k4a_cloud_buffer = reinterpret_cast<const int16_t*>(xyzImage.get_buffer());
        unsigned char* color_buffer = resultColorImage->pixels;
        while (color_buffer == nullptr)
        {
            ARMARX_INFO << "color_buffer is null. This should never happen. There is "
                           "probably a race condition somewhere that needs to be fixed. "
                           "Temporarily we ignore this and continue.\n Timestamp: "

            color_buffer = resultColorImage->pixels;
        }
            pointcloud->width * pointcloud->height);

        float max_depth = getProperty<float>("MaxDepth").getValue();
        for (auto& p : pointcloud->points)
        {
            p.r = color_buffer[index];
            p.x = k4a_cloud_buffer[index];

            p.g = color_buffer[index];
            p.y = k4a_cloud_buffer[index];

            p.b = color_buffer[index];
            auto z = k4a_cloud_buffer[index];

            if (z <= max_depth and z != 0)
            {
                // (elided: the valid measurement is taken over as the z coordinate)
            }
            else
            {
                p.z = std::numeric_limits<float>::quiet_NaN();
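        // Invalid measurements (z == 0) and points beyond MaxDepth get a NaN z
        // coordinate, which is why the cloud is marked is_dense = false above.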
        depthImageProcessed = true;
        signal_lock.unlock();

        pointcloudProcSignal.notify_all();
        pointcloud->header.stamp = static_cast<unsigned long>(TIMESTAMP.toMicroSeconds());
    return "AzureKinectPointCloudProvider";

    using namespace Eigen;
    const auto convert_calibration =
        [](const k4a_calibration_camera_t& k4a_calib, float scale = 1.f)
    {
        MonocularCalibration monocular_calibration;

        const k4a_calibration_intrinsic_parameters_t& params = k4a_calib.intrinsics.parameters;
        monocular_calibration.cameraParam.principalPoint = {params.param.cx * scale,
                                                            params.param.cy * scale};
        monocular_calibration.cameraParam.focalLength = {params.param.fx * scale,
                                                         params.param.fy * scale};

        monocular_calibration.cameraParam.distortion = {

        monocular_calibration.cameraParam.width =
            std::floor(float(k4a_calib.resolution_width) * scale);
        monocular_calibration.cameraParam.height =
            std::floor(float(k4a_calib.resolution_height) * scale);

        const Matrix3FRowMajor rotation =
            Map<const Matrix3FRowMajor>{k4a_calib.extrinsics.rotation};

        monocular_calibration.cameraParam.translation = {k4a_calib.extrinsics.translation,
                                                         k4a_calib.extrinsics.translation + 3};

        return monocular_calibration;
    };
    StereoCalibration stereo_calibration;

    stereo_calibration.calibrationLeft =
        convert_calibration(k4aCalibration.color_camera_calibration);

    if (enableColorUndistortion)
    {
        auto& colorDistortionParams = stereo_calibration.calibrationLeft.cameraParam.distortion;
        std::fill(colorDistortionParams.begin(), colorDistortionParams.end(), 0);
    }

    stereo_calibration.calibrationRight =
        convert_calibration(k4aCalibration.color_camera_calibration);

    auto& depthDistortionParams = stereo_calibration.calibrationRight.cameraParam.distortion;
    std::fill(depthDistortionParams.begin(), depthDistortionParams.end(), 0);

    stereo_calibration.rectificationHomographyLeft =
    stereo_calibration.rectificationHomographyRight =

    return stereo_calibration;
    return enableColorUndistortion;

    return getProperty<std::string>("frameName");
std::vector<imrec::ChannelPreferences>

    imrec::ChannelPreferences rgb;
    rgb.requiresLossless = false;
    rgb.name = "rgb"; // assumed by symmetry with the depth channel below

    imrec::ChannelPreferences depth;
    depth.requiresLossless = true;
    depth.name = "depth";

    return {rgb, depth};
    const armarx::EnableHumanPoseEstimationInput& input,
    const Ice::Current&)
{
#ifndef INCLUDE_BODY_TRACKING
    ARMARX_ERROR << "INCLUDE_BODY_TRACKING is not defined.";
#ifdef INCLUDE_BODY_TRACKING
    if (bodyTrackingEnabled)
    {
        if (not bodyTrackingIsRunning and input.enable3d)
        {
            bodyTrackingIsRunning = true;
            bodyTrackingPublishTask->start();
        }
        else if (bodyTrackingIsRunning and not input.enable3d)
        {
            bodyTrackingIsRunning = false;
            bodyTrackingPublishTask->stop();
        }
    }
    else
    {
        ARMARX_ERROR << "Azure Kinect Body Tracking is not enabled";
    std::scoped_lock lock(bodyTrackingParameterMutex);
    bodyTrackingDepthMaskMaxZ = maxDepthInMM;

    const Ice::Current&)
{
    std::scoped_lock lock(bodyTrackingParameterMutex);
    bodyTrackingDepthMaskMinX = minXinPixel;
    bodyTrackingDepthMaskMaxX = maxXinPixel;
    const std::chrono::microseconds& k4a_device_timestamp_us,
    const std::chrono::nanoseconds& k4a_system_timestamp_ns)
{
    std::chrono::nanoseconds realtime_clock =
        std::chrono::system_clock::now().time_since_epoch();
    std::chrono::nanoseconds monotonic_clock =
        std::chrono::steady_clock::now().time_since_epoch();

    std::chrono::nanoseconds monotonic_to_realtime = realtime_clock - monotonic_clock;

    std::chrono::nanoseconds device_to_realtime =
        k4a_system_timestamp_ns - k4a_device_timestamp_us + monotonic_to_realtime;
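    // Offset derivation: the device clock relates to the host's monotonic clock via
    // (k4a_system_timestamp_ns - k4a_device_timestamp_us), and monotonic_to_realtime
    // lifts that difference onto the wall clock. Adding device_to_realtime to any
    // device timestamp therefore yields a realtime (wall clock) timestamp.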
    std::lock_guard g{deviceToRealtimeOffsetMtx};

    std::lock_guard g{debugObserverMtx};
        device_to_realtime_offset_.count() / 1'000.f); // [ns] -> [µs]
        std::abs<float>((device_to_realtime_offset_ - device_to_realtime).count()) /
            1'000.f); // [ns] -> [µs]
    const std::int64_t timeOffsetThreshold = 1e7; // 10 ms

    // If we're over a certain time off, just snap into place.
    if (device_to_realtime_offset_.count() == 0 ||
        std::abs((device_to_realtime_offset_ - device_to_realtime).count()) >
            timeOffsetThreshold)
    {
        ARMARX_VERBOSE << deactivateSpam()
                       << "Initializing or re-initializing the device to realtime offset: "
                       << device_to_realtime.count() << " ns";
        device_to_realtime_offset_ = device_to_realtime;
    }
    else
    {
        constexpr double alpha = 0.10;

        const std::chrono::nanoseconds timeCorrection(static_cast<int64_t>(
            std::floor(alpha * (device_to_realtime - device_to_realtime_offset_).count())));
        device_to_realtime_offset_ = device_to_realtime_offset_ + timeCorrection;
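        // Small drift is corrected with an exponential moving average: each update
        // moves the stored offset by alpha = 10% of the currently measured error,
        // filtering out jitter in the per-frame measurements.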
        std::lock_guard g{debugObserverMtx};
        setDebugObserverDatafield("timeCorrection [µs]",
                                  timeCorrection.count() / 1'000); // [ns] -> [µs]
AzureKinectPointCloudProvider::timestampToArmarX(const std::chrono::microseconds& k4a_timestamp_us)
{
    std::lock_guard g{deviceToRealtimeOffsetMtx};

    // must be initialized beforehand
    ARMARX_CHECK(device_to_realtime_offset_.count() != 0);

    std::chrono::nanoseconds timestamp_in_realtime = k4a_timestamp_us + device_to_realtime_offset_;

    return armarx::Duration::MicroSeconds(timestamp_in_realtime.count() / 1'000); // [ns] -> [µs]
    const std::chrono::microseconds& k4a_device_timestamp_us)
{
    std::lock_guard g{deviceToRealtimeOffsetMtx};

    std::chrono::nanoseconds realtime_clock =
        std::chrono::system_clock::now().time_since_epoch();

    device_to_realtime_offset_ = realtime_clock - k4a_device_timestamp_us;

    ARMARX_INFO << "Initializing the device to realtime offset based on wall clock: "
                << device_to_realtime_offset_.count() << " ns";
    std::stringstream ss;
    ss << "Invalid value (" << value << " fps) for framerate of '" << name << "'."
       << " Reason: The framerate has to be a divisor of the property framerate.image ("
       << higherFramerate << " fps).";
    throw armarx::LocalException() << ss.str();
    skipFrames = std::round(higherFramerate / value) - 1;
    if (skipFrames != 0)
    {
        ARMARX_INFO_S << "Only publishing every " << (skipFrames + 1) << "'th " << name
    skip = skipFramesCount;
    skipFramesCount %= (skipFrames + 1);
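    // Worked example: with framerate.image = 30 fps and a point cloud framerate of
    // 15 fps, skipFrames = 30 / 15 - 1 = 1, so skipFramesCount cycles through 0 and 1
    // and every second frame is published.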