#include "../../util/Samplers.h"
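// Determine how many cores the connected RemoteObjectNodes offer in total;
// the result presumably caps maximalWorkerCount and initialWorkerCount below.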
remoteObjectNodes.end(),
[](const RemoteObjectNodeInterfacePrx& prx)
{
    const auto count = static_cast<std::size_t>(prx->getNumberOfCores());
    ARMARX_VERBOSE_S << "ROI " << prx.get() << " has " << count << " cores";
const Ice::Long maxROCount = std::accumulate(
maximalWorkerCount = std::min(maximalWorkerCount, maxROCount);
initialWorkerCount = std::min(initialWorkerCount, maxROCount);
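// The shared tree appears to be initialized with a single root node whose
// cost is set to infinity below, i.e. no goal-reaching path is known yet.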
tree.init(FullIceTree{FullNodeDataListList{FullNodeDataList{FullNodeData{
    std::numeric_limits<Ice::Float>::infinity(),
ARMARX_VERBOSE_S << "armarx::addirrtstar::ManagerNode::onConnectComponent()";
std::lock_guard<std::mutex> lock{treeMutex};
path.nodes.emplace_back(goalNode);
std::lock_guard<std::mutex> lock{treeMutex};
std::lock_guard<std::mutex> lock{treeMutex};
path.nodes.emplace_back(goalNode);
std::lock_guard<std::mutex> lock{treeMutex};
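// One PathWithCost entry per goal-reaching path found so far; each path is
// presumably filled from the tree and gets the goal node appended below.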
PathWithCostSeq paths{pathCount};
for (std::size_t i = 0; i < pathCount; ++i)
    paths.at(i).nodes.emplace_back(goalNode);
ARMARX_WARNING_S << "worker requested remote update w/u " << workerId << "/" << updateId;
const auto uWorkerId = static_cast<std::size_t>(workerId);
const auto uUpdateId = static_cast<std::size_t>(updateId);
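// std::adopt_lock assumes both mutexes are already held, presumably via a
// preceding std::lock(treeMutex, updateMutex) that acquires them deadlock-free.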
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::lock_guard<std::mutex> updateLock(updateMutex, std::adopt_lock);
const auto dim = static_cast<std::size_t>(cspace->getDimensionality());
std::make_pair(&startNode.front(), &startNode.back() + 1)));
std::make_pair(&goalNode.front(), &goalNode.back() + 1)));
startNode.begin(), startNode.end(), goalNode.begin());
for (Ice::Long i = 0; i < initialWorkerCount; ++i)
TaskStatus::Status status = TaskStatus::ePlanning;
planningComputingPowerRequestStrategy->updateTaskStatus(TaskStatus::ePlanning);
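// mutexWait appears to exist only to satisfy the condition_variable API;
// it protects no shared state, so a fresh local mutex suffices here.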
std::mutex mutexWait;
std::unique_lock<std::mutex> waitingLock{mutexWait};
planningComputingPowerRequestStrategy->setCurrentStateAsInitialState();
managerEvent.wait_for(waitingLock, std::chrono::milliseconds{100});
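// Note: the 100 ms timeout above turns the wait into a polling loop, so abort
// and timeout conditions are presumably rechecked on every pass.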
<< maximalPlanningTimeInSeconds << " seconds)";
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::unique_lock<std::mutex> updateLock(updateMutex, std::adopt_lock);
status = TaskStatus::eRefining;
task->setTaskStatus(status);
planningComputingPowerRequestStrategy->updateTaskStatus(TaskStatus::eRefining);
planningComputingPowerRequestStrategy->updateNodeCount(tree.size());
planningComputingPowerRequestStrategy->shouldAllocateComputingPower())
if ((status == TaskStatus::ePlanningAborted || status == TaskStatus::ePlanningFailed) &&
task->setTaskStatus(TaskStatus::eRefining);
status = TaskStatus::eRefinementAborted;
task->setTaskStatus(status);
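// Helper macro that appends task diagnostics to a log stream; it is used by
// the catch handlers below and #undef'd again once they are done.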
#define common_exception_output \
    << "\n\ttask name: " << task->getTaskName() << "\n\tice id = " << task->ice_id() \
    << "\n\told status " << TaskStatus::toString(task->getTaskStatus())
catch (Ice::Exception& e)
<< e.what() << "\n\tSTACK:\n" << e.ice_stackTrace();
task->setTaskStatus(TaskStatus::eException);
catch (std::exception& e)
task->setTaskStatus(TaskStatus::eException);
<< "\n\tsomething not derived from std::exception was thrown";
task->setTaskStatus(TaskStatus::eException);
#undef common_exception_output
RemoteObjectNodeInterfacePrx& ron = remoteObjectNodes.at(remoteObjectNodeIndex);
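// uncheckedCast performs no remote ice_isA check (unlike checkedCast),
// so obtaining the self proxy cannot block on the network here.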
ManagerNodeBasePrx selfProxy = ManagerNodeBasePrx::uncheckedCast(getProxy());
nodeCountDeltaForGoalConnectionTries,
std::stringstream newWorkerName;
newWorkerName << newWorker->getName() << ":" << newWorkerId << "@[" << getName() << "]";
workers.emplace_back(ron->registerRemoteHandledObject(newWorkerName.str(), newWorker));
<< "\n\t RON index: " << remoteObjectNodeIndex
planningComputingPowerRequestStrategy->allocatedComputingPower();
if (getWorkerCount() >= static_cast<std::size_t>(maximalWorkerCount))
std::size_t ronIndex = 0;
ARMARX_WARNING_S << "manager requested remote update w/u " << workerId << "/" << updateId;
return workers.at(workerId)->getUpdate(updateId);
ARMARX_VERBOSE_S << "final update id for worker " << workerId << " is " << finalUpdateId;
worker->killWorker();
std::unique_lock<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::unique_lock<std::mutex> updateLock(updateMutex, std::adopt_lock);
managerEvent.wait_for(updateLock, std::chrono::milliseconds{100});
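// An update id of -1 presumably marks a worker whose final update has not
// been determined yet; the predicate below scans for such workers.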
[](const Ice::Long& l) { return l == -1; }));
[this](std::size_t workerId, Ice::Long updateId)
const auto workerAppliedUpdateCount = appliedUpdates.at(i).size();
const auto workerUpdateCount =
static_cast<Ice::Long>(workerAppliedUpdateCount) - 1);
[this](std::size_t workerId, Ice::Long updateId)
planningComputingPowerRequestStrategy->updateNodeCreations(u.nodes.size(),
const auto workerId = getUpdatesWorkerId(u);
std::lock_guard<std::mutex> treeLock{treeMutex};
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::chrono::seconds{maximalPlanningTimeInSeconds});
maximalWorkerCount = maxCpus;
return maximalWorkerCount;