#include "../../util/Samplers.h"
remoteObjectNodes.begin(), remoteObjectNodes.end(),
[](const RemoteObjectNodeInterfacePrx& prx)
{
    const auto count = static_cast<std::size_t>(prx->getNumberOfCores());
    ARMARX_VERBOSE_S << "ROI " << prx.get() << " has " << count << " cores";
maximalWorkerCount = std::min(maximalWorkerCount, maxROCount);
initialWorkerCount = std::min(initialWorkerCount, maxROCount);
std::numeric_limits<Ice::Float>::infinity(),
ARMARX_VERBOSE_S << "armarx::addirrtstar::ManagerNode::onConnectComponent()";
std::lock_guard<std::mutex> lock{treeMutex};
path.nodes.emplace_back(goalNode);
std::lock_guard<std::mutex> lock{treeMutex};
std::lock_guard<std::mutex> lock{treeMutex};
path.nodes.emplace_back(goalNode);
std::lock_guard<std::mutex> lock{treeMutex};
PathWithCostSeq paths{pathCount};

for (std::size_t i = 0; i < pathCount; ++i)
{
    paths.at(i).nodes.emplace_back(goalNode);
ARMARX_WARNING_S << "worker requested remote update w/u " << workerId << "/" << updateId;
const auto uWorkerId = static_cast<std::size_t>(workerId);
const auto uUpdateId = static_cast<std::size_t>(updateId);
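// Ice transports both ids as signed Ice::Long; they are cast once to
// std::size_t here, presumably so they can index the std::vector based
// bookkeeping (workers, update lists) without further conversions.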
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::lock_guard<std::mutex> updateLock(updateMutex, std::adopt_lock);
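// std::adopt_lock means both mutexes are already held at this point,
// presumably acquired together via std::lock(treeMutex, updateMutex) to
// avoid lock-order deadlocks; the guards only take over the unlocking.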
const auto dim = static_cast<std::size_t>(cspace->getDimensionality());

ARMARX_CHECK_EXPRESSION(cspace->isValidConfiguration(std::make_pair(&startNode.front(), &startNode.back() + 1)));

ARMARX_CHECK_EXPRESSION(cspace->isValidConfiguration(std::make_pair(&goalNode.front(), &goalNode.back() + 1)));
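// Both checks pass the configuration to the C-space as a half-open iterator
// range [first, last): &node.front() points at the first element,
// &node.back() + 1 one past the last.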
for (Ice::Long i = 0; i < initialWorkerCount; ++i)
TaskStatus::Status status = TaskStatus::ePlanning;
planningComputingPowerRequestStrategy->updateTaskStatus(TaskStatus::ePlanning);
std::mutex mutexWait;
std::unique_lock<std::mutex> waitingLock{mutexWait};

planningComputingPowerRequestStrategy->setCurrentStateAsInitialState();

managerEvent.wait_for(waitingLock, std::chrono::milliseconds{100});
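// managerEvent acts as a plain wake-up signal here: the manager sleeps on a
// local throwaway mutex (mutexWait guards no shared state) and is either
// notified by an incoming update or re-checks its loop conditions every 100 ms.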
ARMARX_IMPORTANT << "Planning Task encountered timeout (" << maximalPlanningTimeInSeconds << " seconds)";
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::unique_lock<std::mutex> updateLock(updateMutex, std::adopt_lock);
status = TaskStatus::eRefining;
task->setTaskStatus(status);

planningComputingPowerRequestStrategy->updateTaskStatus(TaskStatus::eRefining);
planningComputingPowerRequestStrategy->updateNodeCount(tree.size());
if ((getActiveWorkerCount() < static_cast<std::size_t>(maximalWorkerCount)) &&
    planningComputingPowerRequestStrategy->shouldAllocateComputingPower())
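// Scale up only while below the worker cap and only when the request
// strategy judges additional computing power to be worthwhile.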
task->setTaskStatus(TaskStatus::eRefining);
status = TaskStatus::eRefinementAborted;

task->setTaskStatus(status);
#define common_exception_output "EXCEPTION!\n"                                  \
    << "\n\ttask name: " << task->getTaskName()                                 \
    << "\n\tice id = " << task->ice_id()                                        \
    << "\n\told status " << TaskStatus::toString(task->getTaskStatus())
catch (Ice::Exception& e)
{
    ARMARX_ERROR_S << common_exception_output
                   << "\n\tWHAT:\n" << e.what()
                   << "\n\tSTACK:\n" << e.ice_stackTrace();
    task->setTaskStatus(TaskStatus::eException);
}
catch (std::exception& e)
{
    ARMARX_ERROR_S << common_exception_output
                   << "\n\tWHAT:\n" << e.what();
    task->setTaskStatus(TaskStatus::eException);
}
catch (...)
{
    ARMARX_ERROR_S << common_exception_output
                   << "\n\tsomething not derived from std::exception was thrown";
    task->setTaskStatus(TaskStatus::eException);
}
#undef common_exception_output
RemoteObjectNodeInterfacePrx& ron = remoteObjectNodes.at(remoteObjectNodeIndex);
ManagerNodeBasePrx selfProxy = ManagerNodeBasePrx::uncheckedCast(getProxy());
CSpaceBasePtr::dynamicCast(cspace->clone()),
startNode, goalNode, dcdStep, addParams,
nodeCountDeltaForGoalConnectionTries,
std::stringstream newWorkerName;
newWorkerName << newWorker->getName() << ":" << newWorkerId << "@[" << getName() << "]";
workers.emplace_back(ron->registerRemoteHandledObject(newWorkerName.str(), newWorker));
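// registerRemoteHandledObject deploys the new worker on the chosen remote
// object node; the returned handle is kept in `workers`, which presumably
// ties the worker's remote lifetime to this manager.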
<< "\n\t RON index: " << remoteObjectNodeIndex
planningComputingPowerRequestStrategy->allocatedComputingPower();
if (getWorkerCount() >= static_cast<std::size_t>(maximalWorkerCount))
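// Hard cap: once the configured maximum number of workers exists, no
// further worker is created.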
std::size_t ronIndex = 0;

for (; (ronIndex < remoteObjectNodes.size()) &&
ARMARX_WARNING_S << "manager requested remote update w/u " << workerId << "/" << updateId;
return workers.at(workerId)->getUpdate(updateId);
ARMARX_VERBOSE_S << "final update id for worker " << workerId << " is " << finalUpdateId;
worker->killWorker();
std::unique_lock<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::unique_lock<std::mutex> updateLock(updateMutex, std::adopt_lock);
managerEvent.wait_for(updateLock, std::chrono::milliseconds{100});
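// Unlike the throwaway-mutex wait above, this wait_for releases updateMutex
// while sleeping, so workers (which presumably lock it to publish updates)
// can still make progress; the manager re-checks its exit condition at
// least every 100 ms.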
[this](std::size_t workerId, Ice::Long updateId)
const auto workerAppliedUpdateCount = appliedUpdates.at(i).size();
[this](std::size_t workerId, Ice::Long updateId)
planningComputingPowerRequestStrategy->updateNodeCreations(u.nodes.size(), batchSize);
const auto workerId = getUpdatesWorkerId(u);
std::lock_guard<std::mutex> treeLock{treeMutex};
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
std::lock_guard<std::mutex> treeLock(treeMutex, std::adopt_lock);
return ((ClockType::now() - timepointStart) >= std::chrono::seconds{maximalPlanningTimeInSeconds});
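// Timeout test: planning counts as timed out once the elapsed time since
// timepointStart reaches maximalPlanningTimeInSeconds.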
maximalWorkerCount = maxCpus;
return maximalWorkerCount;