Task.cpp
Go to the documentation of this file.
1/*
2 * This file is part of ArmarX.
3 *
4 * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
5 *
6 * ArmarX is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * ArmarX is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * @package RobotComponents
19 * @author Raphael Grimm ( raphael dot grimm at kit dot edu )
20 * @date 2015
21 * @copyright http://www.gnu.org/licenses/gpl.txt
22 * GNU General Public License
23 */
24
25#include "Task.h"
26
28
29#include <RobotComponents/interface/components/MotionPlanning/DataStructures.h>
30#include <RobotComponents/interface/components/MotionPlanning/MotionPlanningServer.h>
31
32#include "../../util/Metrics.h"
33#include "ManagerNode.h"
34
35namespace armarx::addirrtstar
36{
37 PathWithCost
38 Task::getBestPath(const Ice::Current&) const
39 {
40 std::lock_guard<std::recursive_mutex> lock{mutex};
41 if (finishedRunning())
42 {
43 return paths.at(0);
44 }
45
46 if (isRunning())
47 {
48 return manager->getBestPath();
49 }
50
51 //neither finished nor planning => did not start planning
52 PathWithCost noPathSentinel;
53 noPathSentinel.cost = std::numeric_limits<Ice::Float>::infinity();
54 return noPathSentinel;
55 }
56
57 Ice::Long
58 Task::getPathCount(const Ice::Current&) const
59 {
60 std::lock_guard<std::recursive_mutex> lock{mutex};
61
62 if (isRunning())
63 {
64 return manager->getPathCount();
65 }
66
67 return paths.size();
68 }
69
70 PathWithCost
71 Task::getNthPathWithCost(Ice::Long index, const Ice::Current&) const
72 {
73 std::lock_guard<std::recursive_mutex> lock{mutex};
74
75 if ((index < 0) || index >= getPathCount())
76 {
77 throw IndexOutOfBoundsException{};
78 }
79
81
82 if (isRunning())
83 {
85 return manager->getNthPathWithCost(index);
86 }
87
88 ARMARX_CHECK_EXPRESSION(static_cast<std::size_t>(index) < paths.size());
89 return paths.at(index);
90 }
91
92 PathWithCostSeq
93 Task::getAllPathsWithCost(const Ice::Current&) const
94 {
95 std::lock_guard<std::recursive_mutex> lock{mutex};
96
97 if (isRunning())
98 {
99 return manager->getAllPathsWithCost();
100 }
101
102 return paths;
103 }
104
    //PlanningControlInterface
    /**
     * @brief Aborts the task.
     *
     * A task that has not started yet (eNew/eQueued) is marked as aborted so
     * it will never run; a task that is already finished is a no-op; otherwise
     * the task is assumed to be planning and the manager node is killed.
     *
     * NOTE(review): after setting ePlanningAborted on a queued/new task the
     * code relies on finishedRunning() then returning true so that the early
     * return below is taken — confirm against the TaskStatus definitions.
     */
    void
    Task::abortTask(const Ice::Current&)
    {
        std::lock_guard<std::recursive_mutex> lock{mutex};

        if (getTaskStatus() == TaskStatus::eQueued || getTaskStatus() == TaskStatus::eNew)
        {
            ARMARX_VERBOSE_S << "task stopped";
            //stop the task from executing
            setTaskStatus(TaskStatus::ePlanningAborted);
        }

        if (finishedRunning())
        {
            ARMARX_VERBOSE_S << "task already stopped";
            //noop
            return;
        }

        ARMARX_VERBOSE_S << "stopping manager";
        //should be planning
        //manager will set the correct status and wake run up
        manager->kill();
        ARMARX_VERBOSE_S << "stopped manager";
    }
133
134 //PlanningTaskBase
135 void
136 Task::run(const RemoteObjectNodePrxList& remoteNodes, const Ice::Current&)
137 {
138 ARMARX_CHECK_EXPRESSION(planningComputingPowerRequestStrategy);
139 //since post unmarshall is of this is called BEFORE post unmarshall of the member cspace is called this check can only be done here!
140 const auto dim = static_cast<std::size_t>(cspace->getDimensionality());
141 ARMARX_CHECK_EXPRESSION(startCfg.size() == dim);
142 ARMARX_CHECK_EXPRESSION(goalCfg.size() == dim);
143
144 std::unique_lock<std::recursive_mutex> lock{mutex};
145
146 if (getTaskStatus() != TaskStatus::eQueued)
147 {
148 //tried to restart finished task or already running task
149 ARMARX_WARNING_S << "Run was called with task status " << getTaskStatus();
150 return;
151 }
152
153 //check for trivial cases
155 cspace->initCollisionTest();
156
157 if (!(cspace->isCollisionFree(
158 std::make_pair(startCfg.data(), startCfg.data() + startCfg.size()))))
159 {
160 ARMARX_VERBOSE_S << "trivial task! failed (start is not collision free): \n"
161 << startCfg;
162 setTaskStatus(TaskStatus::ePlanningFailed);
163 return;
164 }
165
166 if (!(cspace->isCollisionFree(
167 std::make_pair(goalCfg.data(), goalCfg.data() + startCfg.size()))))
168 {
169 ARMARX_VERBOSE_S << "trivial task! failed (goal is not collision free): \n" << goalCfg;
170 setTaskStatus(TaskStatus::ePlanningFailed);
171 return;
172 }
173
174 const auto distanceStartGoal =
175 euclideanDistance(startCfg.begin(), startCfg.end(), goalCfg.begin());
176 ARMARX_VERBOSE_S << "distance from start to goal = " << distanceStartGoal;
177
178 if (distanceStartGoal < dcdStep)
179 {
180 ARMARX_VERBOSE_S << "trivial task! done(the distance from start to goal is smaller "
181 "than the DCD step size)";
182 paths.emplace_back(VectorXfSeq{startCfg, goalCfg},
183 distanceStartGoal,
184 taskName + "_trivialPath_" + ice_staticId());
185 setTaskStatus(TaskStatus::eDone);
186 return;
187 }
188
189 //calculate real target cost (if a cost < distanceStartGoal is used set it to distanceStartGoal)
190 targetCost = std::max(targetCost, distanceStartGoal);
191 //check params
192 ARMARX_CHECK_EXPRESSION(remoteNodes.size());
193 RemoteObjectNodeInterfacePrx rmObjNode =
194 remoteNodes.at(0); //copy the proxy since it is const
195 ARMARX_CHECK_EXPRESSION(rmObjNode);
196 //create manager node
197 TaskBasePrx selfDerivedProxy = TaskBasePrx::uncheckedCast(getProxy());
198 ManagerNodePtr localManagerNode{new ManagerNode{selfDerivedProxy,
199 remoteNodes,
200 initialWorkerCount,
201 maximalWorkerCount,
202 planningComputingPowerRequestStrategy,
203 dcdStep,
204 maximalPlanningTimeInSeconds,
205 batchSize,
206 nodeCountDeltaForGoalConnectionTries,
207 CSpaceBasePtr::dynamicCast(cspace->clone()),
208 startCfg,
209 goalCfg,
210 addParams,
211 targetCost}};
212 //register it
213 std::stringstream remoteObjectName;
214 remoteObjectName << localManagerNode->getName() << "@"
215 << getProxy()->ice_getIdentity().name;
216
217 ARMARX_DEBUG_S << "\n starting manager of task on " << rmObjNode
218 << "\n\t remoteObjectName = " << remoteObjectName.str();
219
220 manager = rmObjNode->registerRemoteHandledObject(remoteObjectName.str(), localManagerNode);
221 //set status
222 setTaskStatus(TaskStatus::ePlanning);
223 //wait
224 ARMARX_VERBOSE_S << "waiting for manager";
225 managerDone.wait(lock, [this] { return finishedRunning(); });
226 ARMARX_VERBOSE_S << "manager done";
227
229 //get node count
230 cachedNodeCount = manager->getNodeCount();
231 //we have all required data from the manager
232 manager = nullptr;
233 ARMARX_VERBOSE_S << "done run";
234 }
235
236 void
237 Task::setPaths(const PathWithCostSeq& newPathList, const Ice::Current&)
238 {
239 std::lock_guard<std::recursive_mutex> lock{mutex};
240
241 paths = newPathList;
242 std::sort(paths.begin(),
243 paths.end(),
244 [](const PathWithCost& lhs, const PathWithCost& rhs)
245 { return lhs.cost < rhs.cost; });
246 for (std::size_t i = 0; i < paths.size(); ++i)
247 {
248 paths.at(i).pathName = taskName + "_path_" + to_string(i) + ice_staticId();
249 }
250 }
251
252 void
254 {
255 ARMARX_DEBUG_S << "\n checking parameters:"
256 << "\n\t initialWorkerCount " << initialWorkerCount
257 << "\n\t maximalWorkerCount " << maximalWorkerCount << "\n\t dcdStep "
258 << dcdStep << "\n\t batchSize " << batchSize << "\n\t cspace "
259 << cspace.get() << "\n\t addParams.alpha " << addParams.alpha
260 << "\n\t addParams.initialBorderRadius " << addParams.initialBorderRadius
261 << "\n\t addParams.minimalRadius " << addParams.minimalRadius
262 << "\n\t targetCost " << targetCost << "\n\t dim cspace (-1 == null cspace)"
263 << (cspace ? cspace->getDimensionality() : -1) << "\n\t dim start "
264 << startCfg.size() << "\n\t dim goal " << goalCfg.size() << "\n\t start "
265 << startCfg << "\n\t goal " << goalCfg;
266
267 if (!(dcdStep > 0.f))
268 {
269 throw std::invalid_argument{"DCD stepsize <0"};
270 }
271
272 if (!(cspace))
273 {
274 throw std::invalid_argument{"cspace == nullptr"};
275 }
276
277 const auto dim = static_cast<std::size_t>(cspace->getDimensionality());
278
279 if (!(startCfg.size() == dim))
280 {
281 throw std::invalid_argument{"Dimensions of cspace and start do not match"};
282 }
283
284 if (!(goalCfg.size() == dim))
285 {
286 throw std::invalid_argument{"Dimensions of cspace and goal do not match"};
287 }
288
289 if (!(addParams.alpha >= 0.f))
290 {
291 throw std::invalid_argument{"addParams.alpha < 0.f"};
292 }
293
294 if (!(addParams.minimalRadius > 0.f))
295 {
296 throw std::invalid_argument{"addParams.minimalRadius <= 0.f"};
297 }
298
299 if (!(addParams.initialBorderRadius > addParams.minimalRadius))
300 {
301 throw std::invalid_argument{"addParams.initialBorderRadius <= addParams.minimalRadius"};
302 }
303
304 if (nodeCountDeltaForGoalConnectionTries < 1)
305 {
306 throw std::invalid_argument{"nodeCountDeltaForGoalConnectionTries < 1"};
307 }
308 if (!planningComputingPowerRequestStrategy)
309 {
310 throw std::invalid_argument{"planningComputingPowerRequestStrategy == nullptr"};
311 }
312 }
313
    /**
     * @brief Constructs a planning task.
     *
     * Only stores the given values; no planning is started until run() is
     * called via the dispatcher.
     *
     * @param cspace the configuration space to plan in (cloned for the manager in run()).
     * @param planningComputingPowerRequestStrategy strategy used to request computing power.
     * @param startCfg start configuration (must match the cspace dimensionality).
     * @param goalCfg goal configuration (must match the cspace dimensionality).
     * @param taskName name used as prefix for generated path names.
     * @param maximalPlanningTimeInSeconds planning time budget.
     * @param addParams adaptive-dynamic-domain parameters.
     * @param targetCost cost at which planning may stop (raised to the
     *        start-goal distance in run() if smaller).
     * @param dcdStep discrete collision detection step size.
     * @param batchSize batch size used by the workers.
     * @param nodeCountDeltaForGoalConnectionTries node-count interval between
     *        goal connection attempts.
     * @param initialWorkerCount number of workers to start with.
     * @param maximalWorkerCount upper bound on the number of workers.
     */
    Task::Task( //problem
        CSpaceBasePtr cspace,
        const cprs::ComputingPowerRequestStrategyBasePtr& planningComputingPowerRequestStrategy,
        VectorXf startCfg,
        VectorXf goalCfg,
        const std::string& taskName,
        Ice::Long maximalPlanningTimeInSeconds,
        AdaptiveDynamicDomainParameters addParams,
        float targetCost,
        //general
        float dcdStep,
        Ice::Long batchSize,
        Ice::Long nodeCountDeltaForGoalConnectionTries,
        //management
        Ice::Long initialWorkerCount,
        Ice::Long maximalWorkerCount)
    {
        this->cspace = cspace;
        this->planningComputingPowerRequestStrategy = planningComputingPowerRequestStrategy;
        this->startCfg = std::move(startCfg);
        this->goalCfg = std::move(goalCfg);
        this->taskName = taskName;
        this->addParams = addParams;
        this->targetCost = targetCost;
        this->dcdStep = dcdStep;
        this->maximalPlanningTimeInSeconds = maximalPlanningTimeInSeconds;
        this->batchSize = batchSize;
        this->nodeCountDeltaForGoalConnectionTries = nodeCountDeltaForGoalConnectionTries;
        this->initialWorkerCount = initialWorkerCount;
        this->maximalWorkerCount = maximalWorkerCount;
        cachedNodeCount = 0;
        // NOTE(review): one statement from the original body is not visible in
        // this listing (presumably a checkParameters() call) — confirm against
        // the repository before relying on this constructor validating input.
    }
347
348 Ice::Long
349 Task::getNodeCount(const Ice::Current&) const
350 {
351 std::lock_guard<std::recursive_mutex> lock{mutex};
352
353 if (isRunning())
354 {
355 return manager->getNodeCount();
356 }
357
358 ARMARX_VERBOSE_S << "fetching node count from cache";
359 return cachedNodeCount;
360 }
361
362 void
363 Task::setMaxCpus(Ice::Int maxCpus, const Ice::Current&)
364 {
365 manager->setMaxCpus(maxCpus);
366 }
367
    /**
     * @brief Returns the maximal number of CPUs the manager may use.
     *
     * NOTE(review): forwards unconditionally to the manager proxy without
     * taking `mutex` and without the isRunning() guard the other accessors
     * use; `manager` is reset to nullptr at the end of run(), so calling this
     * on a task that is not planning presumably fails — confirm the intended
     * contract with callers before changing it.
     */
    Ice::Int
    Task::getMaxCpus(const Ice::Current&) const
    {
        return manager->getMaxCpus();
    }
373} // namespace armarx::addirrtstar
uint8_t index
bool isRunning(const Ice::Current &=Ice::emptyCurrent) const override
Returns whether the task is currently planning.
bool finishedRunning(const Ice::Current &=Ice::emptyCurrent) const override
Returns whether the task has finished planning.
bool setTaskStatus(TaskStatus::Status newTaskStatus, const Ice::Current &=Ice::emptyCurrent) override
MotionPlanningTaskBasePrx & getProxy()
TaskStatus::Status getTaskStatus(const Ice::Current &=Ice::emptyCurrent) const override
Manages the planning of the addirrt* algorithm.
Definition ManagerNode.h:59
void setPaths(const PathWithCostSeq &newPathList, const Ice::Current &=Ice::emptyCurrent) override
Used by the manager to store its found paths.
Definition Task.cpp:237
PathWithCostSeq getAllPathsWithCost(const Ice::Current &=Ice::emptyCurrent) const override
Definition Task.cpp:93
Ice::Long getPathCount(const Ice::Current &=Ice::emptyCurrent) const override
Definition Task.cpp:58
PathWithCost getNthPathWithCost(Ice::Long index, const Ice::Current &=Ice::emptyCurrent) const override
Definition Task.cpp:71
RemoteHandle< ManagerNodeBasePrx > manager
The manager node.
Definition Task.h:198
PathWithCostSeq paths
All found paths.
Definition Task.h:202
std::condition_variable_any managerDone
CV used by the dispatcher thread to wait until planning is done.
Definition Task.h:193
std::recursive_mutex mutex
Mutex to protect internal structures.
Definition Task.h:184
void abortTask(const Ice::Current &=Ice::emptyCurrent) override
Aborts the task.
Definition Task.cpp:107
PathWithCost getBestPath(const Ice::Current &=Ice::emptyCurrent) const override
Definition Task.cpp:38
void setMaxCpus(Ice::Int maxCpus, const Ice::Current &=Ice::emptyCurrent) override
Definition Task.cpp:363
void checkParameters()
Checks for illegal parameters.
Definition Task.cpp:253
Task()
Ctor used by object factories.
Definition Task.h:177
Ice::Long cachedNodeCount
The cached node count.
Definition Task.h:207
Ice::Long getNodeCount(const Ice::Current &=Ice::emptyCurrent) const override
Definition Task.cpp:349
Ice::Int getMaxCpus(const Ice::Current &=Ice::emptyCurrent) const override
Definition Task.cpp:369
void run(const RemoteObjectNodePrxList &remoteNodes, const Ice::Current &=Ice::emptyCurrent) override
Runs the task.
Definition Task.cpp:136
#define ARMARX_CHECK_EXPRESSION(expression)
This macro evaluates the expression and if it turns out to be false it will throw an ExpressionExcept...
#define ARMARX_DEBUG_S
The logging level for output that is only interesting while debugging.
Definition Logging.h:205
#define ARMARX_VERBOSE_S
Definition Logging.h:207
#define ARMARX_WARNING_S
The logging level for unexpected behaviour, but not a serious problem.
Definition Logging.h:213
IceInternal::Handle< ManagerNode > ManagerNodePtr
An ice handle for a ManagerNode.
Definition ManagerNode.h:52
float euclideanDistance(IteratorType1 first1, IteratorType1 last1, IteratorType2 first2)
Returns the euclidean distance.
Definition Metrics.h:104
const std::string & to_string(const std::string &s)