Task.cpp
/*
 * This file is part of ArmarX.
 *
 * Copyright (C) 2011-2016, High Performance Humanoid Technologies (H2T), Karlsruhe Institute of Technology (KIT), all rights reserved.
 *
 * ArmarX is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * ArmarX is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * @package    RobotComponents
 * @author     Raphael Grimm ( raphael dot grimm at kit dot edu )
 * @date       2015
 * @copyright  http://www.gnu.org/licenses/gpl.txt
 *             GNU General Public License
 */

#include <ArmarXCore/core/exceptions/local/ExpressionException.h>

#include "../../util/Metrics.h"
#include <RobotComponents/interface/components/MotionPlanning/MotionPlanningServer.h>
#include <RobotComponents/interface/components/MotionPlanning/DataStructures.h>

#include "Task.h"
#include "ManagerNode.h"

namespace armarx::addirrtstar
{
    PathWithCost Task::getBestPath(const Ice::Current&) const
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};
        if (finishedRunning())
        {
            return paths.at(0);
        }

        if (isRunning())
        {
            return manager->getBestPath();
        }

        //neither finished nor planning => did not start planning
        PathWithCost noPathSentinel;
        noPathSentinel.cost = std::numeric_limits<Ice::Float>::infinity();
        return noPathSentinel;
    }
    Ice::Long Task::getPathCount(const Ice::Current&) const
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};

        if (isRunning())
        {
            return manager->getPathCount();
        }

        return paths.size();
    }

    PathWithCost Task::getNthPathWithCost(Ice::Long index, const Ice::Current&) const
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};

        if ((index < 0) || index >= getPathCount())
        {
            throw IndexOutOfBoundsException {};
        }

        if (isRunning())
        {
            ARMARX_CHECK_EXPRESSION(index < manager->getPathCount());
            return manager->getNthPathWithCost(index);
        }

        ARMARX_CHECK_EXPRESSION(static_cast<std::size_t>(index) < paths.size());
        return paths.at(index);
    }

    PathWithCostSeq Task::getAllPathsWithCost(const Ice::Current&) const
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};

        if (isRunning())
        {
            return manager->getAllPathsWithCost();
        }

        return paths;
    }

    //PlanningControlInterface
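    //Aborts the task: a queued or new task is marked as aborted directly; a running task is
    //stopped by killing the remote manager node, which then sets the final status and wakes up run().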
    void Task::abortTask(const Ice::Current&)
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};

        if (getTaskStatus() == TaskStatus::eQueued || getTaskStatus() == TaskStatus::eNew)
        {
            ARMARX_VERBOSE_S << "task stopped";
            //stop the task from executing
            setTaskStatus(TaskStatus::ePlanningAborted);
        }

        if (finishedRunning())
        {
            ARMARX_VERBOSE_S << "task already stopped";
            //noop
            return;
        }

        ARMARX_VERBOSE_S << "stopping manager";
        //should be planning
        //manager will set the correct status and wake run up
        manager->kill();
        ARMARX_VERBOSE_S << "stopped manager";
    }

    //PlanningTaskBase
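    //Dispatcher entry point: handles trivial tasks (colliding start or goal, start and goal closer
    //than one DCD step) directly, otherwise spawns a ManagerNode on the first RemoteObjectNode and
    //blocks on managerDone until planning has finished or was aborted.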
    void Task::run(const RemoteObjectNodePrxList& remoteNodes, const Ice::Current&)
    {
        ARMARX_CHECK_EXPRESSION(planningComputingPowerRequestStrategy);
        //since post-unmarshalling of this object happens BEFORE post-unmarshalling of the member cspace, this check can only be done here!
        const auto dim = static_cast<std::size_t>(cspace->getDimensionality());
        ARMARX_CHECK_EXPRESSION(startCfg.size() == dim);
        ARMARX_CHECK_EXPRESSION(goalCfg.size() == dim);

        std::unique_lock<std::recursive_mutex> lock {mutex};

        if (getTaskStatus() != TaskStatus::eQueued)
        {
            //tried to restart a finished or an already running task
            ARMARX_WARNING_S << "Run was called with task status " << getTaskStatus();
            return;
        }

        //check for trivial cases
        ARMARX_CHECK_EXPRESSION(cspace);
        cspace->initCollisionTest();

        if (!(cspace->isCollisionFree(std::make_pair(startCfg.data(), startCfg.data() + startCfg.size()))))
        {
            ARMARX_VERBOSE_S << "trivial task! failed (start is not collision free): \n" << startCfg;
            setTaskStatus(TaskStatus::ePlanningFailed);
            return;
        }

        if (!(cspace->isCollisionFree(std::make_pair(goalCfg.data(), goalCfg.data() + goalCfg.size()))))
        {
            ARMARX_VERBOSE_S << "trivial task! failed (goal is not collision free): \n" << goalCfg;
            setTaskStatus(TaskStatus::ePlanningFailed);
            return;
        }

        const auto distanceStartGoal = euclideanDistance(startCfg.begin(), startCfg.end(), goalCfg.begin());
        ARMARX_VERBOSE_S << "distance from start to goal = " << distanceStartGoal;

        if (distanceStartGoal < dcdStep)
        {
            ARMARX_VERBOSE_S << "trivial task! done (the distance from start to goal is smaller than the DCD step size)";
            paths.emplace_back(VectorXfSeq {startCfg, goalCfg}, distanceStartGoal, taskName + "_trivialPath_" + ice_staticId());
            setTaskStatus(TaskStatus::eDone);
            return;
        }

        //calculate the real target cost (a target cost below distanceStartGoal is unreachable, so clamp it to distanceStartGoal)
        targetCost = std::max(targetCost, distanceStartGoal);
        //check params
        ARMARX_CHECK_EXPRESSION(remoteNodes.size());
        RemoteObjectNodeInterfacePrx rmObjNode = remoteNodes.at(0); //copy the proxy since it is const
        ARMARX_CHECK_EXPRESSION(rmObjNode);
        //create the manager node
        TaskBasePrx selfDerivedProxy = TaskBasePrx::uncheckedCast(getProxy());
        ManagerNodePtr localManagerNode
        {
            new ManagerNode{
                selfDerivedProxy, remoteNodes,
                initialWorkerCount, maximalWorkerCount, planningComputingPowerRequestStrategy,
                dcdStep, maximalPlanningTimeInSeconds, batchSize, nodeCountDeltaForGoalConnectionTries,
                CSpaceBasePtr::dynamicCast(cspace->clone()),
                startCfg, goalCfg,
                addParams, targetCost
            }
        };
        //register it
        std::stringstream remoteObjectName;
        remoteObjectName << localManagerNode->getName() << "@" << getProxy()->ice_getIdentity().name;

        ARMARX_DEBUG_S << "\n starting manager of task on " << rmObjNode
                       << "\n\t remoteObjectName = " << remoteObjectName.str();

        manager = rmObjNode->registerRemoteHandledObject(remoteObjectName.str(), localManagerNode);
        //set status
        setTaskStatus(TaskStatus::ePlanning);
        //wait
        ARMARX_VERBOSE_S << "waiting for manager";
        managerDone.wait(lock, [this] {return finishedRunning();});
        ARMARX_VERBOSE_S << "manager done";

        //get the node count
        cachedNodeCount = manager->getNodeCount();
        //we have all required data from the manager
        manager = nullptr;
        ARMARX_VERBOSE_S << "done run";
    }

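    //Called by the manager to publish its found paths. Paths are sorted by ascending cost so that
    //getBestPath() can simply return paths.at(0), and each path gets a task-scoped name.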
    void Task::setPaths(const PathWithCostSeq& newPathList, const Ice::Current&)
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};

        paths = newPathList;
        std::sort(paths.begin(), paths.end(), [](const PathWithCost& lhs, const PathWithCost& rhs)
        {
            return lhs.cost < rhs.cost;
        });
        for (std::size_t i = 0; i < paths.size(); ++i)
        {
            paths.at(i).pathName = taskName + "_path_" + to_string(i) + ice_staticId();
        }
    }

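    //Validates all construction parameters and throws std::invalid_argument on the first violation.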
    void Task::checkParameters()
    {
        ARMARX_DEBUG_S << "\n checking parameters:"
                       << "\n\t initialWorkerCount " << initialWorkerCount
                       << "\n\t maximalWorkerCount " << maximalWorkerCount
                       << "\n\t dcdStep " << dcdStep
                       << "\n\t batchSize " << batchSize
                       << "\n\t cspace " << cspace.get()
                       << "\n\t addParams.alpha " << addParams.alpha
                       << "\n\t addParams.initialBorderRadius " << addParams.initialBorderRadius
                       << "\n\t addParams.minimalRadius " << addParams.minimalRadius
                       << "\n\t targetCost " << targetCost
                       << "\n\t dim cspace (-1 == null cspace) " << (cspace ? cspace->getDimensionality() : -1)
                       << "\n\t dim start " << startCfg.size()
                       << "\n\t dim goal " << goalCfg.size()
                       << "\n\t start " << startCfg
                       << "\n\t goal " << goalCfg;

        if (!(dcdStep > 0.f))
        {
            throw std::invalid_argument {"DCD step size <= 0"};
        }

        if (!(cspace))
        {
            throw std::invalid_argument {"cspace == nullptr"};
        }

        const auto dim = static_cast<std::size_t>(cspace->getDimensionality());

        if (!(startCfg.size() == dim))
        {
            throw std::invalid_argument {"Dimensions of cspace and start do not match"};
        }

        if (!(goalCfg.size() == dim))
        {
            throw std::invalid_argument {"Dimensions of cspace and goal do not match"};
        }

        if (!(addParams.alpha >= 0.f))
        {
            throw std::invalid_argument {"addParams.alpha < 0.f"};
        }

        if (!(addParams.minimalRadius > 0.f))
        {
            throw std::invalid_argument {"addParams.minimalRadius <= 0.f"};
        }

        if (!(addParams.initialBorderRadius > addParams.minimalRadius))
        {
            throw std::invalid_argument {"addParams.initialBorderRadius <= addParams.minimalRadius"};
        }

        if (nodeCountDeltaForGoalConnectionTries < 1)
        {
            throw std::invalid_argument {"nodeCountDeltaForGoalConnectionTries < 1"};
        }

        if (!planningComputingPowerRequestStrategy)
        {
            throw std::invalid_argument {"planningComputingPowerRequestStrategy == nullptr"};
        }
    }

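    //The constructor only stores its arguments in the corresponding members and then delegates
    //all validation to checkParameters().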
    Task::Task(//problem
        CSpaceBasePtr cspace,
        const cprs::ComputingPowerRequestStrategyBasePtr& planningComputingPowerRequestStrategy,
        VectorXf startCfg,
        VectorXf goalCfg,
        const std::string& taskName,
        Ice::Long maximalPlanningTimeInSeconds,
        AdaptiveDynamicDomainParameters addParams,
        float targetCost,
        //general
        float dcdStep,
        Ice::Long batchSize,
        Ice::Long nodeCountDeltaForGoalConnectionTries,
        //management
        Ice::Long initialWorkerCount,
        Ice::Long maximalWorkerCount
    )
    {
        this->cspace = cspace;
        this->planningComputingPowerRequestStrategy = planningComputingPowerRequestStrategy;
        this->startCfg = std::move(startCfg);
        this->goalCfg = std::move(goalCfg);
        this->taskName = taskName;
        this->addParams = addParams;
        this->targetCost = targetCost;
        this->dcdStep = dcdStep;
        this->maximalPlanningTimeInSeconds = maximalPlanningTimeInSeconds;
        this->batchSize = batchSize;
        this->nodeCountDeltaForGoalConnectionTries = nodeCountDeltaForGoalConnectionTries;
        this->initialWorkerCount = initialWorkerCount;
        this->maximalWorkerCount = maximalWorkerCount;
        cachedNodeCount = 0;
        checkParameters();
    }

    Ice::Long Task::getNodeCount(const Ice::Current&) const
    {
        std::lock_guard<std::recursive_mutex> lock {mutex};

        if (isRunning())
        {
            return manager->getNodeCount();
        }

        ARMARX_VERBOSE_S << "fetching node count from cache";
        return cachedNodeCount;
    }

    void Task::setMaxCpus(Ice::Int maxCpus, const Ice::Current&)
    {
        manager->setMaxCpus(maxCpus);
    }

    Ice::Int Task::getMaxCpus(const Ice::Current&) const
    {
        return manager->getMaxCpus();
    }
}