// NOTE(review): extraction-garbled fragment of a Levenberg-Marquardt (LM)
// nonlinear least-squares fitter. The template header, many interior
// statements (the linear solve of H * x = v, most braces, and any delete[]
// cleanup) are missing from this view, and the original file's line numbers
// are fused into the text (e.g. "152LevMar"). Code is left byte-identical;
// everything added here is commentary.
//
// Visible contract: iteratively refines `param` (FuncT::NumParams entries)
// in place against the residuals of `func` over the data range
// [begin, end); the tail suggests success is reported when at least one
// iteration improved the fit (`retVal = usefulIter > 0`) -- TODO confirm
// against the complete source.
152LevMar(IteratorT begin, IteratorT end, FuncT& func,
typename FuncT::ScalarType* param)
154 typedef typename FuncT::ScalarType ScalarType;
158 paramDim = FuncT::NumParams
// Number of data points in the iterator range.
162 unsigned int totalSize = end - begin;
// Initial Marquardt damping factor.
167 ScalarType lambda = ScalarType(0.0001);
// Scratch buffers, all raw new[].
// NOTE(review): no matching delete[] is visible in this fragment -- verify
// every exit path frees these (or migrate to std::vector/unique_ptr<T[]>)
// to rule out leaks.
// F0: Jacobian, one paramDim-wide row per data point (row-major).
168 ScalarType* F0 =
new ScalarType[totalSize * paramDim];
// U: Gauss-Newton normal matrix J^T * J (paramDim x paramDim).
169 ScalarType* U =
new ScalarType[paramDim * paramDim];
// H: damped copy of U (U + lambda on the diagonal), built each iteration.
170 ScalarType* H =
new ScalarType[paramDim * paramDim];
// v: gradient-like vector J^T * d.
171 ScalarType* v =
new ScalarType[paramDim];
// d: per-point residuals; temp: per-point scratch for func.
172 ScalarType* d =
new ScalarType[totalSize];
173 ScalarType* temp =
new ScalarType[totalSize];
// x: parameter update step solved from H * x = v (the solve itself is not
// visible in this fragment).
174 ScalarType*
x =
new ScalarType[paramDim];
175 ScalarType* p =
new ScalarType[paramDim];
176 ScalarType* paramNew =
new ScalarType[paramDim];
178 func.Normalize(param);
179 ScalarType paramNorm = 0;
// Multi-resolution schedule: about log2(totalSize) - 8 nested subsets
// (at least 1), so early iterations run on a small sample of the data.
182 unsigned int subsets =
183 std::max(
int(std::floor(std::log((
float)totalSize) / std::log(2.f))) - 8, 1);
184#ifdef PRECISIONLEVMAR
// Fill subsetSizes back-to-front: the last subset takes the remaining
// points, each earlier one half the size of its successor.
188 for (
unsigned int i = subsetSizes.
size(); i;)
191 subsetSizes[i] = totalSize;
194 subsetSizes[i] = subsetSizes[i] >> 1;
196 totalSize -= subsetSizes[i];
198 unsigned int curSubset = 0;
199 unsigned int size = 0;
202 ScalarType chi = 0, newChi = 0;
// Iteration budget shrinks with the number of subset stages
// (non-precision build only).
204 unsigned int outerIter = 0,
205#ifndef PRECISIONLEVMAR
206 maxOuterIter = 200 / subsetSizes.
size(),
210 usefulIter = 0, totalIter = 0;
// --- per-subset outer loop (opening statements not visible) ---
// Grow the active data window and evaluate chi^2 on it.
215 size += subsetSizes[curSubset];
216 newChi = func.Chi(param, begin, begin + size, d, temp);
217 for (
unsigned int i = 0; i < paramDim; ++i)
219 paramNew[i] = param[i];
234 for (
size_t i = 0; i < paramDim; ++i)
236 param[i] = paramNew[i];
238#ifndef PRECISIONLEVMAR
// Early-out when the RMS residual is already small (the threshold operand
// is on a line missing from this view).
239 if (std::sqrt(chi / size) <
247 for (
size_t i = 0; i < paramDim; ++i)
249 paramNorm += param[i] * param[i];
251 paramNorm = std::sqrt(paramNorm);
// Jacobian of the residuals at the current parameters.
255 func.Derivatives(param, begin, begin + size, d, temp, F0);
// Accumulate the upper triangle of U = J^T * J.
// NOTE(review): outer index is int while j/k are size_t -- inconsistent,
// but OpenMP 2.x requires a signed loop index, so kept as-is.
258#pragma omp parallel for
259 for (
int i = 0; i < paramDim; ++i)
261 for (
size_t j = i; j < paramDim;
264 U[i * paramDim + j] = 0;
265 for (
size_t k = 0; k < size; ++k)
267 U[i * paramDim + j] += F0[k * paramDim + i] * F0[k * paramDim + j];
// v = J^T * d (gradient direction); vmag tracks its max-abs entry.
// NOTE(review): unqualified abs() on ScalarType values -- if ScalarType is
// float/double and only <cstdlib> is in scope this resolves to the int
// overload and truncates; should be std::abs from <cmath>. Same issue in
// the fmag scan below.
272#pragma omp parallel for
273 for (
int i = 0; i < paramDim; ++i)
276 for (
size_t k = 0; k < size; ++k)
278 v[i] += F0[k * paramDim + i] * d[k];
282 vmag = std::max(abs(v[i]), vmag);
286 for (
unsigned int i = 0; i < paramDim; ++i)
288 vmag = std::max(abs(v[i]), vmag);
// Converged when the gradient is (near) zero; the precision build uses a
// tighter threshold.
292#ifndef PRECISIONLEVMAR
293 if (vmag < ScalarType(1e-6))
295 if (vmag < ScalarType(1e-8))
// Presumably first-iteration seeding: lambda from the largest Jacobian
// entry -- TODO confirm the guard in the missing lines.
306 ScalarType fmag = abs(F0[0]);
307 for (
size_t i = 1; i < paramDim * size; ++i)
308 if (fmag < abs(F0[i]))
312 lambda = 1e-3f * fmag;
// Nielsen's damping update after an accepted step: shrink lambda by
// max(1/3, 1 - (2*rho - 1)^3), rho being the gain ratio.
// NOTE(review): rho is used here but declared outside this view.
316 lambda *= std::max(ScalarType(.3), ScalarType(1 - std::pow(2 * rho - 1, 3)));
// H = U + lambda * I (additive damping on the diagonal).
320 std::memcpy(H, U,
sizeof(ScalarType) * paramDim * paramDim);
321 for (
size_t i = 0; i < paramDim; ++i)
323 H[i * paramDim + i] += lambda;
// (The linear solve producing x from H and v is not visible here.)
327 ScalarType xNorm = 0, L = 0;
335 for (
size_t i = 0; i < paramDim; ++i)
337 xNorm +=
x[i] *
x[i];
339 xNorm = std::sqrt(xNorm);
// Converged when the step is negligible relative to the parameter norm.
340#ifndef PRECISIONLEVMAR
341 if (xNorm <= ScalarType(1e-6) * (paramNorm + ScalarType(1e-6)))
343 if (xNorm <= ScalarType(1e-8) * (paramNorm + ScalarType(1e-8)))
// Trial step: paramNew = param + x, renormalized, then re-evaluate chi^2.
351 for (
size_t i = 0; i < paramDim; ++i)
353 paramNew[i] = param[i] +
x[i];
355 func.Normalize(paramNew);
358 newChi = func.Chi(paramNew, begin, begin + size, d, temp);
// Predicted reduction L = 0.5 * x^T (lambda*x + v); gain ratio
// rho = actual / predicted reduction.
// NOTE(review): L can be ~0 for a tiny step, making rho a division by
// zero -- confirm the missing surrounding code guards this.
364 for (
size_t i = 0; i < paramDim; ++i)
366 L += .5f *
x[i] * (lambda *
x[i] + v[i]);
368 rho = (chi - newChi) / L;
// Relative-improvement test (consequent not fully visible); accepted
// parameters are copied back into param.
372#ifndef PRECISIONLEVMAR
373 if ((chi - newChi) < 1e-4 * chi)
377 for (
size_t i = 0; i < paramDim; ++i)
379 param[i] = paramNew[i];
// Rejected step: inflate lambda by nu; nu2 doubles nu for next time.
// NOTE(review): nu2 is computed but never used within this view, and nu is
// never visibly updated -- check the full source for `nu = nu2;`.
390 lambda = nu * lambda;
391 size_t nu2 = nu << 1;
397 }
while (outerIter < maxOuterIter);
399 }
while (curSubset < subsetSizes.
size());
// Success iff at least one iteration improved the fit.
400 retVal = usefulIter > 0;
// NOTE(review): second Levenberg-Marquardt variant, also an extraction
// fragment -- the enclosing function's signature sits above this view and
// interior lines (including the linear solve and buffer cleanup) are
// missing. Residuals come from an array of per-pixel functors `funcs[i]`
// over `imgDim` samples; `param` is refined in place. Code is kept
// byte-identical; only comments are added.
421 size_t size = imgDim;
// Initial damping factor.
// NOTE(review): declared `float` while everything else uses ScalarT -- if
// ScalarT is double this silently narrows; should likely be ScalarT.
422 float lambda = ScalarT(0.0001);
// Scratch buffers, raw new[] as in the other variant.
// NOTE(review): no delete[] visible in this fragment -- verify cleanup on
// all exit paths (or use std::vector).
// F0: Jacobian rows; U: J^T*J; H: damped copy; v: J^T*d;
// d/dNew: residuals before/after the trial step; x: solved update step.
423 ScalarT* F0 =
new ScalarT[size * paramDim];
424 ScalarT* U =
new ScalarT[paramDim * paramDim];
425 ScalarT* H =
new ScalarT[paramDim * paramDim];
426 ScalarT* v =
new ScalarT[paramDim];
427 ScalarT* d =
new ScalarT[size];
428 ScalarT* dNew =
new ScalarT[size];
429 ScalarT*
x =
new ScalarT[paramDim];
430 ScalarT* p =
new ScalarT[paramDim];
431 ScalarT* paramNew =
new ScalarT[paramDim];
432 size_t outerIter = 0, maxOuterIter = 10;
434 ScalarT chi = 0, newChi;
// Evaluate the residual of each functor at the current parameters.
435 for (
size_t i = 0; i < size; ++i)
437 d[i] = (*(funcs[i]))(param);
// Shrink damping between outer iterations (context lines missing).
443 lambda *= ScalarT(0.04);
// Jacobian row per functor, written directly into F0.
447 for (
size_t i = 0; i < size; ++i)
449 (*(funcs[i]))(param, &F0[i * paramDim]);
// Upper triangle of U = J^T * J (serial here, unlike the OpenMP variant).
452 for (
size_t i = 0; i < paramDim; ++i)
453 for (
size_t j = i; j < paramDim; ++j)
455 U[i * paramDim + j] = 0;
456 for (
size_t k = 0; k < size; ++k)
457 U[i * paramDim + j] += F0[k * paramDim + i] * F0[k * paramDim + j];
// v = J^T * d.
// NOTE(review): v[i] is not visibly zeroed before the += accumulation --
// presumably done in a missing line; confirm against the full source.
460 for (
size_t i = 0; i < paramDim; ++i)
463 for (
size_t k = 0; k < size; ++k)
465 v[i] += F0[k * paramDim + i] * d[k];
// --- inner damping loop (do-statement not visible) ---
469 size_t iter = 0, maxIter = 10;
474 lambda = 10 * lambda;
// H = U with Marquardt-style scaled damping on the diagonal:
// H_ii += lambda * (1 + H_ii), i.e. a blend of additive and
// multiplicative damping.
// NOTE(review): unqualified memcpy -- relies on a using-declaration or
// <string.h>; the other variant uses std::memcpy.
475 memcpy(H, U,
sizeof(ScalarT) * paramDim * paramDim);
476 for (
size_t i = 0; i < paramDim; ++i)
478 H[i * paramDim + i] += lambda * (ScalarT(1) + H[i * paramDim + i]);
// (Linear solve for x not visible.) Trial step: paramNew = param + x.
487 for (
size_t i = 0; i < paramDim; ++i)
489 paramNew[i] = param[i] +
x[i];
// Re-evaluate residuals at the trial parameters and accumulate chi^2.
// NOTE(review): newChi is never visibly reset to 0 before this += --
// presumably done in a missing line; verify.
493 for (
size_t i = 0; i < size; ++i)
495 dNew[i] = (*(funcs[i]))(paramNew);
496 newChi += dNew[i] * dNew[i];
// Converged when the relative chi^2 change is below 1e-4.
// NOTE(review): divides by chi with no visible guard -- chi == 0 (perfect
// fit) would divide by zero; also unqualified abs() on a ScalarT (see the
// std::abs overload pitfall noted in the other variant).
511 if ( abs(chi - newChi) / chi < ScalarT(1e-4))
513 for (
size_t i = 0; i < paramDim; ++i)
515 param[i] = paramNew[i];
// Inner loop retries with larger lambda while the step made things worse.
520 }
while (newChi > chi && iter < maxIter);
// Commit the (accepted) trial parameters for the next outer iteration.
524 for (
size_t i = 0; i < paramDim; ++i)
526 param[i] = paramNew[i];
530 }
while (outerIter < maxOuterIter);