Ocean
Loading...
Searching...
No Matches
NonLinearOptimization.h
Go to the documentation of this file.
1/*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 *
4 * This source code is licensed under the MIT license found in the
5 * LICENSE file in the root directory of this source tree.
6 */
7
8#ifndef META_OCEAN_GEOMETRY_NON_LINEAR_OPTIMIZATION_H
9#define META_OCEAN_GEOMETRY_NON_LINEAR_OPTIMIZATION_H
10
13
14#include "ocean/base/Accessor.h"
16
17#include "ocean/math/Matrix.h"
20
21namespace Ocean
22{
23
24namespace Geometry
25{
26
27/**
28 * This class implements the basic functions for least square or robust optimization algorithms for non linear functions.
29 * The actual optimization algorithms are located in derived functions.
30 * @ingroup geometry
31 */
32class OCEAN_GEOMETRY_EXPORT NonLinearOptimization
33{
34 template <unsigned int, unsigned int, unsigned int> friend class NonLinearUniversalOptimizationDense;
35
36 public:
37
38 /**
39 * This class implements the base class for an accessor of groups of pairs.
40 * Each group of pairs may have an arbitrary number of elements.
41 * @tparam TFirst The data type of the first element of each pair
42 * @tparam TSecond The data type of the second element of each pair
43 */
44 template <typename TFirst, typename TSecond>
46 {
47 protected:
48
49 /**
50 * Definition of a pair combining an object point id with an image point.
51 */
52 using Element = std::pair<Index32, Vector2>;
53
54 /**
55 * Definition of a vector holding elements.
56 */
57 using Elements = std::vector<Element>;
58
59 /**
60 * Definition of a vector holding a group of elements.
61 */
62 using ElementGroups = std::vector<Elements>;
63
64 public:
65
66 /**
67 * Copy constructor.
68 * @param accessor Accessor to copy
69 */
71
72 /**
73 * Move constructor.
74 * @param accessor Accessor to move
75 */
77
78 /**
79 * Returns the number of groups of this accessor.
80 * @return The number of groups
81 */
82 inline size_t groups() const;
83
84 /**
85 * Returns the number of elements within a specified group.
86 * @param groupIndex The index of the group for which the number of elements is requested, with range [0, groups())
87 * @return The number of elements within the specified group
88 */
89 inline size_t groupElements(const size_t groupIndex) const;
90
91 /**
92 * Returns one pair of a specific group of this object.
93 * @param groupIndex The index of the group, with range [0, groups())
94 * @param elementIndex The index of the element within the specified group, with range [0, groupElements(groupIndex))
95 * @param first The resulting first element of the specified pair
96 * @param second The resulting second element of the specified pair
97 */
98 inline void element(const size_t groupIndex, const size_t elementIndex, TFirst& first, TSecond& second) const;
99
100 /**
101 * Returns the first element of a pair of a specific group of this object.
102 * @param groupIndex The index of the group, with range [0, groups())
103 * @param elementIndex The index of the element within the specified group, with range [0, groupElements(groupIndex))
104 * @return The first element of the specified pair
105 */
106 inline const TFirst& firstElement(const size_t groupIndex, const size_t elementIndex) const;
107
108 /**
109 * Returns the second element of a pair of a specific group of this object.
110 * @param groupIndex The index of the group, with range [0, groups())
111 * @param elementIndex The index of the element within the specified group, with range [0, groupElements(groupIndex))
112 * @return The second element of the specified pair
113 */
114 inline const TSecond& secondElement(const size_t groupIndex, const size_t elementIndex) const;
115
116 /**
117 * Copy constructor.
118 * @param accessor Accessor to copy
119 */
121
122 /**
123 * Move constructor.
124 * @param accessor Accessor to move
125 */
127
128 protected:
129
130 /**
131 * Creates a new accessor object.
132 */
134
135 /**
136 * Creates a new accessor object.
137 * @param elementGroups The element groups of the object
138 */
139 inline CorrespondenceGroupsAccessor(const ElementGroups& elementGroups);
140
141 /**
142 * Creates a new accessor object.
143 * @param elementGroups The element groups of the object to move
144 */
146
147 protected:
148
149 /// The groups of elements of this accessor.
151 };
152
153 /**
154 * This class implements an abstract specialization of the accessor for groups of pairs for object points.
155 * The accessor provides one group for each object point while each group holds pairs of correspondences between poses and image points.
156 */
158 {
159 protected:
160
161 /**
162 * Creates a new accessor object.
163 */
165
166 /**
167 * Creates a new accessor object.
168 * @param elementGroups The element groups of the object
169 */
170 inline ObjectPointGroupsAccessor(const ElementGroups& elementGroups);
171
172 /**
173 * Creates a new accessor object.
174 * @param elementGroups The element groups of the object to move
175 */
176 inline ObjectPointGroupsAccessor(ElementGroups&& elementGroups);
177 };
178
179 /**
180 * This class implements an abstract specialization of the accessor for groups of pairs for poses.
181 * The accessor provides one group for each pose while each group holds pairs of correspondences between object points and image points.
 * Each pair combines the index of an object point (Index32) with the location of its observed image point (Vector2), as fixed by the base class template arguments.
182 */
183 class PoseGroupsAccessor : public CorrespondenceGroupsAccessor<Index32, Vector2>
184 {
185 protected:
186
187 /**
188 * Creates a new accessor object.
 * NOTE(review): presumably creates an accessor without any groups — confirm against the inline definition (not visible in this extract).
189 */
190 inline PoseGroupsAccessor();
191
192 /**
193 * Creates a new accessor object.
194 * @param elementGroups The element groups of the object, one group per pose; each element pairs an object point index with an image point
195 */
196 inline PoseGroupsAccessor(const ElementGroups& elementGroups);
197
198 /**
199 * Creates a new accessor object.
200 * @param elementGroups The element groups of the object to move; the groups are moved into this accessor
201 */
202 inline PoseGroupsAccessor(ElementGroups&& elementGroups);
203 };
204
205 /**
206 * This class implements a group accessor providing access to pairs of poses and image points.
207 * The groups of pairs have the following structure, each object point can have an arbitrary number of pairs:
208 * <pre>
209 * objectPoint_0 -> (pose_0, imagePoint_0)
210 * -> (pose_1, imagePoint_4)
211 * -> (pose_5, imagePoint_9)
212 *
213 * objectPoint_1 -> (pose_0, imagePoint_2)
214 *
215 * objectPoint_2 -> (pose_2, imagePoint_3)
216 * (pose_1, imagePoint_8)
217 *
218 * objectPoint_3 -> (pose_9, imagePoint_5)
219 * </pre>
220 */
222 {
223 public:
224
225 /**
226 * Default constructor.
227 */
229
230 /**
231 * Creates a new accessor object for two sets of image points which are the observations of the same object points in two individual camera frames.
232 * @param imagePoints0 The first set of image points which are the observations of the object points in the first pose
233 * @param imagePoints1 The second set of image points which are the observations of the object points in the second pose
234 * @tparam TAccessor The data type of the accessor providing the image points
235 */
236 template <typename TAccessor>
237 ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const TAccessor& imagePoints0, const TAccessor& imagePoints1);
238
239 /**
240 * Creates a new accessor object for a set of object points all observed by the same number of image points in a set of camera poses.
241 * Thus, the number of corresponding image points for one object points is identical to the number of camera poses.
242 * @param imagePointGroups The group of image points observing the object points, one group for each object point, the indices of the image points are identical to the indices of the corresponding camera poses
243 * @tparam TAccessor The data type of the accessor providing the image points
244 */
245 template <typename TAccessor>
246 explicit ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const std::vector<TAccessor>& imagePointGroups);
247
248 /**
249 * Creates a new accessor object for a set of object points all observed by the same number of image points in a set of camera poses.
250 * Thus, the number of corresponding image points for one object points is identical to the number of camera poses.<br>
251 * The provided image points have the following pattern for n object points:<br>
252 * <pre>
253 * Group 0 / Object Point 0 , Group 1 / Object Point 1 , Group 2 / Object Point2 ...
254 * i0, i1, i2, i3, i4, i5, ..., in, in+1, in+2, in+3, in+5, ..., in+n+1, in+n+2, ...
255 * </pre>
256 * where i0, in+1, in+n+1 ... are the image points located in the first camera frame; i1, in+2, in+n+2 ... are image points located in the second frame a.s.o.
257 * @param sequentialImagePointGroups The group of image points observing the object points provided sequentially, one sequential group for each object point, the indices of the image points (within the sequential groups) are identical to the indices of the corresponding camera poses, while sequentialImagePointGroups.size() is a multiple of numberObjectPoints
258 * @param numberObjectPoints The number of object points (the number of sequential groups of image points), with range [1, infinity)
259 * @tparam TAccessor The data type of the accessor providing the image points
260 */
261 template <typename TAccessor>
262 ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const TAccessor& sequentialImagePointGroups, const size_t numberObjectPoints);
263
264 /**
265 * Creates a new accessor object for a set of object points all observed by the same number of image points in a set of camera poses.
266 * Thus, the number of corresponding image points for one object points is identical to the number of camera poses.<br>
267 * The provided image points have the following pattern for n object points:<br>
268 * <pre>
269 * Camera pose 0 , Camera pose 1 , Camera pose 2 ...
270 * i0, i1, i2, i3, i4, i5, ..., in, in+1, in+2, in+3, in+5, ..., in+n+1, in+n+2, ...
271 * </pre>
272 * where i0, in+1, in+n+1 ... are the image points observing the first object point; i1, in+2, in+n+2 ... are image points observing the second object point a.s.o.
273 * @param numberObjectPoints The number of object points, with range [1, infinity)
274 * @param imagePoints The set of image points observing the object points provided in sequential groups, each group holds all image points for one camera pose, while imagePoints.size() is a multiple of numberObjectPoints
275 * @tparam TAccessor The data type of the accessor providing the image points
276 */
277 template <typename TAccessor>
278 ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const size_t numberObjectPoints, const TAccessor& imagePoints);
279
280 /**
281 * Adds the observations of a new object point to this accessor.
282 * @param poseIdImagePointPairs The pairs combining the pose id and the image point locations, at least one
283 * @return The index of the new object point which has been added, with range [0, infinity)
284 */
285 inline size_t addObjectPoint(Elements&& poseIdImagePointPairs);
286 };
287
288 /**
289 * This class implements the base optimization provider.
290 */
292 {
293 public:
294
295 /**
296 * Returns whether the provider comes with an own solver for the linear equation.
297 * @return True, if so
298 */
299 inline bool hasSolver() const;
300
301 /**
302 * Solves the linear equation JTJ * deltas = jErrors.
303 * @param JTJ The Hesse matrix (transposed jacobian multiplied by the jacobian)
304 * @param jErrors The individual error values
305 * @param deltas The resulting individual delta values
306 * @return True, if the equation could be solved
307 */
308 inline bool solve(const Matrix& JTJ, const Matrix& jErrors, Matrix& deltas) const;
309
310 /**
311 * Solves the linear equation JTJ * deltas = jErrors.
312 * @param JTJ The Hesse matrix (transposed jacobian multiplied by the jacobian)
313 * @param jErrors The individual error values
314 * @param deltas The resulting individual delta values
315 * @return True, if the equation could be solved
316 */
317 inline bool solve(const SparseMatrix& JTJ, const Matrix& jErrors, Matrix& deltas) const;
318 };
319
320 /**
321 * This class implements the base for an advanced dense optimization provider.
322 * The advanced optimization provider is able to determine an overall error as well as the entire Hessian matrix and Jacobian-Error vector for any intermediate Levenberg-Marquardt optimization step.
323 * @see OptimizationProvider
324 */
326 {
327 public:
328
329 /**
330 * Determines the error for the current model candidate (not the actual model).
331 * Needs to be implemented in the actual provider.
332 * @return The error for the current model candidate
333 */
334 inline Scalar determineError();
335
336 /**
337 * Determines the Hessian matrix and the Error-Jacobian vector based on the actual/current model (the transposed jacobian multiplied with the individual errors).
338 * Needs to be implemented in the actual provider.
339 * @param hessian The resulting (n x n) Hessian matrix for a model with dimension n, which is the transposed Jacobian multiplied with the Jacobian, may be the approximated Hessian
340 * @param jacobianError The resulting (n x 1) Error-Jacobian vector
341 * @return True, if succeeded
342 */
343 inline bool determineHessianAndErrorJacobian(Matrix& hessian, Matrix& jacobianError);
344
345 /**
346 * Creates a new model candidate by adjusting the current model with delta values.
347 * Needs to be implemented in the actual provider.
348 * @param deltas The delta values to be applied
349 */
350 inline void applyCorrection(const Matrix& deltas);
351
352 /**
353 * Accepts the current model candidate as a new (better) model than the previous one.
354 * Needs to be implemented in the actual provider.
355 */
356 inline void acceptCorrection();
357
358 /**
359 * Returns whether the optimization process should stop e.g., due to an external event.
360 * Needs to be implemented in the actual provider.
361 * @return True, to stop the optimization process
362 */
363 inline bool shouldStop();
364
365 /**
366 * Returns whether the provider comes with an own solver for the linear equation.
367 * @return True, if so
368 */
369 inline bool hasSolver() const;
370
371 /**
372 * Solves the linear equation Hessian * deltas = -jacobianError.
373 * @param hessian The Hesse matrix (transposed jacobian multiplied by the jacobian)
374 * @param jacobianError The transposed jacobian multiplied with the individual errors
375 * @param deltas The resulting individual delta values
376 * @return True, if the equation could be solved
377 */
378 inline bool solve(const Matrix& hessian, const Matrix& jacobianError, Matrix& deltas) const;
379
380 protected:
381
382 /**
383 * Protected default constructor.
384 */
386 };
387
388 /**
389 * This class implements the base class for an advanced sparse optimization provider.
390 * @see AdvancedDenseOptimizationProvider, OptimizationProvider
391 */
393 {
394 public:
395
396 /**
397 * Determines the error for the current model candidate (not the actual model).
398 * Needs to be implemented in the actual provider.
399 * @return The error for the current model candidate
400 */
401 inline Scalar determineError();
402
403 /**
404 * Determines any kind of (abstract) parameters based on the current/actual model (not the model candidate) e.g., the Jacobian parameters and/or a Hessian matrix.
405 * The provide is responsible for the actual data.<br>
406 * Needs to be implemented in the actual provider.
407 * @return True, if succeeded
408 */
409 inline bool determineParameters();
410
411 /**
412 * Creates a new model candidate by adjusting the current/actual model with delta values.
413 * Needs to be implemented in the actual provider.
414 * @param deltas The delta values to be applied
415 */
416 inline void applyCorrection(const Matrix& deltas);
417
418 /**
419 * Accepts the current model candidate as a new (better) model than the previous one.
420 * Needs to be implemented in the actual provider.
421 */
422 inline void acceptCorrection();
423
424 /**
425 * Returns whether the optimization process should stop e.g., due to an external event.
426 * Needs to be implemented in the actual provider.
427 * @return True, to stop the optimization process
428 */
429 inline bool shouldStop();
430
431 /**
432 * Solves the linear equation Hessian * deltas = -jacobianError based on the internal data.
433 * The diagonal of the Hessian matrix will be multiplied by (1 + lambda) before the actual solving starts.<br>
434 * This function may be invoked several times before determineParameters() is invoked again so that the diagonal of the original Hessian matrix should be copied.
435 * @param deltas The resulting individual delta values
436 * @param lambda Optional lambda value for a Levenberg-Marquardt optimization, with range [0, infinity)
437 * @return True, if the equation could be solved
438 */
439 inline bool solve(Matrix& deltas, const Scalar lambda = Scalar(0));
440
441 protected:
442
443 /**
444 * Protected default constructor.
445 */
447 };
448
449 public:
450
451 /**
452 * Invokes the optimization of a dense (matrix) optimization problem.
453 * @param provider The optimization provider that is used during the optimization
454 * @param iterations Number of optimization iterations
455 * @param estimator Robust estimator to be applied
456 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
457 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
458 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
459 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
460 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
461 * @param intermediateErrors Optional resulting intermediate (improving) errors in relation to the defined estimator
462 * @return True, if at least one successful optimization iteration has been executed
463 * @tparam T Data type of the optimization provider
464 */
465 template <typename T>
466 static inline bool denseOptimization(T& provider, const unsigned int iterations = 5u, const Estimator::EstimatorType estimator = Estimator::ET_SQUARE, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);
467
468 /**
469 * Invokes the optimization of a dense (matrix) optimization problem.
470 * @param provider The optimization provider that is used during the optimization
471 * @param iterations Number of optimization iterations
472 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
473 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
474 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
475 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
476 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
477 * @param intermediateErrors Optional resulting intermediate (improving) errors
478 * @return True, if at least one successful optimization iteration has been executed
479 * @tparam T Data type of the optimization provider
480 * @tparam tEstimator Type of the robust estimator to be applied
481 */
482 template <typename T, Estimator::EstimatorType tEstimator>
483 static bool denseOptimization(T& provider, const unsigned int iterations = 5u, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);
484
485 /**
486 * Invokes the optimization of a sparse (matrix) optimization problem.
487 * @param provider The optimization provider that is used during the optimization
488 * @param iterations Number of optimization iterations
489 * @param estimator Robust estimator to be applied
490 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
491 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
492 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
493 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
494 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
495 * @param intermediateErrors Optional resulting intermediate (improving) errors
496 * @return True, if at least one successful optimization iteration has been executed
497 * @tparam T Data type of the optimization provider
498 */
499 template <typename T>
500 static bool sparseOptimization(T& provider, const unsigned int iterations = 5u, const Estimator::EstimatorType estimator = Estimator::ET_SQUARE, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);
501
502 /**
503 * Invokes the optimization of a sparse (matrix) optimization problem.
504 * @param provider The optimization provider that is used during the optimization
505 * @param iterations Number of optimization iterations
506 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
507 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
508 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
509 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
510 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
511 * @param intermediateErrors Optional resulting intermediate (improving) errors
512 * @return True, if at least one successful optimization iteration has been executed
513 * @tparam T Data type of the optimization provider
514 * @tparam tEstimator Type of the robust estimator to be applied
515 */
516 template <typename T, Estimator::EstimatorType tEstimator>
517 static bool sparseOptimization(T& provider, const unsigned int iterations = 5u, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);
518
519 /**
520 * Invokes the optimization of a dense (matrix) optimization problem using an advanced optimization provider.
521 * The optimization can use a Levenberg-Marquardt approach or a Gauss-Newton approach.
522 * @param advancedDenseProvider The advanced dense optimization provider that is used during the optimization
523 * @param iterations Number of optimization iterations
524 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity), 0 to apply a Gauss-Newton optimization approach
525 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity), 1 to apply a Gauss-Newton optimization approach
526 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
527 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
528 * @param intermediateErrors Optional resulting intermediate (improving) errors
529 * @return True, if at least one successful optimization iteration has been executed
530 * @tparam T Data type of the advanced dense optimization provider, must be derived from AdvancedDenseOptimizationProvider
531 */
532 template <typename T>
533 static bool advancedDenseOptimization(T& advancedDenseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError = nullptr, Scalar* finalError = nullptr, Scalars* intermediateErrors = nullptr);
534
535 /**
536 * Invokes the optimization of a sparse (matrix) optimization problem using an advanced optimization provider.
537 * The optimization can use a Levenberg-Marquardt approach or a Gauss-Newton approach.
538 * @param advancedSparseProvider The advanced sparse optimization provider that is used during the optimization
539 * @param iterations Number of optimization iterations
540 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity), 0 to apply a Gauss-Newton optimization approach
541 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity), 1 to apply a Gauss-Newton optimization approach
542 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
543 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
544 * @param intermediateErrors Optional resulting intermediate (improving) errors
545 * @return True, if at least one successful optimization iteration has been executed
546 * @tparam T Data type of the advanced sparse optimization provider, must be derived from AdvancedSparseOptimizationProvider
547 */
548 template <typename T>
549 static bool advancedSparseOptimization(T& advancedSparseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError = nullptr, Scalar* finalError = nullptr, Scalars* intermediateErrors = nullptr);
550
551 /**
552 * Translates the n/2 squared errors that correspond to n elements in the error vector to robust errors.
553 * @param sqrErrors The n/2 squared errors
554 * @param modelParameters Number of parameters that define the model that has to be optimized
555 * @param weightedErrors Initial n errors that will be translated to robust errors
556 * @param weightVectors The n individual weights that have been applied to the errors
557 * @param transposedInvertedCovariances Optional transposed 2x2 inverted covariance matrices, one for each pair of errors
558 * @return Resulting averaged robust error of the entire data set
559 * @tparam tEstimator Robust error estimator to be used
560 */
561 template <Estimator::EstimatorType tEstimator>
562 static Scalar sqrErrors2robustErrors2(const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances);
563
564 /**
565 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
566 * @param sqrErrors The n/i squared errors
567 * @param modelParameters Number of parameters that define the model that has to be optimized
568 * @param weightedErrors Initial n errors that will be translated to robust errors
569 * @param weightVectors The n individual weights that have been applied to the errors
570 * @param transposedInvertedCovariances Optional transposed ixi inverted covariance matrices, one for each pair of errors
571 * @return Resulting averaged robust error of the entire data set
572 * @tparam tEstimator Robust error estimator to be used
573 * @tparam tDimension Dimension of one error element
574 */
575 template <Estimator::EstimatorType tEstimator, size_t tDimension>
576 static Scalar sqrErrors2robustErrors(const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances);
577
578 /**
579 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
580 * @param sqrErrors The n/i squared errors
581 * @param modelParameters Number of parameters that define the model that has to be optimized
582 * @param dimension The dimension of one error element, with range [1, infinity)
583 * @param weightedErrors_i Initial n errors that will be translated to robust errors
584 * @param weightVectors_i The n individual weights that have been applied to the errors
585 * @param transposedInvertedCovariances_i Optional transposed ixi inverted covariance matrices, one for each pair of errors
586 * @return Resulting averaged robust error of the entire data set
587 * @tparam tEstimator Robust error estimator to be used
588 */
589 template <Estimator::EstimatorType tEstimator>
590 static Scalar sqrErrors2robustErrors_i(const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i);
591
592 /**
593 * Translates the n/2 squared errors that correspond to n elements in the error vector to robust errors.
594 * @param estimator Robust error estimator to be used
595 * @param sqrErrors The n/2 squared errors
596 * @param modelParameters Number of parameters that define the model that has to be optimized
597 * @param weightedErrors Initial n errors that will be translated to robust errors
598 * @param weightVectors The n individual weights that have been applied to the errors
599 * @param transposedInvertedCovariances Optional transposed 2x2 inverted covariance matrices, one for each pair of errors
600 * @return Resulting averaged robust error of the entire data set
601 */
602 static inline Scalar sqrErrors2robustErrors2(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances);
603
604 /**
605 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
606 * @param estimator Robust error estimator to be used
607 * @param sqrErrors The n/i squared errors
608 * @param modelParameters Number of parameters that define the model that has to be optimized
609 * @param weightedErrors Initial n errors that will be translated to robust errors
610 * @param weightVectors The n individual weights that have been applied to the errors
611 * @param transposedInvertedCovariances Optional transposed ixi inverted covariance matrices, one for each pair of errors
612 * @return Resulting averaged robust error of the entire data set
613 * @tparam tDimension Dimension of one error element
614 */
615 template <size_t tDimension>
616 static inline Scalar sqrErrors2robustErrors(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances);
617
618 /**
619 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
620 * @param estimator Robust error estimator to be used
621 * @param sqrErrors The n/i squared errors
622 * @param modelParameters Number of parameters that define the model that has to be optimized
623 * @param dimension The dimension of one error element, with range [1, infinity)
624 * @param weightedErrors_i Initial n errors that will be translated to robust errors
625 * @param weightVectors_i The n individual weights that have been applied to the errors
626 * @param transposedInvertedCovariances_i Optional transposed ixi inverted covariance matrices, one for each pair of errors
627 * @return Resulting averaged robust error of the entire data set
628 */
629 static inline Scalar sqrErrors2robustErrors_i(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i);
630};
631
632template <typename TFirst, typename TSecond>
637
638template <typename TFirst, typename TSecond>
640 elementGroups_(elementGroups)
641{
642 // nothing to do here
643}
644
645template <typename TFirst, typename TSecond>
647 elementGroups_(std::move(elementGroups))
648{
649 // nothing to do here
650}
651
652template <typename TFirst, typename TSecond>
654 elementGroups_(accessor.elementGroups_)
655{
656 // nothing to do here
657}
658
659template <typename TFirst, typename TSecond>
661 elementGroups_(std::move(accessor.elementGroups_))
662{
663 // nothing to do here
664}
665
666template <typename TFirst, typename TSecond>
668{
669 return elementGroups_.size();
670}
671
672template <typename TFirst, typename TSecond>
674{
675 ocean_assert(groupIndex < groups());
676
677 return elementGroups_[groupIndex].size();
678}
679
680template <typename TFirst, typename TSecond>
681inline void NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::element(const size_t groupIndex, const size_t elementIndex, TFirst& first, TSecond& second) const
682{
683 ocean_assert(groupIndex < groups());
684 ocean_assert(elementIndex < groupElements(groupIndex));
685
686 const Element& element = elementGroups_[groupIndex][elementIndex];
687
688 first = element.first;
689 second = element.second;
690}
691
692template <typename TFirst, typename TSecond>
693inline const TFirst& NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::firstElement(const size_t groupIndex, const size_t elementIndex) const
694{
695 ocean_assert(groupIndex < groups());
696 ocean_assert(elementIndex < groupElements(groupIndex));
697
698 return elementGroups_[groupIndex][elementIndex].first;
699}
700
701template <typename TFirst, typename TSecond>
702inline const TSecond& NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::secondElement(const size_t groupIndex, const size_t elementIndex) const
703{
704 ocean_assert(groupIndex < groups());
705 ocean_assert(elementIndex < groupElements(groupIndex));
706
707 return elementGroups_[groupIndex][elementIndex].second;
708}
709
710template <typename TFirst, typename TSecond>
716
717template <typename TFirst, typename TSecond>
719{
720 if (this != &accessor)
721 {
722 elementGroups_ = std::move(accessor.elementGroups_);
723 }
724
725 return *this;
726}
727
732
735{
736 // nothing to do here
737}
738
740 CorrespondenceGroupsAccessor<Index32, Vector2>(std::move(elementGroups))
741{
742 // nothing to do here
743}
744
746{
747 // nothing to do here
748}
749
750inline NonLinearOptimization::PoseGroupsAccessor::PoseGroupsAccessor(const ElementGroups& elementGroups) :
752{
753 // nothing to do here
754}
755
757 CorrespondenceGroupsAccessor<Index32, Vector2>(std::move(elementGroups))
758{
759 // nothing to do here
760}
761
763{
764 return false;
765}
766
767inline bool NonLinearOptimization::OptimizationProvider::solve(const Matrix& /*JTJ*/, const Matrix& /*jErrors*/, Matrix& /*deltas*/) const
768{
769 ocean_assert(false && "Must be implemented in a derived class!");
770 return false;
771}
772
773inline bool NonLinearOptimization::OptimizationProvider::solve(const SparseMatrix& /*JTJ*/, const Matrix& /*jErrors*/, Matrix& /*deltas*/) const
774{
775 ocean_assert(false && "Must be implemented in a derived class!");
776 return false;
777}
778
783
785{
786 ocean_assert(false && "Must be implemented in a derived class!");
787 return Numeric::maxValue();
788}
789
791{
792 ocean_assert(false && "Must be implemented in a derived class!");
793 return false;
794}
795
797{
798 ocean_assert(false && "Must be implemented in a derived class!");
799}
800
802{
803 ocean_assert(false && "Must be implemented in a derived class!");
804}
805
807{
808 ocean_assert(false && "Must be implemented in a derived class!");
809 return true;
810}
811
813{
814 return false;
815}
816
817inline bool NonLinearOptimization::AdvancedDenseOptimizationProvider::solve(const Matrix& /*hessian*/, const Matrix& /*jacobianError*/, Matrix& /*deltas*/) const
818{
819 ocean_assert(false && "Must be implemented in a derived class!");
820 return false;
821}
822
827
829{
830 ocean_assert(false && "Must be implemented in a derived class!");
831 return Numeric::maxValue();
832}
833
835{
836 ocean_assert(false && "Must be implemented in a derived class!");
837 return false;
838}
839
841{
842 ocean_assert(false && "Must be implemented in a derived class!");
843}
844
846{
847 ocean_assert(false && "Must be implemented in a derived class!");
848}
849
851{
852 ocean_assert(false && "Must be implemented in a derived class!");
853 return true;
854}
855
857{
858 ocean_assert(false && "Must be implemented in a derived class!");
859 return true;
860}
861
862template <typename TAccessor>
864 ObjectPointGroupsAccessor(ElementGroups(imagePoints0.size(), Elements(2)))
865{
866 ocean_assert(imagePoints0.size() == imagePoints1.size());
867
868 for (size_t n = 0; n < imagePoints0.size(); ++n)
869 {
870 elementGroups_[n][0].first = 0;
871 elementGroups_[n][0].second = imagePoints0[n];
872
873 elementGroups_[n][1].first = 1;
874 elementGroups_[n][1].second = imagePoints1[n];
875 }
876}
877
878template <typename TAccessor>
880 ObjectPointGroupsAccessor(ElementGroups(imagePointGroups.size()))
881{
882 ocean_assert(elementGroups_.size() == imagePointGroups.size());
883
884 for (size_t n = 0; n < imagePointGroups.size(); ++n)
885 {
886 const ConstIndexedAccessor<Vector2>& imagePoints = imagePointGroups[n];
887 ocean_assert(imagePoints.size() >= 2);
888
889 Elements poseIndexImagePoints;
890 poseIndexImagePoints.reserve(imagePoints.size());
891
892 for (size_t i = 0; i < imagePoints.size(); ++i)
893 {
894 poseIndexImagePoints.emplace_back(Index32(i), imagePoints[i]);
895 }
896
897 elementGroups_[n] = std::move(poseIndexImagePoints);
898 }
899}
900
901template <typename TAccessor>
903 ObjectPointGroupsAccessor(ElementGroups(numberObjectPoints))
904{
905 ocean_assert(elementGroups_.size() == numberObjectPoints);
906 ocean_assert(sequentialImagePointGroups.size() % numberObjectPoints == 0);
907
908 const unsigned int numberImagePoints = (unsigned int)(sequentialImagePointGroups.size() / numberObjectPoints);
909
910 unsigned int index = 0u;
911
912 for (size_t n = 0; n < numberObjectPoints; ++n)
913 {
914 Elements poseIndexImagePoints;
915 poseIndexImagePoints.reserve(numberImagePoints);
916
917 for (unsigned int i = 0u; i < numberImagePoints; ++i)
918 {
919 poseIndexImagePoints.emplace_back(Index32(i), sequentialImagePointGroups[index++]);
920 }
921
922 elementGroups_[n] = std::move(poseIndexImagePoints);
923 }
924}
925
926template <typename TAccessor>
928 ObjectPointGroupsAccessor(ElementGroups(numberObjectPoints, Elements(numberObjectPoints == 0 ? 0 : (imagePoints.size() / numberObjectPoints))))
929{
930 ocean_assert(elementGroups_.size() == numberObjectPoints);
931 ocean_assert(imagePoints.size() % numberObjectPoints == 0);
932
933 ocean_assert(numberObjectPoints != 0);
934 const unsigned int numberImagePoints = (unsigned int)(imagePoints.size() / numberObjectPoints);
935
936 unsigned int poseIndex = 0u;
937 unsigned int index = 0u;
938
939 for (size_t n = 0; n < numberImagePoints; ++n)
940 {
941 for (size_t i = 0; i < numberObjectPoints; ++i)
942 {
943 ocean_assert(elementGroups_[i].size() == numberImagePoints);
944 elementGroups_[i][poseIndex] = Element(poseIndex, imagePoints[index++]);
945 }
946
947 poseIndex++;
948 }
949}
950
952{
953 ocean_assert(!poseIdImagePointPairs.empty());
954
955 const size_t objectPointIndex = elementGroups_.size();
956
957 elementGroups_.emplace_back(std::move(poseIdImagePointPairs));
958
959 return objectPointIndex;
960}
961
962template <typename T>
963inline bool NonLinearOptimization::denseOptimization(T& provider, const unsigned int iterations, const Estimator::EstimatorType estimator, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
964{
965 switch (estimator)
966 {
968 return denseOptimization<T, Estimator::ET_SQUARE>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
969
971 return denseOptimization<T, Estimator::ET_LINEAR>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
972
974 return denseOptimization<T, Estimator::ET_HUBER>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
975
977 return denseOptimization<T, Estimator::ET_TUKEY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
978
980 return denseOptimization<T, Estimator::ET_CAUCHY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
981
982 default:
983 ocean_assert(false && "Invalid estimator!");
984 return false;
985 }
986}
987
988template <typename T, Estimator::EstimatorType tEstimator>
989bool NonLinearOptimization::denseOptimization(T& provider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
990{
991 constexpr Scalar maxLambda = Scalar(1e8);
992
993 ocean_assert(lambda >= Numeric::eps() && lambda <= maxLambda);
994
995 Matrix jacobian;
996
997 // the error vectors are weighted for non-square estimators only
998 Matrix weightedErrorVector, swapWeightedErrorVector;
999 // the weight vectors are used for non-square estimators only
1000 Matrix weightVector, swapWeightVector;
1001
1002 Matrix JTJ, jErrors, deltas;
1003
1004 // intermediate matrices that are used if a covariance matrix is provided
1005 Matrix intermediateCovarianceJacobian;
1006 Matrix intermediateWeightedErrorVector;
1007
1008 Scalar bestError = provider.template determineRobustError<tEstimator>(weightedErrorVector, weightVector, invertedCovariances);
1009
1010 if (bestError == Numeric::maxValue())
1011 {
1012 ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
1013 return false;
1014 }
1015
1016 if (initialError != nullptr)
1017 {
1018 *initialError = bestError;
1019 }
1020
1021 if (intermediateErrors != nullptr)
1022 {
1023 ocean_assert(intermediateErrors->empty());
1024 intermediateErrors->push_back(bestError);
1025 }
1026
1027 bool oneValidIteration = false;
1028
1029 unsigned int i = 0u;
1030 while (i < iterations)
1031 {
1032 provider.determineJacobian(jacobian);
1033
1034 // if inverted covariance matrices are provided a further matrix multiplication must be respected
1035 if (invertedCovariances)
1036 {
1037 if constexpr (tEstimator == Estimator::ET_SQUARE)
1038 {
1039 // J^+ = (J^T * iCV * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn
1040
1041 invertedCovariances->selfSquareDiagonalMatrixMultiply(jacobian, intermediateCovarianceJacobian);
1042 jacobian.transposedMultiply(intermediateCovarianceJacobian, JTJ);
1043 ocean_assert(JTJ.columns() == JTJ.rows());
1044 }
1045 else
1046 {
1047 // J^+ = (J^T * iCV * diag(weights) * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn
1048
1049 invertedCovariances->selfSquareDiagonalMatrixMultiply(weightVector, jacobian, intermediateCovarianceJacobian);
1050 jacobian.transposedMultiply(intermediateCovarianceJacobian, JTJ);
1051 ocean_assert(JTJ.columns() == JTJ.rows());
1052 }
1053
1054 // J^T * iCV * diag(weights) * error
1055
1056 invertedCovariances->selfSquareDiagonalMatrixMultiply(weightedErrorVector, intermediateWeightedErrorVector);
1057 jacobian.transposedMultiply(intermediateWeightedErrorVector, jErrors);
1058
1059 ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
1060 }
1061 else
1062 {
1063 if constexpr (tEstimator == Estimator::ET_SQUARE)
1064 {
1065 // J^+ = (J^T * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn
1066 jacobian.selfTransposedSquareMatrix(JTJ);
1067 ocean_assert(JTJ.columns() == JTJ.rows());
1068 }
1069 else
1070 {
1071 // J^+ = (J^T * diag(weights) * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn
1072 jacobian.weightedSelfTransposedSquareMatrix(weightVector, JTJ);
1073 ocean_assert(JTJ.columns() == JTJ.rows());
1074 }
1075
1076 // error = J^T * diag(weights)
1077 jacobian.transposedMultiply(weightedErrorVector, jErrors);
1078 ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
1079 }
1080
1081 const Matrix JTJdiagonal(JTJ.diagonal());
1082
1083 while (i < iterations)
1084 {
1085 ++i;
1086
1087 // J^T * J = J^T * J + lambda * diag(J^T * J)
1088 if (lambda > Numeric::eps())
1089 {
1090 for (unsigned int n = 0; n < JTJ.columns(); ++n)
1091 {
1092 JTJ(n, n) = JTJdiagonal(n, 0) * (Scalar(1) + lambda);
1093 }
1094 }
1095
1096 // JTJ * deltas = -J^T * error
1097 // however, we determine JTJ * deltas = J^T * error and thus receive negative deltas (which then need to be subtracted from the current model/parameter configuration)
1098
1099 bool solved = false;
1100
1101 if (provider.hasSolver())
1102 {
1103 solved = provider.solve(JTJ, jErrors, deltas);
1104 }
1105 else
1106 {
1107 solved = JTJ.solve<Matrix::MP_SYMMETRIC>(jErrors, deltas);
1108 }
1109
1110 if (solved)
1111 {
1112 oneValidIteration = true;
1113
1114 // check whether the offset has been converged
1115 if (Numeric::isEqualEps(deltas.norm() / Scalar(deltas.elements())))
1116 {
1117 i = iterations;
1118 }
1119
1120 // we apply the deltas by: new = old - deltas (due to the solved equation: JTJ * deltas = J^T * error)
1121 provider.applyCorrection(deltas);
1122
1123 const Scalar iterationError = provider.template determineRobustError<tEstimator>(swapWeightedErrorVector, swapWeightVector, invertedCovariances);
1124
1125 // check whether the new error is not better than the best one
1126 if (iterationError >= bestError)
1127 {
1128 // modify the lambda parameter and start a new optimization, as long as the lambda is not zero already or too large
1129 if (lambdaFactor > Numeric::eps() && lambda > 0 && lambda <= maxLambda)
1130 {
1131 lambda *= lambdaFactor;
1132 }
1133 else
1134 {
1135 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1136
1137 // no further improvement can be applied
1138 i = iterations;
1139 }
1140
1141 continue;
1142 }
1143
1144 // we have an improvement
1145 bestError = iterationError;
1146
1147 if (intermediateErrors != nullptr)
1148 {
1149 intermediateErrors->push_back(bestError);
1150 }
1151
1152 provider.acceptCorrection();
1153
1154 std::swap(swapWeightedErrorVector, weightedErrorVector);
1155 std::swap(swapWeightVector, weightVector);
1156
1157 if (Numeric::isNotEqualEps(lambdaFactor))
1158 {
1159 // we do not decrease lambda if lambda is already near to zero so that we simply should stop optimization if we fail to reduce the error
1160 if (lambda > Numeric::eps())
1161 {
1162 lambda /= lambdaFactor;
1163 }
1164 }
1165
1166 // skip this inner loop here as a new jacobian has to be calculated
1167 break;
1168 }
1169 else if (lambda > Numeric::eps() && lambda <= maxLambda)
1170 {
1171 lambda *= lambdaFactor;
1172 }
1173 else
1174 {
1175 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1176
1177 // no further improvement can be applied
1178 i = iterations;
1179 }
1180 }
1181 }
1182
1183 if (finalError)
1184 {
1185 *finalError = bestError;
1186 }
1187
1188 return oneValidIteration;
1189}
1190
1191template <typename T>
1192bool NonLinearOptimization::sparseOptimization(T& provider, const unsigned int iterations, const Estimator::EstimatorType estimator, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
1193{
1194 switch (estimator)
1195 {
1197 return sparseOptimization<T, Estimator::ET_SQUARE>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
1198
1200 return sparseOptimization<T, Estimator::ET_LINEAR>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
1201
1203 return sparseOptimization<T, Estimator::ET_HUBER>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
1204
1206 return sparseOptimization<T, Estimator::ET_TUKEY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
1207
1209 return sparseOptimization<T, Estimator::ET_CAUCHY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);
1210
1211 default:
1212 ocean_assert(false && "Invalid estimator!");
1213 return false;
1214 }
1215}
1216
1217template <typename T, Estimator::EstimatorType tEstimator>
1218bool NonLinearOptimization::sparseOptimization(T& provider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
1219{
1220 constexpr Scalar maxLambda = Scalar(1e8);
1221
1222 ocean_assert(lambda >= Numeric::eps() && lambda <= maxLambda);
1223
1224 SparseMatrix jacobian;
1225
1226 // the error vectors are weighted for non-square estimators only
1227 Matrix weightedErrorVector, swapWeightedErrorVector;
1228 // the weight vectors are used for non-square estimators only
1229 Matrix weightVector, swapWeightVector;
1230
1231 const SparseMatrix invertedCovarianceMatrix = invertedCovariances ? SparseMatrix(invertedCovariances->rows(), invertedCovariances->rows(), *invertedCovariances) : SparseMatrix();
1232
1233 SparseMatrix JTJ;
1234 Matrix jErrors, deltas;
1235
1236 Scalar bestError = provider.template determineRobustError<tEstimator>(weightedErrorVector, weightVector, invertedCovariances);
1237
1238 if (bestError == Numeric::maxValue())
1239 {
1240 ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
1241 return false;
1242 }
1243
1244 if (initialError)
1245 {
1246 *initialError = bestError;
1247 }
1248
1249 if (intermediateErrors != nullptr)
1250 {
1251 ocean_assert(intermediateErrors->empty());
1252 intermediateErrors->push_back(bestError);
1253 }
1254
1255 bool oneValidIteration = false;
1256
1257 unsigned int i = 0u;
1258 while (i < iterations)
1259 {
1260 provider.determineJacobian(jacobian);
1261
1262 // if inverted covariance matrices are provided a further matrix multiplication must be respected
1263 if (invertedCovariances)
1264 {
1265 // delta = -(J^T * iCV * diag(weights) * J + lambda * diag(J^T * J)^-1 * J^T * iCV * diag(weights) * error
1266 // (J^T * iCV * diag(weights) * J + lambda * diag(J^T * J) * delta = -J^T * iCV * diag(weights) * error
1267 // (J^T * iCV * diag(weights) * J + lambda * diag(J^T * J) * -delta = J^T * iCV * diag(weights) * error
1268
1269 // J^T
1270 const SparseMatrix jacobianTransposed(jacobian.transposed());
1271
1272 if constexpr (tEstimator == Estimator::ET_SQUARE)
1273 {
1274 // J^T * iCV * J
1275 JTJ = jacobianTransposed * (invertedCovarianceMatrix * jacobian);
1276 ocean_assert(JTJ.columns() == JTJ.rows());
1277 }
1278 else
1279 {
1280 // J^T * ICV * diag(weights) * J
1281 JTJ = jacobianTransposed * (invertedCovarianceMatrix * (SparseMatrix(weightVector.rows(), weightVector.rows(), weightVector, true) * jacobian));
1282 ocean_assert(JTJ.columns() == JTJ.rows());
1283 }
1284
1285 // J^T * iCV * diag(weights) * error
1286 jErrors = jacobianTransposed * (invertedCovarianceMatrix * weightedErrorVector);
1287 ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
1288 }
1289 else
1290 {
1291 // delta = -(J^T * diag(weights) * J + lambda * diag(J^T * J)^-1 * J^T * diag(weights) * error
1292 // (J^T * diag(weights) * J + lambda * diag(J^T * J) * delta = -J^T * diag(weights) * error
1293 // (J^T * diag(weights) * J + lambda * diag(J^T * J) * -delta = J^T * diag(weights) * error
1294
1295 // J^T
1296 const SparseMatrix jacobianTransposed(jacobian.transposed());
1297
1298 if constexpr (tEstimator == Estimator::ET_SQUARE)
1299 {
1300 // J^T * J
1301 JTJ = jacobianTransposed * jacobian;
1302 ocean_assert(JTJ.columns() == JTJ.rows());
1303 }
1304 else
1305 {
1306 // J^T * diag(weights) * J
1307 JTJ = jacobianTransposed * SparseMatrix(weightVector.rows(), weightVector.rows(), weightVector, true) * jacobian;
1308 ocean_assert(JTJ.columns() == JTJ.rows());
1309 }
1310
1311 // J^T * diag(weights) * error
1312 jErrors = jacobianTransposed * weightedErrorVector;
1313 ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
1314 }
1315
1316 const Matrix JTJdiagonal(JTJ.diagonal());
1317
1318 while (i < iterations)
1319 {
1320 ++i;
1321
1322 // J^T * J = J^T * J + lambda * diag(J^T * J)
1323 if (lambda > Numeric::eps())
1324 {
1325 for (unsigned int n = 0; n < JTJ.columns(); ++n)
1326 {
1327 if (JTJdiagonal(n, 0) != Scalar(0))
1328 {
1329 ocean_assert(!JTJ.isZero(n, n));
1330 JTJ(n, n) = JTJdiagonal(n, 0) * (Scalar(1) + lambda);
1331 }
1332 }
1333 }
1334
1335 // JTJ * deltas = -J^T * error
1336 // however, we determine JTJ * deltas = J^T * error and thus receive negative deltas (which then need to be subtracted from the current model/parameter configuration)
1337
1338 bool solved = false;
1339
1340 if (provider.hasSolver())
1341 {
1342 solved = provider.solve(JTJ, jErrors, deltas);
1343 }
1344 else
1345 {
1346 solved = JTJ.solve(jErrors, deltas);
1347 }
1348
1349 if (solved)
1350 {
1351 oneValidIteration = true;
1352
1353 // check whether the offset has been converted
1354 if (Numeric::isEqual(deltas.norm() / Scalar(deltas.elements()), 0, Numeric::weakEps() * Scalar(0.01)))
1355 {
1356 i = iterations;
1357 }
1358
1359 // we apply the deltas by: new = old - deltas (due to the solved equation: JTJ * deltas = J^T * error)
1360 provider.applyCorrection(deltas);
1361
1362 const Scalar iterationError = provider.template determineRobustError<tEstimator>(swapWeightedErrorVector, swapWeightVector, invertedCovariances);
1363
1364 // check whether the new error is not better than the best one
1365 if (iterationError >= bestError)
1366 {
1367 // modify the lambda parameter and start a new optimization, as long as the lambda is not zero already or too large
1368 if (lambdaFactor > Numeric::eps() && lambda > 0 && lambda <= maxLambda)
1369 {
1370 lambda *= lambdaFactor;
1371 }
1372 else
1373 {
1374 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1375
1376 // no further improvement can be applied
1377 i = iterations;
1378 }
1379
1380 continue;
1381 }
1382
1383 // we have an improvement
1384 bestError = iterationError;
1385
1386 if (intermediateErrors != nullptr)
1387 {
1388 intermediateErrors->push_back(bestError);
1389 }
1390
1391 provider.acceptCorrection();
1392
1393 std::swap(swapWeightedErrorVector, weightedErrorVector);
1394 std::swap(swapWeightVector, weightVector);
1395
1396 if (Numeric::isNotEqualEps(lambdaFactor))
1397 {
1398 // we do not decrease lambda if lambda is already near to zero so that we simply should stop optimization if we fail to reduce the error
1399 if (lambda > Numeric::eps())
1400 {
1401 lambda /= lambdaFactor;
1402 }
1403 }
1404
1405 // skip this inner loop here as a new jacobian has to be calculated
1406 break;
1407 }
1408 else if (lambda > Numeric::eps() && lambda <= maxLambda)
1409 {
1410 lambda *= lambdaFactor;
1411 }
1412 else
1413 {
1414 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1415
1416 // no further improvement can be applied
1417 i = iterations;
1418 }
1419 }
1420 }
1421
1422 if (finalError)
1423 {
1424 *finalError = bestError;
1425 }
1426
1427 return oneValidIteration;
1428}
1429
1430template <typename T>
1431bool NonLinearOptimization::advancedDenseOptimization(T& advancedDenseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, Scalars* intermediateErrors)
1432{
1433 constexpr Scalar maxLambda = Scalar(1e8);
1434
1435 ocean_assert(lambda >= Scalar(0) && lambda <= maxLambda);
1436 ocean_assert((lambda == Scalar(0) && lambdaFactor == Scalar(1)) || (lambda > Scalar(0) && lambdaFactor > Scalar(1)));
1437
1438 const bool useLevenbergMarquardt = (lambda > 0 && lambdaFactor > 1);
1439
1440 // we determine the initial error only for the Levenberg-Marquardt optimization of if the user explicitly requests that value
1441 Scalar bestError = (useLevenbergMarquardt || initialError) ? advancedDenseProvider.determineError() : Numeric::minValue();
1442
1443 if (bestError == Numeric::maxValue())
1444 {
1445 ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
1446 return false;
1447 }
1448
1449 if (initialError)
1450 {
1451 *initialError = bestError;
1452 }
1453
1454 if (intermediateErrors != nullptr)
1455 {
1456 ocean_assert(intermediateErrors->empty());
1457 intermediateErrors->push_back(bestError);
1458 }
1459
1460 bool oneValidIteration = false;
1461
1462 Matrix hessian, jacobianError, deltas;
1463
1464 unsigned int i = 0u;
1465 while (!advancedDenseProvider.shouldStop() && i < iterations)
1466 {
1467 /**
1468 * Now the provider will determine the (n x n) Hessian matrix and the (n x 1) Jacobian-Error matrix, may be the approximated Hessian.
1469 * The Hessian matrix is calculated by J^T * J.
1470 * The Jacobian-Error matrix is calculated by J^T * E, with E the error vector.
1471 * The model has dimension n.
1472 */
1473 if (!advancedDenseProvider.determineHessianAndErrorJacobian(hessian, jacobianError))
1474 {
1475 ocean_assert(false && "The Hessian and the Jacobian with combined error values could not be determined!");
1476 return false;
1477 }
1478
1479 ocean_assert(hessian.rows() == hessian.columns());
1480 ocean_assert(hessian.rows() == jacobianError.rows() && jacobianError.columns() == 1);
1481
1482 // now we store the diagonal of the Hessian allowing to apply a Levenberg-Marquardt later
1483
1484 const Matrix hessianDiagonal(hessian.diagonal());
1485 ocean_assert(hessianDiagonal.rows() == hessian.rows() && hessianDiagonal.columns() == 1);
1486
1487 while (!advancedDenseProvider.shouldStop() && i++ < iterations)
1488 {
1489 // we apply the lambda value for the Levenberg-Marquardt:
1490 // Hessian + lambda * diag(Hessian)
1491 if (lambda > Numeric::eps())
1492 {
1493 for (unsigned int n = 0; n < hessian.columns(); ++n)
1494 {
1495 hessian(n, n) = hessianDiagonal(n, 0) * (Scalar(1) + lambda);
1496 }
1497 }
1498
1499 // now we solve: Hessian * deltas = -JacobianErrors, (we will subtract the deltas later so that we now can solve Hessian * deltas = +JacobianErrors)
1500
1501 bool solved = false;
1502
1503 if (advancedDenseProvider.hasSolver())
1504 {
1505 solved = advancedDenseProvider.solve(hessian, jacobianError, deltas);
1506 }
1507 else
1508 {
1509 solved = hessian.solve<Matrix::MP_SYMMETRIC>(jacobianError, deltas);
1510 }
1511
1512 if (solved)
1513 {
1514 oneValidIteration = true;
1515
1516 // check whether the offset has been converted
1517 if (Numeric::isWeakEqualEps(deltas.norm() / Scalar(deltas.elements())))
1518 {
1519 i = iterations;
1520 }
1521
1522 // we apply the deltas by: new = old - deltas
1523 advancedDenseProvider.applyCorrection(deltas);
1524
1525 const Scalar iterationError = (useLevenbergMarquardt || intermediateErrors) ? advancedDenseProvider.determineError() : Numeric::minValue();
1526
1527 // check whether a Levenberg-Marquardt approach is intended and then check whether the new error is not better than the best one
1528 if (useLevenbergMarquardt && iterationError >= bestError)
1529 {
1530 // modify the lambda parameter and start a new optimization
1531 if (lambda > 0 && lambda <= maxLambda)
1532 {
1533 lambda *= lambdaFactor;
1534 }
1535 else
1536 {
1537 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1538
1539 // no further improvement can be applied
1540 i = iterations;
1541 }
1542
1543 continue;
1544 }
1545
1546 // we have an improvement, or we have a Gauss-Newton optimization so that we do not store the previous iteration
1547 bestError = iterationError;
1548
1549 if (intermediateErrors != nullptr)
1550 {
1551 intermediateErrors->push_back(bestError);
1552 }
1553
1554 advancedDenseProvider.acceptCorrection();
1555
1556 if (useLevenbergMarquardt && Numeric::isNotEqualEps(lambdaFactor))
1557 {
1558 // we do not decrease lambda if lambda is already near to zero so that we simply should stop optimization if we fail to reduce the error
1559 if (lambda > Numeric::eps())
1560 {
1561 lambda /= lambdaFactor;
1562 }
1563 }
1564
1565 // skip this inner loop here as a new jacobian has to be calculated
1566 break;
1567 }
1568 else if (lambda > Numeric::eps() && lambda <= maxLambda)
1569 {
1570 lambda *= lambdaFactor;
1571 }
1572 else
1573 {
1574 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1575
1576 // no further improvement can be applied
1577 i = iterations;
1578 }
1579 }
1580 }
1581
1582 if (finalError)
1583 {
1584 // if Levenberg-Marquardt is not intended we have to determine the error as we did not do it before
1585 if (!useLevenbergMarquardt && !intermediateErrors)
1586 {
1587 bestError = advancedDenseProvider.determineError();
1588 }
1589
1590 *finalError = bestError;
1591 }
1592
1593 return oneValidIteration;
1594}
1595
template <typename T>
bool NonLinearOptimization::advancedSparseOptimization(T& advancedSparseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, Scalars* intermediateErrors)
{
	// Iteratively optimizes the model held by the advanced sparse provider, either with
	// Gauss-Newton (lambda == 0, lambdaFactor == 1) or Levenberg-Marquardt (lambda > 0, lambdaFactor > 1).
	// The provider owns the model, the (sparse) Jacobian/Hessian data and the linear solver.

	constexpr Scalar maxLambda = Scalar(1e8);

	ocean_assert(lambda >= Scalar(0) && lambda <= maxLambda);
	ocean_assert((lambda == Scalar(0) && lambdaFactor == Scalar(1)) || (lambda > Scalar(0) && lambdaFactor > Scalar(1)));

	const bool useLevenbergMarquardt = (lambda > 0 && lambdaFactor > 1);

	// we determine the initial error only for the Levenberg-Marquardt optimization or if the user explicitly requests that value
	// (Numeric::minValue() is used as a "not determined" sentinel)
	Scalar bestError = (useLevenbergMarquardt || initialError) ? advancedSparseProvider.determineError() : Numeric::minValue();

	// Numeric::maxValue() is the provider's convention for an invalid model/error
	if (bestError == Numeric::maxValue())
	{
		ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
		return false;
	}

	if (initialError)
	{
		*initialError = bestError;
	}

	if (intermediateErrors != nullptr)
	{
		ocean_assert(intermediateErrors->empty());
		intermediateErrors->push_back(bestError);
	}

	bool oneValidIteration = false;

	Matrix deltas;

	// the iteration counter 'i' is shared between both loops: the outer loop re-derives the
	// linearization (Jacobian etc.), the inner loop retries the solve with adapted lambda
	unsigned int i = 0u;
	while (!advancedSparseProvider.shouldStop() && i < iterations)
	{
		/**
		 * Now the provider will determine all parameters like the Jacobian, the transposed Jacobian, the Hessian and the Jacobian-Error matrix.
		 * However, the provider may also determine parts of the values or abstract information as long as the provider is able to solve the linear equation later.
		 */
		if (!advancedSparseProvider.determineParameters())
		{
			ocean_assert(false && "The provider failed to determine the abstract parameters.");
			return false;
		}

		while (!advancedSparseProvider.shouldStop() && i++ < iterations)
		{
			ocean_assert(lambda >= Scalar(0));
			if (advancedSparseProvider.solve(deltas, lambda))
			{
				oneValidIteration = true;

				// check whether the optimization has converged (the average delta per element is negligible)
				if (Numeric::isEqualEps(deltas.norm() / Scalar(deltas.elements())))
				{
					// force both loops to terminate after this iteration has been processed
					i = iterations;
				}

				// we apply the deltas by: new = old - deltas
				advancedSparseProvider.applyCorrection(deltas);

				const Scalar iterationError = (useLevenbergMarquardt || intermediateErrors) ? advancedSparseProvider.determineError() : Numeric::minValue();

				// check whether a Levenberg-Marquardt approach is intended and then check whether the new error is not better than the best one
				if (useLevenbergMarquardt && iterationError >= bestError)
				{
					// modify the lambda parameter and start a new optimization
					// (the rejected candidate is simply not accepted, so the previous model remains valid)
					if (lambda > 0 && lambda <= maxLambda)
					{
						lambda *= lambdaFactor;
					}
					else
					{
						ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");

						// no further improvement can be applied
						i = iterations;
					}

					continue;
				}

				// we have an improvement, or we have a Gauss-Newton optimization so that we do not store the previous iteration
				bestError = iterationError;

				if (intermediateErrors != nullptr)
				{
					intermediateErrors->push_back(bestError);
				}

				advancedSparseProvider.acceptCorrection();

				if (useLevenbergMarquardt && Numeric::isNotEqualEps(lambdaFactor))
				{
					// we do not decrease lambda if lambda is already near to zero so that we simply should stop optimization if we fail to reduce the error
					if (lambda > Numeric::eps())
					{
						lambda /= lambdaFactor;
					}
				}

				// skip this inner loop here as a new jacobian has to be calculated
				break;
			}
			else if (lambda > Numeric::eps() && lambda <= maxLambda)
			{
				// the solve failed; retry with a larger (more gradient-descent-like) lambda
				lambda *= lambdaFactor;
			}
			else
			{
				ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");

				// no further improvement can be applied
				i = iterations;
			}
		}
	}

	if (finalError)
	{
		// if Levenberg-Marquardt is not intended we have to determine the error as we did not do it before
		if (!useLevenbergMarquardt && !intermediateErrors)
		{
			bestError = advancedSparseProvider.determineError();
		}

		*finalError = bestError;
	}

	return oneValidIteration;
}
1729
1730template <Estimator::EstimatorType tEstimator>
1731Scalar NonLinearOptimization::sqrErrors2robustErrors2(const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances)
1732{
1733 // determine the sigma ideal for the square errors
1734 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors.data(), sqrErrors.size(), modelParameters)) : 0;
1735
1736 Scalar robustError = 0;
1737
1738 for (size_t n = 0; n < sqrErrors.size(); ++n)
1739 {
1740 ocean_assert(Numeric::isEqual(weightedErrors[n].sqr(), sqrErrors[n]));
1741
1742 // determine the weight for each individual image point, however as e.g., the Tukey estimator may return a weight of 0 we have to clamp the weight to ensure that we still can solve the equation
1743 // **NOTE** the much better way would be to remove the entry from the equation and to solve it
1744 const Scalar weight = max(Numeric::weakEps(), Estimator::robustWeightSquare<tEstimator>(sqrErrors[n], sqrSigma));
1745
1746 // increase the total robust error
1747 if (transposedInvertedCovariances)
1748 {
1749 robustError += (transposedInvertedCovariances[n].transposed() * weightedErrors[n]).sqr() * weight;
1750 }
1751 else
1752 {
1753 robustError += sqrErrors[n] * weight;
1754 }
1755
1756 weightedErrors[n] *= weight;
1757
1758 weightVectors[n] = Vector2(weight, weight);
1759 }
1760
1761 // return the averaged robust error
1762 return robustError / Scalar(sqrErrors.size());
1763}
1764
1765template <Estimator::EstimatorType tEstimator, size_t tDimension>
1766Scalar NonLinearOptimization::sqrErrors2robustErrors(const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances)
1767{
1768 ocean_assert(transposedInvertedCovariances == nullptr && "Currently not implemented");
1769 OCEAN_SUPPRESS_UNUSED_WARNING(transposedInvertedCovariances);
1770
1771 // determine the sigma ideal for the square errors
1772 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors.data(), sqrErrors.size(), modelParameters)) : 0;
1773
1774 Scalar robustError = 0;
1775
1776 for (size_t n = 0; n < sqrErrors.size(); ++n)
1777 {
1778 StaticBuffer<Scalar, tDimension>& weightedErrorsPointer = weightedErrors[n];
1779 StaticBuffer<Scalar, tDimension>& weightVectorsPointer = weightVectors[n];
1780
1781 ocean_assert(Numeric::isEqual(Numeric::summedSqr(weightedErrorsPointer.data(), tDimension), sqrErrors[n]));
1782
1783 // determine the weight for each individual image point, however as e.g., the tukey estimator may return a weight of 0 we have to clamp the weight to ensure that we still can solve the equation
1784 // **NOTE** the much better way would be to remove the entry from the equation and to solve it
1785 const Scalar weight = max(Numeric::weakEps(), Estimator::robustWeightSquare<tEstimator>(sqrErrors[n], sqrSigma));
1786
1787 // increase the total robust error
1788 robustError += sqrErrors[n] * weight;
1789
1790 for (size_t d = 0; d < tDimension; ++d)
1791 {
1792 weightedErrorsPointer[d] *= weight;
1793 weightVectorsPointer[d] = weight;
1794 }
1795 }
1796
1797 // return the averaged robust error
1798 return robustError / Scalar(sqrErrors.size());
1799}
1800
1801template <Estimator::EstimatorType tEstimator>
1802Scalar NonLinearOptimization::sqrErrors2robustErrors_i(const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i)
1803{
1804 ocean_assert(transposedInvertedCovariances_i == nullptr && "Currently not implemented");
1805 OCEAN_SUPPRESS_UNUSED_WARNING(transposedInvertedCovariances_i);
1806
1807 // determine the sigma ideal for the square errors
1808 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors.data(), sqrErrors.size(), modelParameters)) : 0;
1809
1810 Scalar robustError = 0;
1811
1812 for (size_t n = 0; n < sqrErrors.size(); ++n)
1813 {
1814 Scalar* const weightedErrorsPointer = weightedErrors_i + n * dimension;
1815 Scalar* const weightVectorsPointer = weightVectors_i + n * dimension;
1816
1817 ocean_assert(Numeric::isEqual(Numeric::summedSqr(weightedErrorsPointer, dimension), sqrErrors[n]));
1818
1819 // determine the weight for each individual image point
1820 const Scalar weight = Estimator::robustWeightSquare<tEstimator>(sqrErrors[n], sqrSigma);
1821
1822 // increase the total robust error
1823 robustError += sqrErrors[n] * weight;
1824
1825 for (size_t d = 0; d < dimension; ++d)
1826 {
1827 weightedErrorsPointer[d] *= weight;
1828 weightVectorsPointer[d] = weight;
1829 }
1830 }
1831
1832 // return the averaged robust error
1833 return robustError / Scalar(sqrErrors.size());
1834}
1835
1836inline Scalar NonLinearOptimization::sqrErrors2robustErrors2(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances)
1837{
1838 switch (estimator)
1839 {
1841 return sqrErrors2robustErrors2<Estimator::ET_SQUARE>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1842
1844 return sqrErrors2robustErrors2<Estimator::ET_LINEAR>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1845
1847 return sqrErrors2robustErrors2<Estimator::ET_HUBER>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1848
1850 return sqrErrors2robustErrors2<Estimator::ET_TUKEY>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1851
1853 return sqrErrors2robustErrors2<Estimator::ET_CAUCHY>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1854
1855 default:
1856 ocean_assert(false && "Invalid estimator!");
1857 return Numeric::maxValue();
1858 }
1859}
1860
1861template <size_t tDimension>
1862inline Scalar NonLinearOptimization::sqrErrors2robustErrors(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances)
1863{
1864 switch (estimator)
1865 {
1867 return sqrErrors2robustErrors<Estimator::ET_SQUARE, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1868
1870 return sqrErrors2robustErrors<Estimator::ET_LINEAR, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1871
1873 return sqrErrors2robustErrors<Estimator::ET_HUBER, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1874
1876 return sqrErrors2robustErrors<Estimator::ET_TUKEY, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1877
1879 return sqrErrors2robustErrors<Estimator::ET_CAUCHY, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1880
1881 default:
1882 ocean_assert(false && "Invalid estimator!");
1883 return Numeric::maxValue();
1884 }
1885}
1886
1887inline Scalar NonLinearOptimization::sqrErrors2robustErrors_i(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i)
1888{
1889 switch (estimator)
1890 {
1892 return sqrErrors2robustErrors_i<Estimator::ET_SQUARE>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1893
1895 return sqrErrors2robustErrors_i<Estimator::ET_LINEAR>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1896
1898 return sqrErrors2robustErrors_i<Estimator::ET_HUBER>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1899
1901 return sqrErrors2robustErrors_i<Estimator::ET_TUKEY>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1902
1904 return sqrErrors2robustErrors_i<Estimator::ET_CAUCHY>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1905
1906 default:
1907 ocean_assert(false && "Invalid estimator!");
1908 return Numeric::maxValue();
1909 }
1910}
1911
1912}
1913
1914}
1915
1916#endif // META_OCEAN_GEOMETRY_NON_LINEAR_OPTIMIZATION_H
virtual size_t size() const =0
Returns the number of accessible elements of this accessor object.
This class implements a base class for all indexed-based accessors allowing a constant reference acce...
Definition Accessor.h:241
EstimatorType
Definition of individual robust estimator types.
Definition Estimator.h:34
@ ET_HUBER
The Huber estimator type.
Definition Estimator.h:84
@ ET_TUKEY
The Tukey estimator.
Definition Estimator.h:102
@ ET_SQUARE
The standard square error estimator (L2).
Definition Estimator.h:52
@ ET_LINEAR
The linear estimator (L1).
Definition Estimator.h:66
@ ET_CAUCHY
The Cauchy estimator.
Definition Estimator.h:118
This class implements the base for an advanced dense optimization provider.
Definition NonLinearOptimization.h:326
void acceptCorrection()
Accepts the current model candidate as a new (better) model than the previous one.
Definition NonLinearOptimization.h:801
Scalar determineError()
Determines the error for the current model candidate (not the actual model).
Definition NonLinearOptimization.h:784
bool determineHessianAndErrorJacobian(Matrix &hessian, Matrix &jacobianError)
Determines the Hessian matrix and the Error-Jacobian vector based on the actual/current model (the tr...
Definition NonLinearOptimization.h:790
bool hasSolver() const
Returns whether the provider comes with an own solver for the linear equation.
Definition NonLinearOptimization.h:812
bool shouldStop()
Returns whether the optimization process should stop e.g., due to an external event.
Definition NonLinearOptimization.h:806
bool solve(const Matrix &hessian, const Matrix &jacobianError, Matrix &deltas) const
Solves the linear equation Hessian * deltas = -jacobianError.
Definition NonLinearOptimization.h:817
AdvancedDenseOptimizationProvider()
Protected default constructor.
Definition NonLinearOptimization.h:779
void applyCorrection(const Matrix &deltas)
Creates a new model candidate by adjusting the current model with delta values.
Definition NonLinearOptimization.h:796
This class implements the base class for an advanced sparse optimization provider.
Definition NonLinearOptimization.h:393
void acceptCorrection()
Accepts the current model candidate as a new (better) model than the previous one.
Definition NonLinearOptimization.h:845
bool determineParameters()
Determines any kind of (abstract) parameters based on the current/actual model (not the model candida...
Definition NonLinearOptimization.h:834
bool shouldStop()
Returns whether the optimization process should stop e.g., due to an external event.
Definition NonLinearOptimization.h:850
Scalar determineError()
Determines the error for the current model candidate (not the actual model).
Definition NonLinearOptimization.h:828
bool solve(Matrix &deltas, const Scalar lambda=Scalar(0))
Solves the linear equation Hessian * deltas = -jacobianError based on the internal data.
Definition NonLinearOptimization.h:856
void applyCorrection(const Matrix &deltas)
Creates a new model candidate by adjusting the current/actual model with delta values.
Definition NonLinearOptimization.h:840
AdvancedSparseOptimizationProvider()
Protected default constructor.
Definition NonLinearOptimization.h:823
This class implements the base class for an accessor of groups of pairs.
Definition NonLinearOptimization.h:46
CorrespondenceGroupsAccessor(CorrespondenceGroupsAccessor< TFirst, TSecond > &&accessor)
Move constructor.
Definition NonLinearOptimization.h:660
size_t groupElements(const size_t groupIndex) const
Returns the number of elements within a specified group.
Definition NonLinearOptimization.h:673
size_t groups() const
Returns the number of groups of this accessor.
Definition NonLinearOptimization.h:667
const TSecond & secondElement(const size_t groupIndex, const size_t elementIndex) const
Returns the second element of a pair of a specific group of this object.
Definition NonLinearOptimization.h:702
CorrespondenceGroupsAccessor< TFirst, TSecond > & operator=(CorrespondenceGroupsAccessor< TFirst, TSecond > &&accessor)
Move assignment operator.
Definition NonLinearOptimization.h:718
CorrespondenceGroupsAccessor(const ElementGroups &elementGroups)
Creates a new accessor object.
Definition NonLinearOptimization.h:639
CorrespondenceGroupsAccessor< TFirst, TSecond > & operator=(const CorrespondenceGroupsAccessor< TFirst, TSecond > &accessor)
Copy assignment operator.
Definition NonLinearOptimization.h:711
void element(const size_t groupIndex, const size_t elementIndex, TFirst &first, TSecond &second) const
Returns one pair of a specific group of this object.
Definition NonLinearOptimization.h:681
const TFirst & firstElement(const size_t groupIndex, const size_t elementIndex) const
Returns the first element of a pair of a specific group of this object.
Definition NonLinearOptimization.h:693
CorrespondenceGroupsAccessor()
Creates a new accessor object.
Definition NonLinearOptimization.h:633
std::vector< Elements > ElementGroups
Definition of a vector holding a group of elements.
Definition NonLinearOptimization.h:62
CorrespondenceGroupsAccessor(const CorrespondenceGroupsAccessor< TFirst, TSecond > &accessor)
Copy constructor.
Definition NonLinearOptimization.h:653
std::vector< Element > Elements
Definition of a vector holding elements.
Definition NonLinearOptimization.h:57
std::pair< Index32, Vector2 > Element
Definition of a pair combining an object point id with an image point.
Definition NonLinearOptimization.h:52
CorrespondenceGroupsAccessor(ElementGroups &&elementGroups)
Creates a new accessor object.
Definition NonLinearOptimization.h:646
ElementGroups elementGroups_
The groups of elements of this accessor.
Definition NonLinearOptimization.h:150
This class implements an abstract specialization of the accessor for groups of pairs for object point...
Definition NonLinearOptimization.h:158
ObjectPointGroupsAccessor()
Creates a new accessor object.
Definition NonLinearOptimization.h:728
This class implements a group accessor providing access to pairs of poses and image points.
Definition NonLinearOptimization.h:222
size_t addObjectPoint(Elements &&poseIdImagePointPairs)
Adds the observations of a new object point to this accessor.
Definition NonLinearOptimization.h:951
This class implements the base optimization provider.
Definition NonLinearOptimization.h:292
bool solve(const Matrix &JTJ, const Matrix &jErrors, Matrix &deltas) const
Solves the linear equation JTJ * deltas = jErrors.
Definition NonLinearOptimization.h:767
bool hasSolver() const
Returns whether the provider comes with an own solver for the linear equation.
Definition NonLinearOptimization.h:762
This class implements an abstract specialization of the accessor for groups of pairs for poses.
Definition NonLinearOptimization.h:184
PoseGroupsAccessor()
Creates a new accessor object.
Definition NonLinearOptimization.h:745
This class implements the basic functions for least square or robust optimization algorithms for non ...
Definition NonLinearOptimization.h:33
static bool denseOptimization(T &provider, const unsigned int iterations=5u, const Estimator::EstimatorType estimator=Estimator::ET_SQUARE, Scalar lambda=Scalar(0.001), const Scalar lambdaFactor=Scalar(5), Scalar *initialError=nullptr, Scalar *finalError=nullptr, const Matrix *invertedCovariances=nullptr, Scalars *intermediateErrors=nullptr)
Invokes the optimization of a dense (matrix) optimization problem.
Definition NonLinearOptimization.h:963
static bool sparseOptimization(T &provider, const unsigned int iterations=5u, const Estimator::EstimatorType estimator=Estimator::ET_SQUARE, Scalar lambda=Scalar(0.001), const Scalar lambdaFactor=Scalar(5), Scalar *initialError=nullptr, Scalar *finalError=nullptr, const Matrix *invertedCovariances=nullptr, Scalars *intermediateErrors=nullptr)
Invokes the optimization of a sparse (matrix) optimization problem.
Definition NonLinearOptimization.h:1192
static Scalar sqrErrors2robustErrors_i(const Scalars &sqrErrors, const size_t modelParameters, const size_t dimension, Scalar *weightedErrors_i, Scalar *weightVectors_i, const Matrix *transposedInvertedCovariances_i)
Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
Definition NonLinearOptimization.h:1802
static Scalar sqrErrors2robustErrors2(const Scalars &sqrErrors, const size_t modelParameters, Vector2 *weightedErrors, Vector2 *weightVectors, const SquareMatrix2 *transposedInvertedCovariances)
Translates the n/2 squared errors that correspond to n elements in the error vector to robust errors.
Definition NonLinearOptimization.h:1731
static bool advancedDenseOptimization(T &advancedDenseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar *initialError=nullptr, Scalar *finalError=nullptr, Scalars *intermediateErrors=nullptr)
Invokes the optimization of a dense (matrix) optimization problem using an advanced optimization prov...
Definition NonLinearOptimization.h:1431
static Scalar sqrErrors2robustErrors(const Scalars &sqrErrors, const size_t modelParameters, StaticBuffer< Scalar, tDimension > *weightedErrors, StaticBuffer< Scalar, tDimension > *weightVectors, const Matrix *transposedInvertedCovariances)
Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
Definition NonLinearOptimization.h:1766
static bool advancedSparseOptimization(T &advancedSparseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar *initialError=nullptr, Scalar *finalError=nullptr, Scalars *intermediateErrors=nullptr)
Invokes the optimization of a sparse (matrix) optimization problem using an advanced optimization pro...
Definition NonLinearOptimization.h:1597
This class implements an optimization for universal dense problems with one model (optimization probl...
Definition NonLinearUniversalOptimizationDense.h:32
MatrixT< T > transposedMultiply(const MatrixT< T > &right) const
Multiplies this transposed matrix with a second matrix.
void weightedSelfTransposedSquareMatrix(const MatrixT< T > &weightDiagonal, MatrixT< T > &result) const
Returns the matrix product of transposed matrix of this matrix and this matrix and applies a further ...
size_t columns() const
Returns the count of columns.
Definition Matrix.h:698
MatrixT< T > selfTransposedSquareMatrix() const
Returns the matrix product of transposed matrix of this matrix and this matrix.
size_t elements() const
Returns the number of entire elements, which is the product of rows and columns.
Definition Matrix.h:704
MatrixT< T > diagonal() const
Returns a vector containing the values of the diagonal.
T norm() const
Determines the L1 norm (sum of absolute elements) of this matrix.
Definition Matrix.h:772
@ MP_SYMMETRIC
The matrix is symmetric.
Definition Matrix.h:79
size_t rows() const
Returns the count of rows.
Definition Matrix.h:692
bool selfSquareDiagonalMatrixMultiply(const MatrixT< T > &right, MatrixT< T > &result) const
Interprets this matrix as diagonal matrix and multiplies a second matrix on the right of the interpre...
bool solve(const MatrixT< T > &b, MatrixT< T > &x, const MatrixProperty matrixProperty=MP_UNKNOWN) const
Solves the given linear system.
Definition Matrix.h:710
static constexpr T minValue()
Returns the min scalar value.
Definition Numeric.h:3253
static constexpr T weakEps()
Returns a weak epsilon.
static constexpr bool isWeakEqualEps(const T value)
Returns whether a value is smaller than or equal to a weak epsilon.
Definition Numeric.h:2165
static constexpr T eps()
Returns a small epsilon.
static bool isEqual(const T first, const T second)
Returns whether two values are equal up to a small epsilon.
Definition Numeric.h:2389
static constexpr T sqr(const T value)
Returns the square of a given value.
Definition Numeric.h:1495
static constexpr bool isEqualEps(const T value)
Returns whether a value is smaller than or equal to a small epsilon.
Definition Numeric.h:2090
static constexpr bool isNotEqualEps(const T value)
Returns whether a value is not smaller than or equal to a small epsilon.
Definition Numeric.h:2240
static T summedSqr(const T *values, const size_t number)
Returns the summed squares of a given values.
Definition Numeric.h:1514
static constexpr T maxValue()
Returns the max scalar value.
Definition Numeric.h:3247
This class implements a sparse matrix using a float type for its elements that is specified by T.
Definition SparseMatrix.h:61
bool isZero(const size_t row, const size_t column) const
Returns whether a specified elements is zero.
SparseMatrixT< T > transposed() const
Returns the transposes matrix of this matrix.
size_t rows() const
Returns the number of rows this matrix has.
size_t columns() const
Returns the number of columns this matrix has.
bool solve(const MatrixT< T > &b, MatrixT< T > &x) const
Solves the given linear system.
MatrixT< T > diagonal() const
Returns a vector containing the values of the diagonal.
This class implements a 2x2 square matrix.
Definition SquareMatrix2.h:73
SquareMatrixT2< T > transposed() const
Returns the transposed of this matrix.
Definition SquareMatrix2.h:665
This class implements a static buffer that has a fixed capacity.
Definition StaticBuffer.h:24
const T * data() const
Returns the buffer data pointer.
Definition StaticBuffer.h:240
unsigned int sqr(const char value)
Returns the square value of a given value.
Definition base/Utilities.h:1053
uint32_t Index32
Definition of a 32 bit index value.
Definition Base.h:84
SparseMatrixT< Scalar > SparseMatrix
Definition of the SparseMatrix object, depending on the OCEAN_MATH_USE_SINGLE_PRECISION either with s...
Definition SparseMatrix.h:30
float Scalar
Definition of a scalar type.
Definition Math.h:129
std::vector< Scalar > Scalars
Definition of a vector holding Scalar objects.
Definition Math.h:145
VectorT2< Scalar > Vector2
Definition of a 2D vector.
Definition Vector2.h:28
The namespace covering the entire Ocean framework.
Definition Accessor.h:15