NonLinearOptimization.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#ifndef META_OCEAN_GEOMETRY_NON_LINEAR_OPTIMIZATION_H
#define META_OCEAN_GEOMETRY_NON_LINEAR_OPTIMIZATION_H

#include "ocean/geometry/Geometry.h"
#include "ocean/geometry/Estimator.h"

#include "ocean/base/Accessor.h"
#include "ocean/base/StaticBuffer.h"
#include "ocean/base/Worker.h"

#include "ocean/math/Matrix.h"
#include "ocean/math/SparseMatrix.h"
#include "ocean/math/SquareMatrix2.h"

namespace Ocean
{

namespace Geometry
{

/**
 * This class implements the basic functions for least squares or robust optimization algorithms for non-linear functions.
 * The actual optimization algorithms are implemented in derived classes.
 * @ingroup geometry
 */
class OCEAN_GEOMETRY_EXPORT NonLinearOptimization
{
	template <unsigned int, unsigned int, unsigned int> friend class NonLinearUniversalOptimizationDense;

	public:

		/**
		 * This class implements the base class for an accessor of groups of pairs.
		 * Each group of pairs may have an arbitrary number of elements.
		 * @tparam TFirst The data type of the first element of each pair
		 * @tparam TSecond The data type of the second element of each pair
		 */
		template <typename TFirst, typename TSecond>
		class CorrespondenceGroupsAccessor
		{
			protected:

				/**
				 * Definition of a pair combining an object point id with an image point.
				 */
				typedef std::pair<Index32, Vector2> Element;

				/**
				 * Definition of a vector holding elements.
				 */
				typedef std::vector<Element> Elements;

				/**
				 * Definition of a vector holding a group of elements.
				 */
				typedef std::vector<Elements> ElementGroups;

			public:

				/**
				 * Copy constructor.
				 * @param accessor Accessor to copy
				 */
				inline CorrespondenceGroupsAccessor(const CorrespondenceGroupsAccessor<TFirst, TSecond>& accessor);

				/**
				 * Move constructor.
				 * @param accessor Accessor to move
				 */
				inline CorrespondenceGroupsAccessor(CorrespondenceGroupsAccessor<TFirst, TSecond>&& accessor);

				/**
				 * Returns the number of groups of this accessor.
				 * @return The number of groups
				 */
				inline size_t groups() const;

				/**
				 * Returns the number of elements within a specified group.
				 * @param groupIndex The index of the group for which the number of elements is requested, with range [0, groups())
				 * @return The number of elements within the specified group
				 */
				inline size_t groupElements(const size_t groupIndex) const;

				/**
				 * Returns one pair of a specific group of this object.
				 * @param groupIndex The index of the group, with range [0, groups())
				 * @param elementIndex The index of the element within the specified group, with range [0, groupElements(groupIndex))
				 * @param first The resulting first element of the specified pair
				 * @param second The resulting second element of the specified pair
				 */
				inline void element(const size_t groupIndex, const size_t elementIndex, TFirst& first, TSecond& second) const;

				/**
				 * Returns the first element of a pair of a specific group of this object.
				 * @param groupIndex The index of the group, with range [0, groups())
				 * @param elementIndex The index of the element within the specified group, with range [0, groupElements(groupIndex))
				 * @return The first element of the specified pair
				 */
				inline const TFirst& firstElement(const size_t groupIndex, const size_t elementIndex) const;

				/**
				 * Returns the second element of a pair of a specific group of this object.
				 * @param groupIndex The index of the group, with range [0, groups())
				 * @param elementIndex The index of the element within the specified group, with range [0, groupElements(groupIndex))
				 * @return The second element of the specified pair
				 */
				inline const TSecond& secondElement(const size_t groupIndex, const size_t elementIndex) const;

				/**
				 * Copy operator.
				 * @param accessor Accessor to copy
				 * @return Reference to this object
				 */
				inline CorrespondenceGroupsAccessor<TFirst, TSecond>& operator=(const CorrespondenceGroupsAccessor<TFirst, TSecond>& accessor);

				/**
				 * Move operator.
				 * @param accessor Accessor to move
				 * @return Reference to this object
				 */
				inline CorrespondenceGroupsAccessor<TFirst, TSecond>& operator=(CorrespondenceGroupsAccessor<TFirst, TSecond>&& accessor);

			protected:

				/**
				 * Creates a new accessor object.
				 */
				inline CorrespondenceGroupsAccessor();

				/**
				 * Creates a new accessor object.
				 * @param elementGroups The element groups of the object
				 */
				inline CorrespondenceGroupsAccessor(const ElementGroups& elementGroups);

				/**
				 * Creates a new accessor object.
				 * @param elementGroups The element groups of the object to move
				 */
				inline CorrespondenceGroupsAccessor(ElementGroups&& elementGroups);

			protected:

				/// The groups of elements of this accessor.
				ElementGroups elementGroups_;
		};
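
		/*
		 * Usage sketch (editorial addition, not part of the original header), assuming an
		 * accessor instance derived from CorrespondenceGroupsAccessor<Index32, Vector2>,
		 * e.g., a PoseGroupsAccessor; the function name is hypothetical:
		 * @code
		 * void iterateCorrespondences(const NonLinearOptimization::PoseGroupsAccessor& accessor)
		 * {
		 * 	for (size_t groupIndex = 0; groupIndex < accessor.groups(); ++groupIndex)
		 * 	{
		 * 		for (size_t elementIndex = 0; elementIndex < accessor.groupElements(groupIndex); ++elementIndex)
		 * 		{
		 * 			Index32 objectPointId;
		 * 			Vector2 imagePoint;
		 * 			accessor.element(groupIndex, elementIndex, objectPointId, imagePoint);
		 *
		 * 			// ... use the correspondence ...
		 * 		}
		 * 	}
		 * }
		 * @endcode
		 */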

		/**
		 * This class implements an abstract specialization of the accessor for groups of pairs for object points.
		 * The accessor provides one group for each object point while each group holds pairs of correspondences between poses and image points.
		 */
		class ObjectPointGroupsAccessor : public CorrespondenceGroupsAccessor<Index32, Vector2>
		{
			protected:

				/**
				 * Creates a new accessor object.
				 */
				inline ObjectPointGroupsAccessor();

				/**
				 * Creates a new accessor object.
				 * @param elementGroups The element groups of the object
				 */
				inline ObjectPointGroupsAccessor(const ElementGroups& elementGroups);

				/**
				 * Creates a new accessor object.
				 * @param elementGroups The element groups of the object to move
				 */
				inline ObjectPointGroupsAccessor(ElementGroups&& elementGroups);
		};

		/**
		 * This class implements an abstract specialization of the accessor for groups of pairs for poses.
		 * The accessor provides one group for each pose while each group holds pairs of correspondences between object points and image points.
		 */
		class PoseGroupsAccessor : public CorrespondenceGroupsAccessor<Index32, Vector2>
		{
			protected:

				/**
				 * Creates a new accessor object.
				 */
				inline PoseGroupsAccessor();

				/**
				 * Creates a new accessor object.
				 * @param elementGroups The element groups of the object
				 */
				inline PoseGroupsAccessor(const ElementGroups& elementGroups);

				/**
				 * Creates a new accessor object.
				 * @param elementGroups The element groups of the object to move
				 */
				inline PoseGroupsAccessor(ElementGroups&& elementGroups);
		};

		/**
		 * This class implements a group accessor providing access to pairs of poses and image points.
		 * The groups of pairs have the following structure, while each object point can have an arbitrary number of pairs:
		 * <pre>
		 * objectPoint_0 -> (pose_0, imagePoint_0)
		 *               -> (pose_1, imagePoint_4)
		 *               -> (pose_5, imagePoint_9)
		 *
		 * objectPoint_1 -> (pose_0, imagePoint_2)
		 *
		 * objectPoint_2 -> (pose_2, imagePoint_3)
		 *               -> (pose_1, imagePoint_8)
		 *
		 * objectPoint_3 -> (pose_9, imagePoint_5)
		 * </pre>
		 */
		class ObjectPointToPoseIndexImagePointCorrespondenceAccessor : public ObjectPointGroupsAccessor
		{
			public:

				/**
				 * Default constructor.
				 */
				ObjectPointToPoseIndexImagePointCorrespondenceAccessor() = default;

				/**
				 * Creates a new accessor object for two sets of image points which are the observations of the same object points in two individual camera frames.
				 * @param imagePoints0 The first set of image points which are the observations of the object points in the first pose
				 * @param imagePoints1 The second set of image points which are the observations of the object points in the second pose
				 * @tparam TAccessor The data type of the accessor providing the image points
				 */
				template <typename TAccessor>
				ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const TAccessor& imagePoints0, const TAccessor& imagePoints1);

				/**
				 * Creates a new accessor object for a set of object points all observed by the same number of image points in a set of camera poses.
				 * Thus, the number of corresponding image points for one object point is identical to the number of camera poses.
				 * @param imagePointGroups The groups of image points observing the object points, one group for each object point; the indices of the image points are identical to the indices of the corresponding camera poses
				 * @tparam TAccessor The data type of the accessor providing the image points
				 */
				template <typename TAccessor>
				explicit ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const std::vector<TAccessor>& imagePointGroups);

				/**
				 * Creates a new accessor object for a set of object points all observed by the same number of image points in a set of camera poses.
				 * Thus, the number of corresponding image points for one object point is identical to the number of camera poses.<br>
				 * The provided image points have the following pattern for n object points:<br>
				 * <pre>
				 * Group 0 / Object Point 0 , Group 1 / Object Point 1 , Group 2 / Object Point 2 ...
				 * i0, i1, i2, i3, i4, i5, ..., in, in+1, in+2, in+3, in+4, ..., in+n+1, in+n+2, ...
				 * </pre>
				 * where i0, in+1, in+n+1 ... are the image points located in the first camera frame; i1, in+2, in+n+2 ... are image points located in the second frame, and so on.
				 * @param sequentialImagePointGroups The groups of image points observing the object points provided sequentially, one sequential group for each object point; the indices of the image points (within the sequential groups) are identical to the indices of the corresponding camera poses, while sequentialImagePointGroups.size() is a multiple of numberObjectPoints
				 * @param numberObjectPoints The number of object points (the number of sequential groups of image points), with range [1, infinity)
				 * @tparam TAccessor The data type of the accessor providing the image points
				 */
				template <typename TAccessor>
				ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const TAccessor& sequentialImagePointGroups, const size_t numberObjectPoints);

				/**
				 * Creates a new accessor object for a set of object points all observed by the same number of image points in a set of camera poses.
				 * Thus, the number of corresponding image points for one object point is identical to the number of camera poses.<br>
				 * The provided image points have the following pattern for n object points:<br>
				 * <pre>
				 * Camera pose 0 , Camera pose 1 , Camera pose 2 ...
				 * i0, i1, i2, i3, i4, i5, ..., in, in+1, in+2, in+3, in+4, ..., in+n+1, in+n+2, ...
				 * </pre>
				 * where i0, in+1, in+n+1 ... are the image points observing the first object point; i1, in+2, in+n+2 ... are image points observing the second object point, and so on.
				 * @param numberObjectPoints The number of object points, with range [1, infinity)
				 * @param imagePoints The set of image points observing the object points provided in sequential groups, each group holds all image points for one camera pose, while imagePoints.size() is a multiple of numberObjectPoints
				 * @tparam TAccessor The data type of the accessor providing the image points
				 */
				template <typename TAccessor>
				ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const size_t numberObjectPoints, const TAccessor& imagePoints);

				/**
				 * Adds the observations of a new object point to this accessor.
				 * @param poseIdImagePointPairs The pairs combining the pose id and the image point locations, at least one
				 * @return The index of the new object point which has been added, with range [0, infinity)
				 */
				inline size_t addObjectPoint(Elements&& poseIdImagePointPairs);
		};
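
		/*
		 * Usage sketch (editorial addition, not part of the original header): creating the
		 * accessor for object points observed in two camera frames; 'imagePoints0' and
		 * 'imagePoints1' are hypothetical containers with one observation per object point:
		 * @code
		 * Vectors2 imagePoints0 = ...; // observations in the first frame
		 * Vectors2 imagePoints1 = ...; // corresponding observations in the second frame
		 *
		 * const ObjectPointToPoseIndexImagePointCorrespondenceAccessor accessor(
		 * 	ConstArrayAccessor<Vector2>(imagePoints0), ConstArrayAccessor<Vector2>(imagePoints1));
		 *
		 * // accessor.groups() == imagePoints0.size(), each group holds the pairs
		 * // (0, imagePoints0[n]) and (1, imagePoints1[n])
		 * @endcode
		 */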

		/**
		 * This class implements the base optimization provider.
		 */
		class OptimizationProvider
		{
			public:

				/**
				 * Returns whether the provider comes with its own solver for the linear equation.
				 * @return True, if so
				 */
				inline bool hasSolver() const;

				/**
				 * Solves the linear equation JTJ * deltas = jErrors.
				 * @param JTJ The Hessian matrix (transposed Jacobian multiplied by the Jacobian)
				 * @param jErrors The individual error values
				 * @param deltas The resulting individual delta values
				 * @return True, if the equation could be solved
				 */
				inline bool solve(const Matrix& JTJ, const Matrix& jErrors, Matrix& deltas) const;

				/**
				 * Solves the linear equation JTJ * deltas = jErrors.
				 * @param JTJ The Hessian matrix (transposed Jacobian multiplied by the Jacobian)
				 * @param jErrors The individual error values
				 * @param deltas The resulting individual delta values
				 * @return True, if the equation could be solved
				 */
				inline bool solve(const SparseMatrix& JTJ, const Matrix& jErrors, Matrix& deltas) const;
		};
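
		/*
		 * Illustrative sketch (editorial addition): the interface a concrete dense provider is
		 * expected to offer in addition to OptimizationProvider, inferred from the calls made by
		 * denseOptimization() below; 'MyDenseProvider' and its members are hypothetical:
		 * @code
		 * class MyDenseProvider : public NonLinearOptimization::OptimizationProvider
		 * {
		 * 	public:
		 *
		 * 		// determines the robust error of the current model, returns Numeric::maxValue() for an invalid model
		 * 		template <Estimator::EstimatorType tEstimator>
		 * 		Scalar determineRobustError(Matrix& weightedErrorVector, Matrix& weightVector, const Matrix* invertedCovariances);
		 *
		 * 		// determines the Jacobian matrix for the current model
		 * 		void determineJacobian(Matrix& jacobian);
		 *
		 * 		// applies the (negative) delta values: model' = model - deltas
		 * 		void applyCorrection(const Matrix& deltas);
		 *
		 * 		// accepts the corrected model as the new (better) model
		 * 		void acceptCorrection();
		 * };
		 * @endcode
		 */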

		/**
		 * This class implements the base for an advanced dense optimization provider.
		 * The advanced optimization provider is able to determine an overall error as well as the entire Hessian matrix and Jacobian-Error vector for any intermediate Levenberg-Marquardt optimization step.
		 * @see OptimizationProvider
		 */
		class AdvancedDenseOptimizationProvider
		{
			public:

				/**
				 * Determines the error for the current model candidate (not the actual model).
				 * Needs to be implemented in the actual provider.
				 * @return The error for the current model candidate
				 */
				inline Scalar determineError();

				/**
				 * Determines the Hessian matrix and the Error-Jacobian vector based on the actual/current model (the transposed Jacobian multiplied by the individual errors).
				 * Needs to be implemented in the actual provider.
				 * @param hessian The resulting (n x n) Hessian matrix for a model with dimension n, which is the transposed Jacobian multiplied by the Jacobian; may be the approximated Hessian
				 * @param jacobianError The resulting (n x 1) Error-Jacobian vector
				 * @return True, if succeeded
				 */
				inline bool determineHessianAndErrorJacobian(Matrix& hessian, Matrix& jacobianError);

				/**
				 * Creates a new model candidate by adjusting the current model with delta values.
				 * Needs to be implemented in the actual provider.
				 * @param deltas The delta values to be applied
				 */
				inline void applyCorrection(const Matrix& deltas);

				/**
				 * Accepts the current model candidate as a new (better) model than the previous one.
				 * Needs to be implemented in the actual provider.
				 */
				inline void acceptCorrection();

				/**
				 * Returns whether the optimization process should stop, e.g., due to an external event.
				 * Needs to be implemented in the actual provider.
				 * @return True, to stop the optimization process
				 */
				inline bool shouldStop();

				/**
				 * Returns whether the provider comes with its own solver for the linear equation.
				 * @return True, if so
				 */
				inline bool hasSolver() const;

				/**
				 * Solves the linear equation Hessian * deltas = -jacobianError.
				 * @param hessian The Hessian matrix (transposed Jacobian multiplied by the Jacobian)
				 * @param jacobianError The transposed Jacobian multiplied by the individual errors
				 * @param deltas The resulting individual delta values
				 * @return True, if the equation could be solved
				 */
				inline bool solve(const Matrix& hessian, const Matrix& jacobianError, Matrix& deltas) const;

			protected:

				/**
				 * Protected default constructor.
				 */
				inline AdvancedDenseOptimizationProvider();
		};
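
		/*
		 * Editorial note: one Levenberg-Marquardt step of the advanced optimization solves
		 *   (H + lambda * diag(H)) * deltas = J^T * e
		 * and updates the model via model' = model - deltas; lambda is increased by lambdaFactor
		 * when the candidate error does not improve and decreased when it does (see
		 * advancedDenseOptimization() below).
		 */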

		/**
		 * This class implements the base class for an advanced sparse optimization provider.
		 * @see AdvancedDenseOptimizationProvider, OptimizationProvider
		 */
		class AdvancedSparseOptimizationProvider
		{
			public:

				/**
				 * Determines the error for the current model candidate (not the actual model).
				 * Needs to be implemented in the actual provider.
				 * @return The error for the current model candidate
				 */
				inline Scalar determineError();

				/**
				 * Determines any kind of (abstract) parameters based on the current/actual model (not the model candidate), e.g., the Jacobian parameters and/or a Hessian matrix.
				 * The provider is responsible for the actual data.<br>
				 * Needs to be implemented in the actual provider.
				 * @return True, if succeeded
				 */
				inline bool determineParameters();

				/**
				 * Creates a new model candidate by adjusting the current/actual model with delta values.
				 * Needs to be implemented in the actual provider.
				 * @param deltas The delta values to be applied
				 */
				inline void applyCorrection(const Matrix& deltas);

				/**
				 * Accepts the current model candidate as a new (better) model than the previous one.
				 * Needs to be implemented in the actual provider.
				 */
				inline void acceptCorrection();

				/**
				 * Returns whether the optimization process should stop, e.g., due to an external event.
				 * Needs to be implemented in the actual provider.
				 * @return True, to stop the optimization process
				 */
				inline bool shouldStop();

				/**
				 * Solves the linear equation Hessian * deltas = -jacobianError based on the internal data.
				 * The diagonal of the Hessian matrix will be multiplied by (1 + lambda) before the actual solving starts.<br>
				 * This function may be invoked several times before determineParameters() is invoked again, so the diagonal of the original Hessian matrix should be copied.
				 * @param deltas The resulting individual delta values
				 * @param lambda Optional lambda value for a Levenberg-Marquardt optimization, with range [0, infinity)
				 * @return True, if the equation could be solved
				 */
				inline bool solve(Matrix& deltas, const Scalar lambda = Scalar(0));

			protected:

				/**
				 * Protected default constructor.
				 */
				inline AdvancedSparseOptimizationProvider();
		};
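
		/*
		 * Illustrative sketch (editorial addition): the skeleton of a concrete advanced sparse
		 * provider; 'MySparseProvider' and its internal data are hypothetical:
		 * @code
		 * class MySparseProvider : public NonLinearOptimization::AdvancedSparseOptimizationProvider
		 * {
		 * 	public:
		 *
		 * 		Scalar determineError();    // error of the current model candidate
		 * 		bool determineParameters(); // e.g., builds the sparse Hessian and the Jacobian-Error vector internally
		 * 		void applyCorrection(const Matrix& deltas);
		 * 		void acceptCorrection();
		 * 		bool shouldStop();
		 * 		bool solve(Matrix& deltas, const Scalar lambda = Scalar(0)); // solves with the internally stored data
		 * };
		 * @endcode
		 */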

	public:

		/**
		 * Invokes the optimization of a dense (matrix) optimization problem.
		 * @param provider The optimization provider that is used during the optimization
		 * @param iterations Number of optimization iterations
		 * @param estimator Robust estimator to be applied
		 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
		 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
		 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
		 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
		 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
		 * @param intermediateErrors Optional resulting intermediate (improving) errors in relation to the defined estimator
		 * @return True, if at least one successful optimization iteration has been executed
		 * @tparam T Data type of the optimization provider
		 */
		template <typename T>
		static inline bool denseOptimization(T& provider, const unsigned int iterations = 5u, const Estimator::EstimatorType estimator = Estimator::ET_SQUARE, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);
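
		/*
		 * Usage sketch (editorial addition), assuming a provider type 'MyDenseProvider' as
		 * outlined above:
		 * @code
		 * MyDenseProvider provider; // holds the model and the measurements
		 *
		 * Scalar initialError = Numeric::maxValue();
		 * Scalar finalError = Numeric::maxValue();
		 *
		 * if (NonLinearOptimization::denseOptimization(provider, 20u, Estimator::ET_HUBER, Scalar(0.001), Scalar(5), &initialError, &finalError))
		 * {
		 * 	// the provider now holds the optimized model, with finalError <= initialError
		 * }
		 * @endcode
		 */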

		/**
		 * Invokes the optimization of a dense (matrix) optimization problem.
		 * @param provider The optimization provider that is used during the optimization
		 * @param iterations Number of optimization iterations
		 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
		 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
		 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
		 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
		 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
		 * @param intermediateErrors Optional resulting intermediate (improving) errors
		 * @return True, if at least one successful optimization iteration has been executed
		 * @tparam T Data type of the optimization provider
		 * @tparam tEstimator Type of the robust estimator to be applied
		 */
		template <typename T, Estimator::EstimatorType tEstimator>
		static bool denseOptimization(T& provider, const unsigned int iterations = 5u, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);

		/**
		 * Invokes the optimization of a sparse (matrix) optimization problem.
		 * @param provider The optimization provider that is used during the optimization
		 * @param iterations Number of optimization iterations
		 * @param estimator Robust estimator to be applied
		 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
		 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
		 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
		 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
		 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
		 * @param intermediateErrors Optional resulting intermediate (improving) errors
		 * @return True, if at least one successful optimization iteration has been executed
		 * @tparam T Data type of the optimization provider
		 */
		template <typename T>
		static bool sparseOptimization(T& provider, const unsigned int iterations = 5u, const Estimator::EstimatorType estimator = Estimator::ET_SQUARE, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);

		/**
		 * Invokes the optimization of a sparse (matrix) optimization problem.
		 * @param provider The optimization provider that is used during the optimization
		 * @param iterations Number of optimization iterations
		 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity)
		 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity)
		 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
		 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
		 * @param invertedCovariances Optional set of inverted covariances that define the individual uncertainties of the measurements
		 * @param intermediateErrors Optional resulting intermediate (improving) errors
		 * @return True, if at least one successful optimization iteration has been executed
		 * @tparam T Data type of the optimization provider
		 * @tparam tEstimator Type of the robust estimator to be applied
		 */
		template <typename T, Estimator::EstimatorType tEstimator>
		static bool sparseOptimization(T& provider, const unsigned int iterations = 5u, Scalar lambda = Scalar(0.001), const Scalar lambdaFactor = Scalar(5), Scalar* initialError = nullptr, Scalar* finalError = nullptr, const Matrix* invertedCovariances = nullptr, Scalars* intermediateErrors = nullptr);

		/**
		 * Invokes the optimization of a dense (matrix) optimization problem using an advanced optimization provider.
		 * The optimization can use a Levenberg-Marquardt approach or a Gauss-Newton approach.
		 * @param advancedDenseProvider The advanced dense optimization provider that is used during the optimization
		 * @param iterations Number of optimization iterations
		 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity), 0 to apply a Gauss-Newton optimization approach
		 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity), 1 to apply a Gauss-Newton optimization approach
		 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
		 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
		 * @param intermediateErrors Optional resulting intermediate (improving) errors
		 * @return True, if at least one successful optimization iteration has been executed
		 * @tparam T Data type of the advanced dense optimization provider, must be derived from AdvancedDenseOptimizationProvider
		 */
		template <typename T>
		static bool advancedDenseOptimization(T& advancedDenseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError = nullptr, Scalar* finalError = nullptr, Scalars* intermediateErrors = nullptr);
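
		/*
		 * Editorial note: the damping parameters select the optimization strategy:
		 * @code
		 * // Gauss-Newton: no damping
		 * NonLinearOptimization::advancedDenseOptimization(provider, 20u, Scalar(0), Scalar(1));
		 *
		 * // Levenberg-Marquardt: initial damping 0.001, adapted by factor 5
		 * NonLinearOptimization::advancedDenseOptimization(provider, 20u, Scalar(0.001), Scalar(5));
		 * @endcode
		 */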

		/**
		 * Invokes the optimization of a sparse (matrix) optimization problem using an advanced optimization provider.
		 * The optimization can use a Levenberg-Marquardt approach or a Gauss-Newton approach.
		 * @param advancedSparseProvider The advanced sparse optimization provider that is used during the optimization
		 * @param iterations Number of optimization iterations
		 * @param lambda Initial Levenberg-Marquardt damping value which may be changed after each iteration using the damping factor, with range [0, infinity), 0 to apply a Gauss-Newton optimization approach
		 * @param lambdaFactor Levenberg-Marquardt damping factor to be applied to the damping value, with range [1, infinity), 1 to apply a Gauss-Newton optimization approach
		 * @param initialError Optional resulting averaged robust (depending on estimator) pixel error for the given initial parameters
		 * @param finalError Optional resulting averaged robust (depending on estimator) pixel error for the final optimized parameters
		 * @param intermediateErrors Optional resulting intermediate (improving) errors
		 * @return True, if at least one successful optimization iteration has been executed
		 * @tparam T Data type of the advanced sparse optimization provider, must be derived from AdvancedSparseOptimizationProvider
		 */
		template <typename T>
		static bool advancedSparseOptimization(T& advancedSparseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError = nullptr, Scalar* finalError = nullptr, Scalars* intermediateErrors = nullptr);

		/**
		 * Translates the n/2 squared errors that correspond to n elements in the error vector to robust errors.
		 * @param sqrErrors The n/2 squared errors
		 * @param modelParameters Number of parameters that define the model that has to be optimized
		 * @param weightedErrors Initial n errors that will be translated to robust errors
		 * @param weightVectors The n individual weights that have been applied to the errors
		 * @param transposedInvertedCovariances Optional transposed 2x2 inverted covariance matrices, one for each pair of errors
		 * @return Resulting averaged robust error of the entire data set
		 * @tparam tEstimator Robust error estimator to be used
		 */
		template <Estimator::EstimatorType tEstimator>
		static Scalar sqrErrors2robustErrors2(const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances);
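
		/*
		 * Editorial note: for a robust (non-square) estimator each error element e is re-weighted
		 * with an estimator-specific scalar weight w, so that the normal equations solved above
		 * become J^T * diag(w) * J * deltas = J^T * diag(w) * e; the sqrErrors2robustErrors
		 * functions in this block compute these weighted errors and weights for error elements
		 * of dimension 2 (Vector2), a compile-time dimension tDimension, or a runtime dimension i.
		 */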

		/**
		 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
		 * @param sqrErrors The n/i squared errors
		 * @param modelParameters Number of parameters that define the model that has to be optimized
		 * @param weightedErrors Initial n errors that will be translated to robust errors
		 * @param weightVectors The n individual weights that have been applied to the errors
		 * @param transposedInvertedCovariances Optional transposed i x i inverted covariance matrices, one for each error element
		 * @return Resulting averaged robust error of the entire data set
		 * @tparam tEstimator Robust error estimator to be used
		 * @tparam tDimension Dimension of one error element
		 */
		template <Estimator::EstimatorType tEstimator, size_t tDimension>
		static Scalar sqrErrors2robustErrors(const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances);

		/**
		 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
		 * @param sqrErrors The n/i squared errors
		 * @param modelParameters Number of parameters that define the model that has to be optimized
		 * @param dimension The dimension of one error element, with range [1, infinity)
		 * @param weightedErrors_i Initial n errors that will be translated to robust errors
		 * @param weightVectors_i The n individual weights that have been applied to the errors
		 * @param transposedInvertedCovariances_i Optional transposed i x i inverted covariance matrices, one for each error element
		 * @return Resulting averaged robust error of the entire data set
		 * @tparam tEstimator Robust error estimator to be used
		 */
		template <Estimator::EstimatorType tEstimator>
		static Scalar sqrErrors2robustErrors_i(const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i);

		/**
		 * Translates the n/2 squared errors that correspond to n elements in the error vector to robust errors.
		 * @param estimator Robust error estimator to be used
		 * @param sqrErrors The n/2 squared errors
		 * @param modelParameters Number of parameters that define the model that has to be optimized
		 * @param weightedErrors Initial n errors that will be translated to robust errors
		 * @param weightVectors The n individual weights that have been applied to the errors
		 * @param transposedInvertedCovariances Optional transposed 2x2 inverted covariance matrices, one for each pair of errors
		 * @return Resulting averaged robust error of the entire data set
		 */
		static inline Scalar sqrErrors2robustErrors2(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances);

		/**
		 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
		 * @param estimator Robust error estimator to be used
		 * @param sqrErrors The n/i squared errors
		 * @param modelParameters Number of parameters that define the model that has to be optimized
		 * @param weightedErrors Initial n errors that will be translated to robust errors
		 * @param weightVectors The n individual weights that have been applied to the errors
		 * @param transposedInvertedCovariances Optional transposed i x i inverted covariance matrices, one for each error element
		 * @return Resulting averaged robust error of the entire data set
		 * @tparam tDimension Dimension of one error element
		 */
		template <size_t tDimension>
		static inline Scalar sqrErrors2robustErrors(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances);

		/**
		 * Translates the n/i squared errors that correspond to n elements in the error vector to robust errors.
		 * @param estimator Robust error estimator to be used
		 * @param sqrErrors The n/i squared errors
		 * @param modelParameters Number of parameters that define the model that has to be optimized
		 * @param dimension The dimension of one error element, with range [1, infinity)
		 * @param weightedErrors_i Initial n errors that will be translated to robust errors
		 * @param weightVectors_i The n individual weights that have been applied to the errors
		 * @param transposedInvertedCovariances_i Optional transposed i x i inverted covariance matrices, one for each error element
		 * @return Resulting averaged robust error of the entire data set
		 */
		static inline Scalar sqrErrors2robustErrors_i(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i);
};

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::CorrespondenceGroupsAccessor()
{
	// nothing to do here
}

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::CorrespondenceGroupsAccessor(const ElementGroups& elementGroups) :
	elementGroups_(elementGroups)
{
	// nothing to do here
}

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::CorrespondenceGroupsAccessor(ElementGroups&& elementGroups) :
	elementGroups_(std::move(elementGroups))
{
	// nothing to do here
}

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::CorrespondenceGroupsAccessor(const CorrespondenceGroupsAccessor<TFirst, TSecond>& accessor) :
	elementGroups_(accessor.elementGroups_)
{
	// nothing to do here
}

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::CorrespondenceGroupsAccessor(CorrespondenceGroupsAccessor<TFirst, TSecond>&& accessor) :
	elementGroups_(std::move(accessor.elementGroups_))
{
	// nothing to do here
}

template <typename TFirst, typename TSecond>
inline size_t NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::groups() const
{
	return elementGroups_.size();
}

template <typename TFirst, typename TSecond>
inline size_t NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::groupElements(const size_t groupIndex) const
{
	ocean_assert(groupIndex < groups());

	return elementGroups_[groupIndex].size();
}

template <typename TFirst, typename TSecond>
inline void NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::element(const size_t groupIndex, const size_t elementIndex, TFirst& first, TSecond& second) const
{
	ocean_assert(groupIndex < groups());
	ocean_assert(elementIndex < groupElements(groupIndex));

	const Element& element = elementGroups_[groupIndex][elementIndex];

	first = element.first;
	second = element.second;
}

template <typename TFirst, typename TSecond>
inline const TFirst& NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::firstElement(const size_t groupIndex, const size_t elementIndex) const
{
	ocean_assert(groupIndex < groups());
	ocean_assert(elementIndex < groupElements(groupIndex));

	return elementGroups_[groupIndex][elementIndex].first;
}

template <typename TFirst, typename TSecond>
inline const TSecond& NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::secondElement(const size_t groupIndex, const size_t elementIndex) const
{
	ocean_assert(groupIndex < groups());
	ocean_assert(elementIndex < groupElements(groupIndex));

	return elementGroups_[groupIndex][elementIndex].second;
}

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>& NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::operator=(const CorrespondenceGroupsAccessor<TFirst, TSecond>& accessor)
{
	elementGroups_ = accessor.elementGroups_;

	return *this;
}

template <typename TFirst, typename TSecond>
inline NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>& NonLinearOptimization::CorrespondenceGroupsAccessor<TFirst, TSecond>::operator=(CorrespondenceGroupsAccessor<TFirst, TSecond>&& accessor)
{
	if (this != &accessor)
	{
		elementGroups_ = std::move(accessor.elementGroups_);
	}

	return *this;
}

inline NonLinearOptimization::ObjectPointGroupsAccessor::ObjectPointGroupsAccessor()
{
	// nothing to do here
}

inline NonLinearOptimization::ObjectPointGroupsAccessor::ObjectPointGroupsAccessor(const ElementGroups& elementGroups) :
	CorrespondenceGroupsAccessor<Index32, Vector2>(elementGroups)
{
	// nothing to do here
}

inline NonLinearOptimization::ObjectPointGroupsAccessor::ObjectPointGroupsAccessor(ElementGroups&& elementGroups) :
	CorrespondenceGroupsAccessor<Index32, Vector2>(std::move(elementGroups))
{
	// nothing to do here
}

inline NonLinearOptimization::PoseGroupsAccessor::PoseGroupsAccessor()
{
	// nothing to do here
}

inline NonLinearOptimization::PoseGroupsAccessor::PoseGroupsAccessor(const ElementGroups& elementGroups) :
	CorrespondenceGroupsAccessor<Index32, Vector2>(elementGroups)
{
	// nothing to do here
}

inline NonLinearOptimization::PoseGroupsAccessor::PoseGroupsAccessor(ElementGroups&& elementGroups) :
	CorrespondenceGroupsAccessor<Index32, Vector2>(std::move(elementGroups))
{
	// nothing to do here
}

inline bool NonLinearOptimization::OptimizationProvider::hasSolver() const
{
	return false;
}

inline bool NonLinearOptimization::OptimizationProvider::solve(const Matrix& /*JTJ*/, const Matrix& /*jErrors*/, Matrix& /*deltas*/) const
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return false;
}

inline bool NonLinearOptimization::OptimizationProvider::solve(const SparseMatrix& /*JTJ*/, const Matrix& /*jErrors*/, Matrix& /*deltas*/) const
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return false;
}

inline NonLinearOptimization::AdvancedDenseOptimizationProvider::AdvancedDenseOptimizationProvider()
{
	// nothing to do here
}

inline Scalar NonLinearOptimization::AdvancedDenseOptimizationProvider::determineError()
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return Numeric::maxValue();
}

inline bool NonLinearOptimization::AdvancedDenseOptimizationProvider::determineHessianAndErrorJacobian(Matrix& /*hessian*/, Matrix& /*jacobianError*/)
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return false;
}

inline void NonLinearOptimization::AdvancedDenseOptimizationProvider::applyCorrection(const Matrix& /*deltas*/)
{
	ocean_assert(false && "Must be implemented in a derived class!");
}

inline void NonLinearOptimization::AdvancedDenseOptimizationProvider::acceptCorrection()
{
	ocean_assert(false && "Must be implemented in a derived class!");
}

inline bool NonLinearOptimization::AdvancedDenseOptimizationProvider::shouldStop()
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return true;
}

inline bool NonLinearOptimization::AdvancedDenseOptimizationProvider::hasSolver() const
{
	return false;
}

inline bool NonLinearOptimization::AdvancedDenseOptimizationProvider::solve(const Matrix& /*hessian*/, const Matrix& /*jacobianError*/, Matrix& /*deltas*/) const
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return false;
}

inline NonLinearOptimization::AdvancedSparseOptimizationProvider::AdvancedSparseOptimizationProvider()
{
	// nothing to do here
}

inline Scalar NonLinearOptimization::AdvancedSparseOptimizationProvider::determineError()
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return Numeric::maxValue();
}

inline bool NonLinearOptimization::AdvancedSparseOptimizationProvider::determineParameters()
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return false;
}

inline void NonLinearOptimization::AdvancedSparseOptimizationProvider::applyCorrection(const Matrix& /*deltas*/)
{
	ocean_assert(false && "Must be implemented in a derived class!");
}

inline void NonLinearOptimization::AdvancedSparseOptimizationProvider::acceptCorrection()
{
	ocean_assert(false && "Must be implemented in a derived class!");
}

inline bool NonLinearOptimization::AdvancedSparseOptimizationProvider::shouldStop()
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return true;
}

inline bool NonLinearOptimization::AdvancedSparseOptimizationProvider::solve(Matrix& /*deltas*/, const Scalar /*lambda*/)
{
	ocean_assert(false && "Must be implemented in a derived class!");
	return true;
}

template <typename TAccessor>
NonLinearOptimization::ObjectPointToPoseIndexImagePointCorrespondenceAccessor::ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const TAccessor& imagePoints0, const TAccessor& imagePoints1) :
	ObjectPointGroupsAccessor(ElementGroups(imagePoints0.size(), Elements(2)))
{
	ocean_assert(imagePoints0.size() == imagePoints1.size());

	for (size_t n = 0; n < imagePoints0.size(); ++n)
	{
		elementGroups_[n][0].first = 0;
		elementGroups_[n][0].second = imagePoints0[n];

		elementGroups_[n][1].first = 1;
		elementGroups_[n][1].second = imagePoints1[n];
	}
}

template <typename TAccessor>
NonLinearOptimization::ObjectPointToPoseIndexImagePointCorrespondenceAccessor::ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const std::vector<TAccessor>& imagePointGroups) :
	ObjectPointGroupsAccessor(ElementGroups(imagePointGroups.size()))
{
	ocean_assert(elementGroups_.size() == imagePointGroups.size());

	for (size_t n = 0; n < imagePointGroups.size(); ++n)
	{
		const ConstIndexedAccessor<Vector2>& imagePoints = imagePointGroups[n];
		ocean_assert(imagePoints.size() >= 2);

		Elements poseIndexImagePoints;
		poseIndexImagePoints.reserve(imagePoints.size());

		for (size_t i = 0; i < imagePoints.size(); ++i)
		{
			poseIndexImagePoints.emplace_back(Index32(i), imagePoints[i]);
		}

		elementGroups_[n] = std::move(poseIndexImagePoints);
	}
}

template <typename TAccessor>
NonLinearOptimization::ObjectPointToPoseIndexImagePointCorrespondenceAccessor::ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const TAccessor& sequentialImagePointGroups, const size_t numberObjectPoints) :
	ObjectPointGroupsAccessor(ElementGroups(numberObjectPoints))
{
	ocean_assert(elementGroups_.size() == numberObjectPoints);
	ocean_assert(sequentialImagePointGroups.size() % numberObjectPoints == 0);

	const unsigned int numberImagePoints = (unsigned int)(sequentialImagePointGroups.size() / numberObjectPoints);

	unsigned int index = 0u;

	for (size_t n = 0; n < numberObjectPoints; ++n)
	{
		Elements poseIndexImagePoints;
		poseIndexImagePoints.reserve(numberImagePoints);

		for (unsigned int i = 0u; i < numberImagePoints; ++i)
		{
			poseIndexImagePoints.emplace_back(Index32(i), sequentialImagePointGroups[index++]);
		}

		elementGroups_[n] = std::move(poseIndexImagePoints);
	}
}

template <typename TAccessor>
NonLinearOptimization::ObjectPointToPoseIndexImagePointCorrespondenceAccessor::ObjectPointToPoseIndexImagePointCorrespondenceAccessor(const size_t numberObjectPoints, const TAccessor& imagePoints) :
	ObjectPointGroupsAccessor(ElementGroups(numberObjectPoints, Elements(numberObjectPoints == 0 ? 0 : (imagePoints.size() / numberObjectPoints))))
{
	ocean_assert(elementGroups_.size() == numberObjectPoints);
	ocean_assert(imagePoints.size() % numberObjectPoints == 0);

	ocean_assert(numberObjectPoints != 0);
	const unsigned int numberImagePoints = (unsigned int)(imagePoints.size() / numberObjectPoints);

	unsigned int poseIndex = 0u;
	unsigned int index = 0u;

	for (size_t n = 0; n < numberImagePoints; ++n)
	{
		for (size_t i = 0; i < numberObjectPoints; ++i)
		{
			ocean_assert(elementGroups_[i].size() == numberImagePoints);
			elementGroups_[i][poseIndex] = Element(poseIndex, imagePoints[index++]);
		}

		poseIndex++;
	}
}

inline size_t NonLinearOptimization::ObjectPointToPoseIndexImagePointCorrespondenceAccessor::addObjectPoint(Elements&& poseIdImagePointPairs)
{
	ocean_assert(!poseIdImagePointPairs.empty());

	const size_t objectPointIndex = elementGroups_.size();

	elementGroups_.emplace_back(std::move(poseIdImagePointPairs));

	return objectPointIndex;
}

template <typename T>
inline bool NonLinearOptimization::denseOptimization(T& provider, const unsigned int iterations, const Estimator::EstimatorType estimator, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
{
	switch (estimator)
	{
		case Estimator::ET_SQUARE:
			return denseOptimization<T, Estimator::ET_SQUARE>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_LINEAR:
			return denseOptimization<T, Estimator::ET_LINEAR>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_HUBER:
			return denseOptimization<T, Estimator::ET_HUBER>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_TUKEY:
			return denseOptimization<T, Estimator::ET_TUKEY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_CAUCHY:
			return denseOptimization<T, Estimator::ET_CAUCHY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		default:
			ocean_assert(false && "Invalid estimator!");
			return false;
	}
}

template <typename T, Estimator::EstimatorType tEstimator>
bool NonLinearOptimization::denseOptimization(T& provider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
{
	constexpr Scalar maxLambda = Scalar(1e8);

	ocean_assert(lambda >= Numeric::eps() && lambda <= maxLambda);

	Matrix jacobian;

	// the error vectors are weighted for non-square estimators only
	Matrix weightedErrorVector, swapWeightedErrorVector;
	// the weight vectors are used for non-square estimators only
	Matrix weightVector, swapWeightVector;

	Matrix JTJ, jErrors, deltas;

	// intermediate matrices that are used if a covariance matrix is provided
	Matrix intermediateCovarianceJacobian;
	Matrix intermediateWeightedErrorVector;

	Scalar bestError = provider.template determineRobustError<tEstimator>(weightedErrorVector, weightVector, invertedCovariances);

	if (bestError == Numeric::maxValue())
	{
		ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
		return false;
	}

	if (initialError != nullptr)
	{
		*initialError = bestError;
	}

	if (intermediateErrors != nullptr)
	{
		ocean_assert(intermediateErrors->empty());
		intermediateErrors->push_back(bestError);
	}

	bool oneValidIteration = false;

	unsigned int i = 0u;
	while (i < iterations)
	{
		provider.determineJacobian(jacobian);

		// if inverted covariance matrices are provided a further matrix multiplication must be respected
		if (invertedCovariances)
		{
			if constexpr (tEstimator == Estimator::ET_SQUARE)
			{
				// J^+ = (J^T * iCV * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn

				invertedCovariances->selfSquareDiagonalMatrixMultiply(jacobian, intermediateCovarianceJacobian);
				jacobian.transposedMultiply(intermediateCovarianceJacobian, JTJ);
				ocean_assert(JTJ.columns() == JTJ.rows());
			}
			else
			{
				// J^+ = (J^T * iCV * diag(weights) * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn

				invertedCovariances->selfSquareDiagonalMatrixMultiply(weightVector, jacobian, intermediateCovarianceJacobian);
				jacobian.transposedMultiply(intermediateCovarianceJacobian, JTJ);
				ocean_assert(JTJ.columns() == JTJ.rows());
			}

			// J^T * iCV * diag(weights) * error

			invertedCovariances->selfSquareDiagonalMatrixMultiply(weightedErrorVector, intermediateWeightedErrorVector);
			jacobian.transposedMultiply(intermediateWeightedErrorVector, jErrors);

			ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
		}
		else
		{
			if constexpr (tEstimator == Estimator::ET_SQUARE)
			{
				// J^+ = (J^T * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn
				jacobian.selfTransposedSquareMatrix(JTJ);
				ocean_assert(JTJ.columns() == JTJ.rows());
			}
			else
			{
				// J^+ = (J^T * diag(weights) * J + lambda * diag(J^T * J))^-1 * J^T, dimension: m x dn
				jacobian.weightedSelfTransposedSquareMatrix(weightVector, JTJ);
				ocean_assert(JTJ.columns() == JTJ.rows());
			}

			// jErrors = J^T * diag(weights) * error
			jacobian.transposedMultiply(weightedErrorVector, jErrors);
			ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
		}

		const Matrix JTJdiagonal(JTJ.diagonal());

		while (i < iterations)
		{
			++i;

			// J^T * J = J^T * J + lambda * diag(J^T * J)
			if (lambda > Numeric::eps())
			{
				for (unsigned int n = 0; n < JTJ.columns(); ++n)
				{
					JTJ(n, n) = JTJdiagonal(n, 0) * (Scalar(1) + lambda);
				}
			}

			// JTJ * deltas = -J^T * error
			// however, we determine JTJ * deltas = J^T * error and thus receive negative deltas (which then need to be subtracted from the current model/parameter configuration)

			bool solved = false;

			if (provider.hasSolver())
			{
				solved = provider.solve(JTJ, jErrors, deltas);
			}
			else
			{
				solved = JTJ.solve<Matrix::MP_SYMMETRIC>(jErrors, deltas);
			}

			if (solved)
			{
				oneValidIteration = true;

				// check whether the offset has converged
				if (Numeric::isEqualEps(deltas.norm() / Scalar(deltas.elements())))
				{
					i = iterations;
				}

				// we apply the deltas by: new = old - deltas (due to the solved equation: JTJ * deltas = J^T * error)
				provider.applyCorrection(deltas);

				const Scalar iterationError = provider.template determineRobustError<tEstimator>(swapWeightedErrorVector, swapWeightVector, invertedCovariances);

				// check whether the new error is not better than the best one
				if (iterationError >= bestError)
				{
					// modify the lambda parameter and start a new optimization, as long as the lambda is not zero already or too large
					if (lambdaFactor > Numeric::eps() && lambda > 0 && lambda <= maxLambda)
					{
						lambda *= lambdaFactor;
					}
					else
					{
						ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");

						// no further improvement can be applied
						i = iterations;
					}

					continue;
				}

				// we have an improvement
				bestError = iterationError;

				if (intermediateErrors != nullptr)
				{
					intermediateErrors->push_back(bestError);
				}

				provider.acceptCorrection();

				std::swap(swapWeightedErrorVector, weightedErrorVector);
				std::swap(swapWeightVector, weightVector);

				if (Numeric::isNotEqualEps(lambdaFactor))
				{
					// we do not decrease lambda if it is already near zero; in that case we simply stop the optimization once we fail to reduce the error
					if (lambda > Numeric::eps())
					{
						lambda /= lambdaFactor;
					}
				}

				// skip this inner loop here as a new jacobian has to be calculated
				break;
			}
			else if (lambda > Numeric::eps() && lambda <= maxLambda)
			{
				lambda *= lambdaFactor;
			}
			else
			{
				ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");

				// no further improvement can be applied
				i = iterations;
			}
		}
	}

	if (finalError)
	{
		*finalError = bestError;
	}

	return oneValidIteration;
}

template <typename T>
bool NonLinearOptimization::sparseOptimization(T& provider, const unsigned int iterations, const Estimator::EstimatorType estimator, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
{
	switch (estimator)
	{
		case Estimator::ET_SQUARE:
			return sparseOptimization<T, Estimator::ET_SQUARE>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_LINEAR:
			return sparseOptimization<T, Estimator::ET_LINEAR>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_HUBER:
			return sparseOptimization<T, Estimator::ET_HUBER>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_TUKEY:
			return sparseOptimization<T, Estimator::ET_TUKEY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		case Estimator::ET_CAUCHY:
			return sparseOptimization<T, Estimator::ET_CAUCHY>(provider, iterations, lambda, lambdaFactor, initialError, finalError, invertedCovariances, intermediateErrors);

		default:
			ocean_assert(false && "Invalid estimator!");
			return false;
	}
}

template <typename T, Estimator::EstimatorType tEstimator>
bool NonLinearOptimization::sparseOptimization(T& provider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, const Matrix* invertedCovariances, Scalars* intermediateErrors)
{
	constexpr Scalar maxLambda = Scalar(1e8);

	ocean_assert(lambda >= Numeric::eps() && lambda <= maxLambda);

	SparseMatrix jacobian;

	// the error vectors are weighted for non-square estimators only
	Matrix weightedErrorVector, swapWeightedErrorVector;
	// the weight vectors are used for non-square estimators only
	Matrix weightVector, swapWeightVector;

	const SparseMatrix invertedCovarianceMatrix = invertedCovariances ? SparseMatrix(invertedCovariances->rows(), invertedCovariances->rows(), *invertedCovariances) : SparseMatrix();

	SparseMatrix JTJ;
	Matrix jErrors, deltas;

	Scalar bestError = provider.template determineRobustError<tEstimator>(weightedErrorVector, weightVector, invertedCovariances);

	if (bestError == Numeric::maxValue())
	{
		ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
		return false;
	}

	if (initialError)
	{
		*initialError = bestError;
	}

	if (intermediateErrors != nullptr)
	{
		ocean_assert(intermediateErrors->empty());
		intermediateErrors->push_back(bestError);
	}

	bool oneValidIteration = false;

	unsigned int i = 0u;
	while (i < iterations)
	{
		provider.determineJacobian(jacobian);

		// if inverted covariance matrices are provided a further matrix multiplication must be respected
		if (invertedCovariances)
		{
			// delta = -(J^T * iCV * diag(weights) * J + lambda * diag(J^T * J))^-1 * J^T * iCV * diag(weights) * error
			// (J^T * iCV * diag(weights) * J + lambda * diag(J^T * J)) * delta = -J^T * iCV * diag(weights) * error
			// (J^T * iCV * diag(weights) * J + lambda * diag(J^T * J)) * -delta = J^T * iCV * diag(weights) * error

			// J^T
			const SparseMatrix jacobianTransposed(jacobian.transposed());

			if constexpr (tEstimator == Estimator::ET_SQUARE)
			{
				// J^T * iCV * J
				JTJ = jacobianTransposed * (invertedCovarianceMatrix * jacobian);
				ocean_assert(JTJ.columns() == JTJ.rows());
			}
			else
			{
				// J^T * iCV * diag(weights) * J
				JTJ = jacobianTransposed * (invertedCovarianceMatrix * (SparseMatrix(weightVector.rows(), weightVector.rows(), weightVector, true) * jacobian));
				ocean_assert(JTJ.columns() == JTJ.rows());
			}

			// J^T * iCV * diag(weights) * error
			jErrors = jacobianTransposed * (invertedCovarianceMatrix * weightedErrorVector);
			ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
		}
		else
		{
			// delta = -(J^T * diag(weights) * J + lambda * diag(J^T * J))^-1 * J^T * diag(weights) * error
			// (J^T * diag(weights) * J + lambda * diag(J^T * J)) * delta = -J^T * diag(weights) * error
			// (J^T * diag(weights) * J + lambda * diag(J^T * J)) * -delta = J^T * diag(weights) * error

			// J^T
			const SparseMatrix jacobianTransposed(jacobian.transposed());

			if constexpr (tEstimator == Estimator::ET_SQUARE)
			{
				// J^T * J
				JTJ = jacobianTransposed * jacobian;
				ocean_assert(JTJ.columns() == JTJ.rows());
			}
			else
			{
				// J^T * diag(weights) * J
				JTJ = jacobianTransposed * SparseMatrix(weightVector.rows(), weightVector.rows(), weightVector, true) * jacobian;
				ocean_assert(JTJ.columns() == JTJ.rows());
			}

			// J^T * diag(weights) * error
			jErrors = jacobianTransposed * weightedErrorVector;
			ocean_assert(jErrors.rows() == JTJ.rows() && jErrors.columns() == 1);
		}

		const Matrix JTJdiagonal(JTJ.diagonal());

		while (i < iterations)
		{
			++i;

			// J^T * J = J^T * J + lambda * diag(J^T * J)
			if (lambda > Numeric::eps())
			{
				for (unsigned int n = 0; n < JTJ.columns(); ++n)
				{
					if (JTJdiagonal(n, 0) != Scalar(0))
					{
						ocean_assert(!JTJ.isZero(n, n));
						JTJ(n, n) = JTJdiagonal(n, 0) * (Scalar(1) + lambda);
					}
				}
			}

			// JTJ * deltas = -J^T * error
			// however, we determine JTJ * deltas = J^T * error and thus receive negative deltas (which then need to be subtracted from the current model/parameter configuration)

			bool solved = false;

			if (provider.hasSolver())
			{
				solved = provider.solve(JTJ, jErrors, deltas);
			}
			else
			{
				solved = JTJ.solve(jErrors, deltas);
			}

			if (solved)
			{
				oneValidIteration = true;

				// check whether the offset has converged
				if (Numeric::isEqual(deltas.norm() / Scalar(deltas.elements()), 0, Numeric::weakEps() * Scalar(0.01)))
				{
					i = iterations;
				}

				// we apply the deltas by: new = old - deltas (due to the solved equation: JTJ * deltas = J^T * error)
				provider.applyCorrection(deltas);

				const Scalar iterationError = provider.template determineRobustError<tEstimator>(swapWeightedErrorVector, swapWeightVector, invertedCovariances);

				// check whether the new error is not better than the best one
				if (iterationError >= bestError)
				{
					// modify the lambda parameter and start a new optimization, as long as the lambda is not zero already or too large
					if (lambdaFactor > Numeric::eps() && lambda > 0 && lambda <= maxLambda)
					{
						lambda *= lambdaFactor;
					}
					else
					{
						ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");

						// no further improvement can be applied
						i = iterations;
					}

					continue;
				}

				// we have an improvement
				bestError = iterationError;

				if (intermediateErrors != nullptr)
				{
					intermediateErrors->push_back(bestError);
				}

				provider.acceptCorrection();

				std::swap(swapWeightedErrorVector, weightedErrorVector);
				std::swap(swapWeightVector, weightVector);

				if (Numeric::isNotEqualEps(lambdaFactor))
				{
					// we do not decrease lambda if it is already near zero; in that case we simply stop the optimization once we fail to reduce the error
					if (lambda > Numeric::eps())
					{
						lambda /= lambdaFactor;
					}
				}

				// skip this inner loop here as a new jacobian has to be calculated
				break;
			}
			else if (lambda > Numeric::eps() && lambda <= maxLambda)
			{
				lambda *= lambdaFactor;
			}
			else
			{
				ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");

				// no further improvement can be applied
				i = iterations;
			}
		}
	}

	if (finalError)
	{
		*finalError = bestError;
	}

	return oneValidIteration;
}
1430
1431template <typename T>
1432bool NonLinearOptimization::advancedDenseOptimization(T& advancedDenseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, Scalars* intermediateErrors)
1433{
1434 constexpr Scalar maxLambda = Scalar(1e8);
1435
1436 ocean_assert(lambda >= Scalar(0) && lambda <= maxLambda);
1437 ocean_assert((lambda == Scalar(0) && lambdaFactor == Scalar(1)) || (lambda > Scalar(0) && lambdaFactor > Scalar(1)));
1438
1439 const bool useLevenbergMarquardt = (lambda > 0 && lambdaFactor > 1);
1440
1441 // we determine the initial error only for the Levenberg-Marquardt optimization or if the user explicitly requests that value
1442 Scalar bestError = (useLevenbergMarquardt || initialError) ? advancedDenseProvider.determineError() : Numeric::minValue();
1443
1444 if (bestError == Numeric::maxValue())
1445 {
1446 ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
1447 return false;
1448 }
1449
1450 if (initialError)
1451 {
1452 *initialError = bestError;
1453 }
1454
1455 if (intermediateErrors != nullptr)
1456 {
1457 ocean_assert(intermediateErrors->empty());
1458 intermediateErrors->push_back(bestError);
1459 }
1460
1461 bool oneValidIteration = false;
1462
1463 Matrix hessian, jacobianError, deltas;
1464
1465 unsigned int i = 0u;
1466 while (!advancedDenseProvider.shouldStop() && i < iterations)
1467 {
1468 /**
1469 * Now the provider will determine the (n x n) Hessian matrix and the (n x 1) Jacobian-Error matrix; the Hessian may also be an approximation.
1470 * The Hessian matrix is calculated by J^T * J.
1471 * The Jacobian-Error matrix is calculated by J^T * E, with E the error vector.
1472 * The model has dimension n.
1473 */
1474 if (!advancedDenseProvider.determineHessianAndErrorJacobian(hessian, jacobianError))
1475 {
1476 ocean_assert(false && "The Hessian and the Jacobian with combined error values could not be determined!");
1477 return false;
1478 }
1479
1480 ocean_assert(hessian.rows() == hessian.columns());
1481 ocean_assert(hessian.rows() == jacobianError.rows() && jacobianError.columns() == 1);
1482
1483 // now we store the diagonal of the Hessian, allowing us to apply the Levenberg-Marquardt damping later
1484
1485 const Matrix hessianDiagonal(hessian.diagonal());
1486 ocean_assert(hessianDiagonal.rows() == hessian.rows() && hessianDiagonal.columns() == 1);
1487
1488 while (!advancedDenseProvider.shouldStop() && i++ < iterations)
1489 {
1490 // we apply the lambda value for the Levenberg-Marquardt:
1491 // Hessian + lambda * diag(Hessian)
1492 if (lambda > Numeric::eps())
1493 {
1494 for (unsigned int n = 0; n < hessian.columns(); ++n)
1495 {
1496 hessian(n, n) = hessianDiagonal(n, 0) * (Scalar(1) + lambda);
1497 }
1498 }
1499
1500 // now we solve: Hessian * deltas = -jacobianError (as we will subtract the deltas later, we can solve Hessian * deltas = +jacobianError instead)
1501
1502 bool solved = false;
1503
1504 if (advancedDenseProvider.hasSolver())
1505 {
1506 solved = advancedDenseProvider.solve(hessian, jacobianError, deltas);
1507 }
1508 else
1509 {
1510 solved = hessian.solve<Matrix::MP_SYMMETRIC>(jacobianError, deltas);
1511 }
1512
1513 if (solved)
1514 {
1515 oneValidIteration = true;
1516
1517 // check whether the offset has converged
1518 if (Numeric::isWeakEqualEps(deltas.norm() / Scalar(deltas.elements())))
1519 {
1520 i = iterations;
1521 }
1522
1523 // we apply the deltas by: new = old - deltas
1524 advancedDenseProvider.applyCorrection(deltas);
1525
1526 const Scalar iterationError = (useLevenbergMarquardt || intermediateErrors) ? advancedDenseProvider.determineError() : Numeric::minValue();
1527
1528 // if a Levenberg-Marquardt approach is intended, check whether the new error fails to improve on the best one
1529 if (useLevenbergMarquardt && iterationError >= bestError)
1530 {
1531 // modify the lambda parameter and start a new optimization
1532 if (lambda > 0 && lambda <= maxLambda)
1533 {
1534 lambda *= lambdaFactor;
1535 }
1536 else
1537 {
1538 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1539
1540 // no further improvement can be applied
1541 i = iterations;
1542 }
1543
1544 continue;
1545 }
1546
1547 // either we have an improvement, or we run a plain Gauss-Newton optimization which does not keep the previous iteration
1548 bestError = iterationError;
1549
1550 if (intermediateErrors != nullptr)
1551 {
1552 intermediateErrors->push_back(bestError);
1553 }
1554
1555 advancedDenseProvider.acceptCorrection();
1556
1557 if (useLevenbergMarquardt && Numeric::isNotEqualEps(lambdaFactor))
1558 {
1559 // we do not decrease lambda any further once it is already near zero; in that case we simply stop the optimization if we fail to reduce the error
1560 if (lambda > Numeric::eps())
1561 {
1562 lambda /= lambdaFactor;
1563 }
1564 }
1565
1566 // leave this inner loop as a new Jacobian has to be calculated
1567 break;
1568 }
1569 else if (lambda > Numeric::eps() && lambda <= maxLambda)
1570 {
1571 lambda *= lambdaFactor;
1572 }
1573 else
1574 {
1575 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1576
1577 // no further improvement can be applied
1578 i = iterations;
1579 }
1580 }
1581 }
1582
1583 if (finalError)
1584 {
1585 // if neither Levenberg-Marquardt nor intermediate errors were requested, the error has not been determined yet, so we determine it now
1586 if (!useLevenbergMarquardt && !intermediateErrors)
1587 {
1588 bestError = advancedDenseProvider.determineError();
1589 }
1590
1591 *finalError = bestError;
1592 }
1593
1594 return oneValidIteration;
1595}
1596
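// A usage sketch (illustrative, not part of this header): advancedDenseOptimization() only requires
// its template type to provide the member functions invoked above; the class SimpleDenseProvider and
// its members below are hypothetical, fit a 1-D model x to scalar observations y_i, and assume the
// Matrix(rows, columns) constructor and element access from ocean/math/Matrix.h
//
// class SimpleDenseProvider
// {
//     public:
//
//         explicit SimpleDenseProvider(const Scalars& observations) : observations_(observations) {}
//
//         Scalar determineError()
//         {
//             Scalar error = 0;
//             for (const Scalar y : observations_)
//                 error += Numeric::sqr(candidateX_ - y); // sum of squared residuals of the candidate
//             return error;
//         }
//
//         bool determineHessianAndErrorJacobian(Matrix& hessian, Matrix& jacobianError)
//         {
//             // residuals e_i = x - y_i, so the (n x 1) Jacobian is a column of ones
//             hessian = Matrix(1, 1);
//             hessian(0, 0) = Scalar(observations_.size()); // J^T * J
//
//             Scalar sum = 0;
//             for (const Scalar y : observations_)
//                 sum += x_ - y;
//
//             jacobianError = Matrix(1, 1);
//             jacobianError(0, 0) = sum; // J^T * E
//             return true;
//         }
//
//         void applyCorrection(const Matrix& deltas) { candidateX_ = x_ - deltas(0, 0); }
//         void acceptCorrection() { x_ = candidateX_; }
//         bool shouldStop() { return false; }
//         bool hasSolver() const { return false; } // fall back to hessian.solve()
//         bool solve(const Matrix&, const Matrix&, Matrix&) const { return false; } // unused stub
//
//     protected:
//
//         Scalars observations_;
//         Scalar x_ = Scalar(0);
//         Scalar candidateX_ = Scalar(0);
// };
//
// // usage, assuming a context with access to NonLinearOptimization::advancedDenseOptimization()
// // (e.g., a derived optimization class):
// // SimpleDenseProvider provider(observations);
// // advancedDenseOptimization(provider, 20u, Scalar(0.001), Scalar(5));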
1597template <typename T>
1598bool NonLinearOptimization::advancedSparseOptimization(T& advancedSparseProvider, const unsigned int iterations, Scalar lambda, const Scalar lambdaFactor, Scalar* initialError, Scalar* finalError, Scalars* intermediateErrors)
1599{
1600 constexpr Scalar maxLambda = Scalar(1e8);
1601
1602 ocean_assert(lambda >= Scalar(0) && lambda <= maxLambda);
1603 ocean_assert((lambda == Scalar(0) && lambdaFactor == Scalar(1)) || (lambda > Scalar(0) && lambdaFactor > Scalar(1)));
1604
1605 const bool useLevenbergMarquardt = (lambda > 0 && lambdaFactor > 1);
1606
1607 // we determine the initial error only for the Levenberg-Marquardt optimization or if the user explicitly requests that value
1608 Scalar bestError = (useLevenbergMarquardt || initialError) ? advancedSparseProvider.determineError() : Numeric::minValue();
1609
1610 if (bestError == Numeric::maxValue())
1611 {
1612 ocean_assert(false && "The initial model was invalid and thus the optimization cannot be applied!");
1613 return false;
1614 }
1615
1616 if (initialError)
1617 {
1618 *initialError = bestError;
1619 }
1620
1621 if (intermediateErrors != nullptr)
1622 {
1623 ocean_assert(intermediateErrors->empty());
1624 intermediateErrors->push_back(bestError);
1625 }
1626
1627 bool oneValidIteration = false;
1628
1629 Matrix deltas;
1630
1631 unsigned int i = 0u;
1632 while (!advancedSparseProvider.shouldStop() && i < iterations)
1633 {
1634 /**
1635 * Now the provider will determine all parameters such as the Jacobian, the transposed Jacobian, the Hessian and the Jacobian-Error matrix.
1636 * However, the provider may also determine only parts of these values, or abstract information, as long as it is able to solve the linear equation later.
1637 */
1638 if (!advancedSparseProvider.determineParameters())
1639 {
1640 ocean_assert(false && "The provider failed to determine the abstract parameters.");
1641 return false;
1642 }
1643
1644 while (!advancedSparseProvider.shouldStop() && i++ < iterations)
1645 {
1646 ocean_assert(lambda >= Scalar(0));
1647 if (advancedSparseProvider.solve(deltas, lambda))
1648 {
1649 oneValidIteration = true;
1650
1651 // check whether the offset has converged
1652 if (Numeric::isEqualEps(deltas.norm() / Scalar(deltas.elements())))
1653 {
1654 i = iterations;
1655 }
1656
1657 // we apply the deltas by: new = old - deltas
1658 advancedSparseProvider.applyCorrection(deltas);
1659
1660 const Scalar iterationError = (useLevenbergMarquardt || intermediateErrors) ? advancedSparseProvider.determineError() : Numeric::minValue();
1661
1662 // if a Levenberg-Marquardt approach is intended, check whether the new error fails to improve on the best one
1663 if (useLevenbergMarquardt && iterationError >= bestError)
1664 {
1665 // modify the lambda parameter and start a new optimization
1666 if (lambda > 0 && lambda <= maxLambda)
1667 {
1668 lambda *= lambdaFactor;
1669 }
1670 else
1671 {
1672 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1673
1674 // no further improvement can be applied
1675 i = iterations;
1676 }
1677
1678 continue;
1679 }
1680
1681 // either we have an improvement, or we run a plain Gauss-Newton optimization which does not keep the previous iteration
1682 bestError = iterationError;
1683
1684 if (intermediateErrors != nullptr)
1685 {
1686 intermediateErrors->push_back(bestError);
1687 }
1688
1689 advancedSparseProvider.acceptCorrection();
1690
1691 if (useLevenbergMarquardt && Numeric::isNotEqualEps(lambdaFactor))
1692 {
1693 // we do not decrease lambda any further once it is already near zero; in that case we simply stop the optimization if we fail to reduce the error
1694 if (lambda > Numeric::eps())
1695 {
1696 lambda /= lambdaFactor;
1697 }
1698 }
1699
1700 // leave this inner loop as a new Jacobian has to be calculated
1701 break;
1702 }
1703 else if (lambda > Numeric::eps() && lambda <= maxLambda)
1704 {
1705 lambda *= lambdaFactor;
1706 }
1707 else
1708 {
1709 ocean_assert(oneValidIteration && "At this moment we should have at least one valid iteration!");
1710
1711 // no further improvement can be applied
1712 i = iterations;
1713 }
1714 }
1715 }
1716
1717 if (finalError)
1718 {
1719 // if neither Levenberg-Marquardt nor intermediate errors were requested, the error has not been determined yet, so we determine it now
1720 if (!useLevenbergMarquardt && !intermediateErrors)
1721 {
1722 bestError = advancedSparseProvider.determineError();
1723 }
1724
1725 *finalError = bestError;
1726 }
1727
1728 return oneValidIteration;
1729}
1730
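// In contrast to the advanced dense provider above, an advanced sparse provider keeps the Jacobian,
// the Hessian and the error information internally: determineParameters() gathers them for the
// current model, and solve(deltas, lambda) is expected to apply the Levenberg-Marquardt damping
// (Hessian + lambda * diag(Hessian)) itself before solving for the deltas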
1731template <Estimator::EstimatorType tEstimator>
1732Scalar NonLinearOptimization::sqrErrors2robustErrors2(const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances)
1733{
1734 // determine the ideal sigma for the squared errors
1735 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors.data(), sqrErrors.size(), modelParameters)) : 0;
1736
1737 Scalar robustError = 0;
1738
1739 for (size_t n = 0; n < sqrErrors.size(); ++n)
1740 {
1741 ocean_assert(Numeric::isEqual(weightedErrors[n].sqr(), sqrErrors[n]));
1742
1743 // determine the weight for each individual image point; however, as e.g. the Tukey estimator may return a weight of 0, we have to clamp the weight to ensure that we can still solve the equation
1744 // **NOTE** the better way would be to remove the entry from the equation entirely and then to solve it
1745 const Scalar weight = max(Numeric::weakEps(), Estimator::robustWeightSquare<tEstimator>(sqrErrors[n], sqrSigma));
1746
1747 // increase the total robust error
1748 if (transposedInvertedCovariances)
1749 {
1750 robustError += (transposedInvertedCovariances[n].transposed() * weightedErrors[n]).sqr() * weight;
1751 }
1752 else
1753 {
1754 robustError += sqrErrors[n] * weight;
1755 }
1756
1757 weightedErrors[n] *= weight;
1758
1759 weightVectors[n] = Vector2(weight, weight);
1760 }
1761
1762 // return the averaged robust error
1763 return robustError / Scalar(sqrErrors.size());
1764}
1765
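// The translation above (and in the two variants below) performs one iteratively re-weighted least
// squares (IRLS) step: every squared error receives a robust weight based on the chosen estimator,
// the entries of the error vector are scaled by that weight, and the weights are returned so that
// the caller can assemble the weighted normal equations J^T * W * J * deltas = J^T * W * E;
// the returned scalar is the average robustified error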
1766template <Estimator::EstimatorType tEstimator, size_t tDimension>
1767Scalar NonLinearOptimization::sqrErrors2robustErrors(const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances)
1768{
1769 ocean_assert(transposedInvertedCovariances == nullptr && "Currently not implemented");
1770 OCEAN_SUPPRESS_UNUSED_WARNING(transposedInvertedCovariances);
1771
1772 // determine the ideal sigma for the squared errors
1773 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors.data(), sqrErrors.size(), modelParameters)) : 0;
1774
1775 Scalar robustError = 0;
1776
1777 for (size_t n = 0; n < sqrErrors.size(); ++n)
1778 {
1779 StaticBuffer<Scalar, tDimension>& weightedErrorsPointer = weightedErrors[n];
1780 StaticBuffer<Scalar, tDimension>& weightVectorsPointer = weightVectors[n];
1781
1782 ocean_assert(Numeric::isEqual(Numeric::summedSqr(weightedErrorsPointer.data(), tDimension), sqrErrors[n]));
1783
1784 // determine the weight for each individual image point; however, as e.g. the Tukey estimator may return a weight of 0, we have to clamp the weight to ensure that we can still solve the equation
1785 // **NOTE** the better way would be to remove the entry from the equation entirely and then to solve it
1786 const Scalar weight = max(Numeric::weakEps(), Estimator::robustWeightSquare<tEstimator>(sqrErrors[n], sqrSigma));
1787
1788 // increase the total robust error
1789 robustError += sqrErrors[n] * weight;
1790
1791 for (size_t d = 0; d < tDimension; ++d)
1792 {
1793 weightedErrorsPointer[d] *= weight;
1794 weightVectorsPointer[d] = weight;
1795 }
1796 }
1797
1798 // return the averaged robust error
1799 return robustError / Scalar(sqrErrors.size());
1800}
1801
1802template <Estimator::EstimatorType tEstimator>
1803Scalar NonLinearOptimization::sqrErrors2robustErrors_i(const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i)
1804{
1805 ocean_assert(transposedInvertedCovariances_i == nullptr && "Currently not implemented");
1806 OCEAN_SUPPRESS_UNUSED_WARNING(transposedInvertedCovariances_i);
1807
1808 // determine the ideal sigma for the squared errors
1809 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors.data(), sqrErrors.size(), modelParameters)) : 0;
1810
1811 Scalar robustError = 0;
1812
1813 for (size_t n = 0; n < sqrErrors.size(); ++n)
1814 {
1815 Scalar* const weightedErrorsPointer = weightedErrors_i + n * dimension;
1816 Scalar* const weightVectorsPointer = weightVectors_i + n * dimension;
1817
1818 ocean_assert(Numeric::isEqual(Numeric::summedSqr(weightedErrorsPointer, dimension), sqrErrors[n]));
1819
1820 // determine the weight for each individual image point
1821 const Scalar weight = Estimator::robustWeightSquare<tEstimator>(sqrErrors[n], sqrSigma);
1822
1823 // increase the total robust error
1824 robustError += sqrErrors[n] * weight;
1825
1826 for (size_t d = 0; d < dimension; ++d)
1827 {
1828 weightedErrorsPointer[d] *= weight;
1829 weightVectorsPointer[d] = weight;
1830 }
1831 }
1832
1833 // return the averaged robust error
1834 return robustError / Scalar(sqrErrors.size());
1835}
1836
1837inline Scalar NonLinearOptimization::sqrErrors2robustErrors2(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, Vector2* weightedErrors, Vector2* weightVectors, const SquareMatrix2* transposedInvertedCovariances)
1838{
1839 switch (estimator)
1840 {
1841 case Estimator::ET_SQUARE:
1842 return sqrErrors2robustErrors2<Estimator::ET_SQUARE>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1843
1844 case Estimator::ET_LINEAR:
1845 return sqrErrors2robustErrors2<Estimator::ET_LINEAR>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1846
1847 case Estimator::ET_HUBER:
1848 return sqrErrors2robustErrors2<Estimator::ET_HUBER>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1849
1850 case Estimator::ET_TUKEY:
1851 return sqrErrors2robustErrors2<Estimator::ET_TUKEY>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1852
1853 case Estimator::ET_CAUCHY:
1854 return sqrErrors2robustErrors2<Estimator::ET_CAUCHY>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1855
1856 default:
1857 ocean_assert(false && "Invalid estimator!");
1858 return Numeric::maxValue();
1859 }
1860}
1861
1862template <size_t tDimension>
1863inline Scalar NonLinearOptimization::sqrErrors2robustErrors(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, StaticBuffer<Scalar, tDimension>* weightedErrors, StaticBuffer<Scalar, tDimension>* weightVectors, const Matrix* transposedInvertedCovariances)
1864{
1865 switch (estimator)
1866 {
1867 case Estimator::ET_SQUARE:
1868 return sqrErrors2robustErrors<Estimator::ET_SQUARE, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1869
1870 case Estimator::ET_LINEAR:
1871 return sqrErrors2robustErrors<Estimator::ET_LINEAR, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1872
1873 case Estimator::ET_HUBER:
1874 return sqrErrors2robustErrors<Estimator::ET_HUBER, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1875
1876 case Estimator::ET_TUKEY:
1877 return sqrErrors2robustErrors<Estimator::ET_TUKEY, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1878
1879 case Estimator::ET_CAUCHY:
1880 return sqrErrors2robustErrors<Estimator::ET_CAUCHY, tDimension>(sqrErrors, modelParameters, weightedErrors, weightVectors, transposedInvertedCovariances);
1881
1882 default:
1883 ocean_assert(false && "Invalid estimator!");
1884 return Numeric::maxValue();
1885 }
1886}
1887
1888inline Scalar NonLinearOptimization::sqrErrors2robustErrors_i(const Estimator::EstimatorType estimator, const Scalars& sqrErrors, const size_t modelParameters, const size_t dimension, Scalar* weightedErrors_i, Scalar* weightVectors_i, const Matrix* transposedInvertedCovariances_i)
1889{
1890 switch (estimator)
1891 {
1892 case Estimator::ET_SQUARE:
1893 return sqrErrors2robustErrors_i<Estimator::ET_SQUARE>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1894
1895 case Estimator::ET_LINEAR:
1896 return sqrErrors2robustErrors_i<Estimator::ET_LINEAR>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1897
1898 case Estimator::ET_HUBER:
1899 return sqrErrors2robustErrors_i<Estimator::ET_HUBER>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1900
1901 case Estimator::ET_TUKEY:
1902 return sqrErrors2robustErrors_i<Estimator::ET_TUKEY>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1903
1904 case Estimator::ET_CAUCHY:
1905 return sqrErrors2robustErrors_i<Estimator::ET_CAUCHY>(sqrErrors, modelParameters, dimension, weightedErrors_i, weightVectors_i, transposedInvertedCovariances_i);
1906
1907 default:
1908 ocean_assert(false && "Invalid estimator!");
1909 return Numeric::maxValue();
1910 }
1911}
1912
1913}
1914
1915}
1916
1917#endif // META_OCEAN_GEOMETRY_NON_LINEAR_OPTIMIZATION_H