Ocean
Error.h
1/*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 *
4 * This source code is licensed under the MIT license found in the
5 * LICENSE file in the root directory of this source tree.
6 */
7
8#ifndef META_OCEAN_GEOMETRY_ERROR_H
9#define META_OCEAN_GEOMETRY_ERROR_H
10
11#include "ocean/geometry/Geometry.h"
12#include "ocean/geometry/Estimator.h"
13
14#include "ocean/base/Accessor.h"
15
15#include "ocean/math/AnyCamera.h"
16#include "ocean/math/HomogenousMatrix4.h"
17#include "ocean/math/PinholeCamera.h"
18#include "ocean/math/SquareMatrix3.h"
19#include "ocean/math/Vector2.h"
20
21#include <algorithm>
22
23namespace Ocean
24{
25
26namespace Geometry
27{
28
29/**
30 * This class implements functions to determine the error or accuracy of geometric functions and their parameters.
31 * @ingroup geometry
32 */
33class OCEAN_GEOMETRY_EXPORT Error
34{
35 public:
36
37 /**
38 * Definition of different error determination stages.
39 */
40 enum ErrorDetermination
41 {
42 /// Invalid stage.
43 ED_INVALID = 0,
44 /// Unique error determination.
45 ED_UNIQUE,
46 /// Approximated error determination.
47 ED_APPROXIMATED,
48 /// Ambiguous error determination.
49 ED_AMBIGUOUS
50 };
51
52 private:
53
54 /**
55 * This class implements an element storing the error between two image points.
56 * Further, the indices of the points are stored.
57 */
58 class ErrorElement
59 {
60 public:
61
62 /**
63 * Creates a new error element.
64 * @param imageIndex Index of the image point
65 * @param candidateIndex Index of the candidate point
66 * @param error The error between the two points
67 */
68 inline ErrorElement(const unsigned int imageIndex, const unsigned int candidateIndex, const Scalar error);
69
70 /**
71 * Returns the index of the image point.
72 * @return Image point index
73 */
74 inline unsigned int imageIndex() const;
75
76 /**
77 * Returns the index of the candidate point.
78 * @return Candidate point index
79 */
80 inline unsigned int candidateIndex() const;
81
82 /**
83 * Returns the error between the two points.
84 * @return Stored error value
85 */
86 inline Scalar error() const;
87
88 /**
89 * Returns whether the left element has a smaller error than the right one.
90 * @param element Right element to compare
91 * @return True, if so
92 */
93 inline bool operator<(const ErrorElement& element) const;
94
95 private:
96
97 /// Index of the image point.
98 unsigned int imageIndex_ = (unsigned int)(-1);
99
100 /// Index of the candidate point.
101 unsigned int candidateIndex_ = (unsigned int)(-1);
102
103 /// Error between the two points.
104 Scalar error_ = Numeric::maxValue();
105 };
106
107 /**
108 * Definition of a vector holding error elements.
109 */
110 typedef std::vector<ErrorElement> ErrorElements;
111
112 public:
113
114 /**
115 * Determines the indices of a set of given parameter values that are below or equal to a provided threshold.
116 * @param parameters Parameter values that have to be investigated
117 * @param number The number of provided values, with range [1, infinity)
118 * @param threshold The threshold that is used to filter the parameter values
119 * @param validIndices Resulting indices of the provided parameter values that are below the specified threshold
120 */
121 static void determineValidParameters(const Scalar* parameters, const size_t number, const Scalar threshold, Indices32& validIndices);
122
123 /**
124 * Determines the indices of a set of given parameter values that are above a provided threshold.
125 * @param parameters Parameter values that have to be investigated
126 * @param number The number of provided values, with range [1, infinity)
127 * @param threshold The threshold that is used to filter the parameter values
128 * @param validIndices Resulting indices of the provided parameter values that are above the specified threshold
129 */
130 static void determineInvalidParameters(const Scalar* parameters, const size_t number, const Scalar threshold, Indices32& validIndices);
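// Usage sketch (added for illustration, not part of the original header), assuming the squared
// errors of all correspondences are already available, e.g., from determinePoseError(), and using
// a hypothetical 3-pixel threshold:
//
//   std::vector<Scalar> sqrErrors = ...;
//   Indices32 validIndices;
//   Error::determineValidParameters(sqrErrors.data(), sqrErrors.size(), Scalar(3 * 3), validIndices);
//   // validIndices now holds the indices of all entries with sqrErrors[i] <= 9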
131
132 /**
133 * Returns the average square error between two sets of 2D positions.
134 * Each point in the first point set corresponds to a point in the second point set with the same index.
135 * @param firstPoints First set of 2D points, each point has a corresponding point in the second set
136 * @param secondPoints Second set of 2D points
137 * @param errors Optional resulting errors individual for each provided point pair, make sure that the provided buffer is large enough
138 * @param sqrErrors Optional resulting squared errors individual for each provided point pair, make sure that the provided buffer is large enough
139 * @return Average square error between all point correspondences
140 */
141 static Scalar determineAverageError(const Vectors2& firstPoints, const Vectors2& secondPoints, Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
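// Usage sketch (added for illustration, not part of the original header): comparing two 2D point
// sets of equal size; the optional per-pair buffers, if provided, must hold one entry per pair:
//
//   Vectors2 firstPoints = ...;
//   Vectors2 secondPoints = ...;   // same size as firstPoints, index-wise correspondences
//   std::vector<Scalar> sqrErrors(firstPoints.size());
//   const Scalar averageSqrError = Error::determineAverageError(firstPoints, secondPoints, nullptr, sqrErrors.data());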
142
143 /**
144 * Returns the average square error between two sets of 2D positions.
145 * Each point in the first point set corresponds to a point in the second point set with the same index.
146 * @param firstTransformation Transformation that will be applied for all image points from the first set
147 * @param firstPoints First set of 2D points, each point has a corresponding point in the second set
148 * @param secondTransformation Transformation that will be applied for all image points from the second set
149 * @param secondPoints Second set of 2D points
150 * @return Average square error between all point correspondences
151 */
152 static Scalar determineAverageError(const SquareMatrix3& firstTransformation, const Vectors2& firstPoints, const SquareMatrix3& secondTransformation, const Vectors2& secondPoints);
153
154 /**
155 * Returns the average square error between two sets of 3D positions.
156 * Each point in the first point set corresponds to a point in the second point set with the same index.
157 * @param firstPoints First set of 3D points, each point has a corresponding point in the second set
158 * @param secondPoints Second set of 3D points
159 * @return Average square error between all point correspondences
160 */
161 static Scalar determineAverageError(const Vectors3& firstPoints, const Vectors3& secondPoints);
162
163 /**
164 * Determining the average, minimal and maximal square error between two sets of 2D positions.
165 * Each point in the first point set corresponds to a point in the second point set with the same index.
166 * @param firstPoints First set of 2D points, each point has a corresponding point in the second set
167 * @param secondPoints Second set of 2D points
168 * @param sqrAverageError Average square error of all point correspondences
169 * @param sqrMinimalError Minimal square error of all point correspondences
170 * @param sqrMaximalError Maximal square error of all point correspondences
171 * @return Sum of square errors of all point correspondences
172 */
173 static Scalar determineError(const Vectors2& firstPoints, const Vectors2& secondPoints, Scalar& sqrAverageError, Scalar& sqrMinimalError, Scalar& sqrMaximalError);
174
175 /**
176 * Returns the average, minimal and maximal square error between two sets of 3D positions.
177 * Each point in the first point set corresponds to a point in the second point set with the same index.
178 * @param firstPoints First set of 3D points, each point has a corresponding point in the second set
179 * @param secondPoints Second set of 3D points
180 * @param sqrAverageError Average square error of all point correspondences
181 * @param sqrMinimalError Minimal square error of all point correspondences
182 * @param sqrMaximalError Maximal square error of all point correspondences
183 * @return Sum of square errors of all point correspondences
184 */
185 static Scalar determineError(const Vectors3& firstPoints, const Vectors3& secondPoints, Scalar& sqrAverageError, Scalar& sqrMinimalError, Scalar& sqrMaximalError);
186
187 /**
188 * Returns whether the offsets between two given 6DOF poses are below specified thresholds.
189 * @param poseFirst First pose to be checked
190 * @param poseSecond Second pose to be checked
191 * @param maxTranslationOffset Maximal allowed translation offset, with positive values for the individual translation axes
192 * @param maxOrientationOffset Maximal allowed orientation offset, defined in radian
193 * @return True, if so
194 */
195 static bool posesAlmostEqual(const HomogenousMatrix4& poseFirst, const HomogenousMatrix4& poseSecond, const Vector3& maxTranslationOffset = Vector3(Scalar(0.1), Scalar(0.1), Scalar(0.1)), const Scalar maxOrientationOffset = Numeric::deg2rad(15));
196
197 /**
198 * Returns whether the offsets between two given 6DOF poses are below specified thresholds.
199 * @param poseFirst First pose to be checked
200 * @param poseSecond Second pose to be checked
201 * @param maxTranslationOffset Maximal allowed translation offset, with positive values for the individual translation axes, might be Vector3(Scalar(0.1), Scalar(0.1), Scalar(0.1))
202 * @return True, if so
203 */
204 static inline bool posesAlmostEqual(const HomogenousMatrix4& poseFirst, const HomogenousMatrix4& poseSecond, const Vector3& maxTranslationOffset);
205
206 /**
207 * Returns whether the offsets between two given 6DOF poses are below specified thresholds.
208 * @param poseFirst First pose to be checked
209 * @param poseSecond Second pose to be checked
210 * @param maxOrientationOffset Maximal allowed orientation offset, defined in radian, might be Numeric::deg2rad(15)
211 * @return True, if so
212 */
213 static inline bool posesAlmostEqual(const HomogenousMatrix4& poseFirst, const HomogenousMatrix4& poseSecond, const Scalar maxOrientationOffset);
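// Usage sketch (added for illustration, not part of the original header), e.g., to check whether a
// re-estimated camera pose stayed close to the previous one; the thresholds are arbitrary examples:
//
//   const HomogenousMatrix4 world_T_previousCamera = ...;
//   const HomogenousMatrix4 world_T_currentCamera = ...;
//   if (Error::posesAlmostEqual(world_T_previousCamera, world_T_currentCamera, Vector3(Scalar(0.05), Scalar(0.05), Scalar(0.05)), Numeric::deg2rad(5)))
//   {
//       // translation (per axis) and orientation offsets are both within the thresholds
//   }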
214
215 /**
216 * Determines the accuracy of the intrinsic camera matrix (and camera distortion parameters if requested).
217 * The accuracy is determined by transforming the normalized 3D image points (3D object points transformed by the flipped and inverted extrinsic matrix) to the image plane.<br>
218 * @param pinholeCamera The pinhole camera object to be tested
219 * @param normalizedObjectPoints Normalized object points
220 * @param imagePoints Image points, each point corresponds to one normalized object point
221 * @param correspondences Number of correspondences to be checked
222 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
223 * @param errors Optional resulting error values individually for each given point correspondence
224 * @param sqrErrors Optional resulting squared error values individually for each given point correspondence
225 * @return Returns the average square projection pixel error
226 */
227 static Scalar determineCameraError(const PinholeCamera& pinholeCamera, const Vector2* normalizedObjectPoints, const Vector2* imagePoints, const size_t correspondences, const bool useDistortionParameters, Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
228
229 /**
230 * Determines the accuracy of the intrinsic camera matrix (and camera distortion parameters if requested).
231 * The accuracy is determined by transforming the normalized 3D image points (3D object points transformed by the flipped and inverted extrinsic matrix) to the image plane.<br>
232 * @param pinholeCamera The pinhole camera object to be tested
233 * @param normalizedObjectPoints Normalized object points
234 * @param imagePoints Image points, each point corresponds to one normalized object point
235 * @param correspondences Number of correspondences to be checked
236 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
237 * @param sqrAveragePixelError Average square projection pixel error
238 * @param sqrMinimalPixelError Minimal square projection pixel error
239 * @param sqrMaximalPixelError Maximal square projection pixel error
240 */
241 static void determineCameraError(const PinholeCamera& pinholeCamera, const Vector2* normalizedObjectPoints, const Vector2* imagePoints, const size_t correspondences, const bool useDistortionParameters, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError);
242
243 /**
244 * Determines the accuracy of a given homography for a set of corresponding image points.
245 * The given homography H transforms a point p0 from the first set of image points to the corresponding point p1 from the second set of image points: p1 = H * p0
246 * In case a transformed point cannot be normalized (de-homogenized) by its z-component, the individual errors will be set to Numeric::maxValue() and the resulting average error will be Numeric::maxValue() as well.
247 * @param points1_H_points0 The homography transforming points0 to points1, must be valid
248 * @param imagePointAccessor0 The first set of image points, may be empty
249 * @param imagePointAccessor1 The second set of image points, each point has a corresponding point in the first set
250 * @param errors Optional resulting error values individually for each given point correspondence
251 * @param sqrErrors Optional resulting squared error values individually for each given point correspondence
252 * @return Returns the average square pixel error
253 * @tparam TAccessorImagePoints The template type of the accessor for the image points
254 * @tparam tResultingErrors True, if errors is defined
255 * @tparam tResultingSqrErrors True, if sqrErrors is defined
256 */
257 template <typename TAccessorImagePoints, bool tResultingErrors, bool tResultingSqrErrors>
258 static Scalar determineHomographyError(const SquareMatrix3& points1_H_points0, const TAccessorImagePoints& imagePointAccessor0, const TAccessorImagePoints& imagePointAccessor1, Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
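// Usage sketch (added for illustration, not part of the original header), assuming Ocean's
// ConstArrayAccessor (ocean/base/Accessor.h) can wrap the point vectors; only squared errors are
// requested, so tResultingErrors is false and tResultingSqrErrors is true:
//
//   const SquareMatrix3 points1_H_points0 = ...;   // e.g., a previously estimated homography
//   Vectors2 points0 = ...;
//   Vectors2 points1 = ...;                        // same size as points0
//   std::vector<Scalar> sqrErrors(points0.size());
//   const Scalar averageSqrError = Error::determineHomographyError<ConstArrayAccessor<Vector2>, false, true>(
//       points1_H_points0, ConstArrayAccessor<Vector2>(points0), ConstArrayAccessor<Vector2>(points1), nullptr, sqrErrors.data());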
259
260 /**
261 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
262 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
263 * @param world_T_camera The camera pose transforming camera to world, with default camera pointing towards the negative z-space, with y-axis upwards, must be valid
264 * @param camera The camera model defining the projection, must be valid
265 * @param objectPoint The 3D Object point, defined in world
266 * @param imagePoint The 2D image point corresponding to the object point, defined in the camera pixel domain
267 * @return The resulting error value
268 */
269 static inline Vector2 determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& camera, const Vector3& objectPoint, const Vector2& imagePoint);
270
271 /**
272 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
273 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
274 * @param flippedCamera_T_world The inverted and flipped camera pose, transforming world to flipped camera, with default flipped camera pointing towards the positive z-space, with y-axis downwards, must be valid
275 * @param camera The camera profile defining the projection, must be valid
276 * @param objectPoint The 3D Object point, defined in world
277 * @param imagePoint The 2D image point corresponding to the object point, defined in the camera pixel domain
278 * @return The resulting error value
279 */
280 static inline Vector2 determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, const Vector3& objectPoint, const Vector2& imagePoint);
281
282 /**
283 * Deprecated.
284 *
285 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
286 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
287 * @param world_T_camera The camera pose transforming camera to world, must be valid
288 * @param pinholeCamera The pinhole camera model defining the projection, must be valid
289 * @param objectPoint 3D Object point defined in world
290 * @param imagePoint 2D Image point corresponding to the object point
291 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
292 * @return Resulting error value
293 */
294 static inline Vector2 determinePoseError(const HomogenousMatrix4& world_T_camera, const PinholeCamera& pinholeCamera, const Vector3& objectPoint, const Vector2& imagePoint, const bool useDistortionParameters);
295
296 /**
297 * Deprecated.
298 *
299 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
300 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
301 * @param flippedCamera_T_world Inverted and flipped camera pose, transforming world to flipped camera, must be valid
302 * @param pinholeCamera The pinhole camera specifying the internal camera parameters and optionally distortion
303 * @param objectPoint 3D Object point defined in world
304 * @param imagePoint 2D Image point corresponding to the object point
305 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
306 * @return Resulting error value
307 */
308 static inline Vector2 determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const PinholeCamera& pinholeCamera, const Vector3& objectPoint, const Vector2& imagePoint, const bool useDistortionParameters);
309
310 /**
311 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
312 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
313 * @param world_T_camera The camera pose transforming camera to world, must be valid
314 * @param pinholeCamera The pinhole camera model defining the projection, must be valid
315 * @param objectPointAccessor The accessor providing the 3D object points defined in world
316 * @param imagePointAccessor The accessor providing the 2D image points corresponding to the object points, one image point for each object point
317 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
318 * @param zoom The zoom factor of the camera, with range (0, infinity), with 1 the default zoom factor
319 * @param errors Optional resulting error values individually for each given point correspondence
320 * @param sqrErrors Optional resulting squared error values individually for each given point correspondence
321 * @return Returns the average squared projection pixel error
322 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
323 * @tparam TAccessorImagePoints The template type of the accessor for the image points
324 * @tparam tUseBorderDistortionIfOutside True, to apply the camera distortion from the nearest point lying on the frame border if the point lies outside the visible camera area; False, to apply the distortion from the given position
325 * @tparam tResultingErrors True, if errors is defined
326 * @tparam tResultingSqrErrors True, if sqrErrors is defined
327 */
328 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside, bool tResultingErrors, bool tResultingSqrErrors>
329 static inline Scalar determinePoseError(const HomogenousMatrix4& world_T_camera, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, const Scalar zoom = Scalar(1), Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
330
331 /**
332 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
333 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
334 * @param world_T_camera The camera pose, transforming camera to world, must be valid
335 * @param anyCamera The camera profile defining the projection, must be valid
336 * @param objectPointAccessor The accessor providing the 3D object points corresponding to the given pose
337 * @param imagePointAccessor The accessor providing the 2D image points corresponding to the object points, one image point for each object point
338 * @param errors Optional resulting error values individually for each given point correspondence
339 * @param sqrErrors Optional resulting squared error values individually for each given point correspondence
340 * @return Returns the average squared projection pixel error
341 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
342 * @tparam TAccessorImagePoints The template type of the accessor for the image points
343 * @tparam tResultingErrors True, if errors is defined
344 * @tparam tResultingSqrErrors True, if sqrErrors is defined
345 */
346 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tResultingErrors, bool tResultingSqrErrors>
347 static Scalar determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& anyCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
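// Usage sketch (added for illustration, not part of the original header): average squared
// reprojection error of a camera pose for known 2D/3D correspondences, without per-point outputs
// (both tResultingErrors and tResultingSqrErrors are false); ConstArrayAccessor is assumed from
// ocean/base/Accessor.h:
//
//   const AnyCamera& camera = ...;                 // any camera profile, e.g., a pinhole camera
//   const HomogenousMatrix4 world_T_camera = ...;
//   Vectors3 objectPoints = ...;
//   Vectors2 imagePoints = ...;                    // one image point per object point
//   const Scalar averageSqrError = Error::determinePoseError<ConstArrayAccessor<Vector3>, ConstArrayAccessor<Vector2>, false, false>(
//       world_T_camera, camera, ConstArrayAccessor<Vector3>(objectPoints), ConstArrayAccessor<Vector2>(imagePoints));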
348
349 /**
350 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
351 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
352 * @param flippedCamera_T_world Inverted and flipped extrinsic camera pose, transforming world to flipped camera, must be valid
353 * @param pinholeCamera The pinhole camera model defining the projection, must be valid
354 * @param objectPointAccessor The accessor providing the 3D object points defined in world
355 * @param imagePointAccessor The accessor providing the 2D image points corresponding to the object points, one image point for each object point
356 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
357 * @param zoom The zoom factor of the camera, with range (0, infinity), with 1 the default zoom factor
358 * @param errors Optional resulting error values individually for each given point correspondence
359 * @param sqrErrors Optional resulting squared error values individually for each given point correspondence
360 * @return Returns the average squared projection pixel error
361 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
362 * @tparam TAccessorImagePoints The template type of the accessor for the image points
363 * @tparam tUseBorderDistortionIfOutside True, to apply the camera distortion from the nearest point lying on the frame border if the point lies outside the visible camera area; False, to apply the distortion from the given position
364 * @tparam tResultingErrors True, if errors is defined
365 * @tparam tResultingSqrErrors True, if sqrErrors is defined
366 */
367 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside, bool tResultingErrors, bool tResultingSqrErrors>
368 static Scalar determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, const Scalar zoom = Scalar(1), Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
369
370 /**
371 * Determines the accuracy of the camera pose based on 2D/3D correspondences.
372 * The accuracy is determined based on the projection errors between projected 3D points and their corresponding 2D image points.
373 * @param flippedCamera_T_world The inverted and flipped camera pose, transforming world to flipped camera, must be valid
374 * @param anyCamera The camera profile defining the projection, must be valid
375 * @param objectPointAccessor The accessor providing the 3D object points corresponding to the given pose
376 * @param imagePointAccessor The accessor providing the 2D image points corresponding to the object points, one image point for each object point
377 * @param errors Optional resulting error values individually for each given point correspondence
378 * @param sqrErrors Optional resulting squared error values individually for each given point correspondence
379 * @return Returns the average squared projection pixel error
380 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
381 * @tparam TAccessorImagePoints The template type of the accessor for the image points
382 * @tparam tResultingErrors True, if errors is defined
383 * @tparam tResultingSqrErrors True, if sqrErrors is defined
384 */
385 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tResultingErrors, bool tResultingSqrErrors>
386 static Scalar determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& anyCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Vector2* errors = nullptr, Scalar* sqrErrors = nullptr);
387
388 /**
389 * Determines the accuracy of a camera pose in combination with the corresponding camera profile.
390 * The accuracy is determined by transforming the given 3D object points using the extrinsic camera parameter and projecting these points onto the image plane.
391 * @param world_T_camera The camera pose, transforming camera to world, with default camera pointing towards the negative z-space, with y-axis upwards, must be valid
392 * @param camera The camera profile defining the projection, must be valid
393 * @param objectPointAccessor Accessor providing the 3D object points corresponding to the given pose
394 * @param imagePointAccessor Accessor providing the 2D image points corresponding to the object points
395 * @param sqrAveragePixelError Average square projection pixel error
396 * @param sqrMinimalPixelError Minimal square projection pixel error
397 * @param sqrMaximalPixelError Maximal square projection pixel error
398 * @return True, if the error could be determined for all point correspondences; False, if the input was invalid or e.g., a 3D object point is located behind the camera and 'tOnlyFrontObjectPoints == true'
399 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
400 * @tparam TAccessorImagePoints The template type of the accessor for the image points
401 * @tparam tOnlyFrontObjectPoints True, to fail in case a 3D object point is not in front of the camera; False, to ignore whether 3D object points are in front of or behind the camera
402 */
403 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tOnlyFrontObjectPoints>
404 [[nodiscard]] static inline bool determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError);
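// Usage sketch (added for illustration, not part of the original header), using the same inputs as
// in the sketch above; with tOnlyFrontObjectPoints == true the function returns false as soon as an
// object point does not lie in front of the camera:
//
//   Scalar sqrAverage, sqrMinimal, sqrMaximal;
//   if (!Error::determinePoseError<ConstArrayAccessor<Vector3>, ConstArrayAccessor<Vector2>, true>(
//           world_T_camera, camera, ConstArrayAccessor<Vector3>(objectPoints), ConstArrayAccessor<Vector2>(imagePoints), sqrAverage, sqrMinimal, sqrMaximal))
//   {
//       // the input was invalid, or at least one 3D object point is located behind the camera
//   }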
405
406 /**
407 * Deprecated.
408 *
409 * Determines the accuracy of a camera pose in combination with the corresponding camera profile.
410 * The accuracy is determined by transforming the given 3D object points using the extrinsic camera parameter and projecting these points onto the image plane.
411 * @param world_T_camera The camera pose, transforming camera to world, with default camera pointing towards the negative z-space, with y-axis upwards, must be valid
412 * @param camera The camera profile defining the projection, must be valid
413 * @param objectPointAccessor Accessor providing the 3D object points corresponding to the given pose
414 * @param imagePointAccessor Accessor providing the 2D image points corresponding to the object points
415 * @param sqrAveragePixelError Average square projection pixel error
416 * @param sqrMinimalPixelError Minimal square projection pixel error
417 * @param sqrMaximalPixelError Maximal square projection pixel error
418 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
419 * @tparam TAccessorImagePoints The template type of the accessor for the image points
420 */
421 template <typename TAccessorObjectPoints, typename TAccessorImagePoints>
422 static inline void determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError);
423
424 /**
425 * Determines the accuracy of the extrinsic and intrinsic camera matrix (and camera distortion if requested).
426 * The accuracy is determined by transforming the given 3D object points using the extrinsic camera parameter and projecting these points onto the image plane.
427 * @param world_T_camera The camera pose, transforming camera to world, must be valid
428 * @param pinholeCamera The pinhole camera specifying the internal camera parameters and optionally distortion
429 * @param objectPointAccessor Accessor providing the 3D object points corresponding to the given pose
430 * @param imagePointAccessor Accessor providing the 2D image points corresponding to the object points
431 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
432 * @param sqrAveragePixelError Average square projection pixel error
433 * @param sqrMinimalPixelError Minimal square projection pixel error
434 * @param sqrMaximalPixelError Maximal square projection pixel error
435 * @param zoom The zoom factor of the camera, with range (0, infinity), with 1 the default zoom factor
436 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
437 * @tparam TAccessorImagePoints The template type of the accessor for the image points
438 * @tparam tUseBorderDistortionIfOutside True, to apply the camera distortion from the nearest point lying on the frame border if the point lies outside the visible camera area; False, to apply the distortion from the given position
439 */
440 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside>
441 static inline void determinePoseError(const HomogenousMatrix4& world_T_camera, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError, const Scalar zoom = Scalar(1));
442
443 /**
444 * Determines the accuracy of a camera pose in combination with the corresponding camera profile.
445 * The accuracy is determined by transforming the given 3D object points using the extrinsic camera parameter and projecting these points onto the image plane.<br>
446 * @param flippedCamera_T_world The inverted and flipped camera pose, transforming world to flipped camera, with default flipped camera pointing towards the positive z-space, with y-axis downwards, must be valid
447 * @param camera The camera profile defining the projection, must be valid
448 * @param objectPointAccessor Accessor providing the 3D object points defined in world
449 * @param imagePointAccessor Accessor providing the 2D image points corresponding to the object points
450 * @param sqrAveragePixelError Average square projection pixel error
451 * @param sqrMinimalPixelError Minimal square projection pixel error
452 * @param sqrMaximalPixelError Maximal square projection pixel error
453 * @return True, if the error could be determined for all point correspondences; False, if the input was invalid or e.g., a 3D object point is located behind the camera and 'tOnlyFrontObjectPoints == true'
454 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
455 * @tparam TAccessorImagePoints The template type of the accessor for the image points
456 * @tparam tOnlyFrontObjectPoints True, to fail in case a 3D object point is not in front of the camera; False, to ignore whether 3D object points are in front of or behind the camera
457 */
458 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tOnlyFrontObjectPoints>
459 [[nodiscard]] static bool determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError);
460
461 /**
462 * Deprecated.
463 *
464 * Determines the accuracy of a camera pose in combination with the corresponding camera profile.
465 * The accuracy is determined by transforming the given 3D object points using the extrinsic camera parameter and projecting these points onto the image plane.<br>
466 * @param flippedCamera_T_world The inverted and flipped camera pose, transforming world to flipped camera, with default flipped camera pointing towards the positive z-space, with y-axis downwards, must be valid
467 * @param camera The camera profile defining the projection, must be valid
468 * @param objectPointAccessor Accessor providing the 3D object points defined in world
469 * @param imagePointAccessor Accessor providing the 2D image points corresponding to the object points
470 * @param sqrAveragePixelError Average square projection pixel error
471 * @param sqrMinimalPixelError Minimal square projection pixel error
472 * @param sqrMaximalPixelError Maximal square projection pixel error
473 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
474 * @tparam TAccessorImagePoints The template type of the accessor for the image points
475 */
476 template <typename TAccessorObjectPoints, typename TAccessorImagePoints>
477 static void determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError);
478
479 /**
480 * Determines the accuracy of the extrinsic and intrinsic camera matrix (and camera distortion if requested).
481 * The accuracy is determined by transforming the given 3D object points using the extrinsic camera parameter and projecting these points onto the image plane.<br>
482 * Beware: The given camera matrix is not equal to an extrinsic matrix.<br>
483 * Instead, the given matrix is the extrinsic camera matrix flipped around the x-axis and inverted afterwards.
484 * @param flippedCamera_T_world Inverted and flipped camera pose, transforming world to flipped camera, must be valid
485 * @param pinholeCamera The pinhole camera specifying the internal camera parameters and optionally distortion
486 * @param objectPointAccessor Accessor providing the 3D object points defined in world
487 * @param imagePointAccessor Accessor providing the 2D image points corresponding to the object points
488 * @param useDistortionParameters True, to respect the distortion parameters of the given camera during object point projection
489 * @param sqrAveragePixelError Average square projection pixel error
490 * @param sqrMinimalPixelError Minimal square projection pixel error
491 * @param sqrMaximalPixelError Maximal square projection pixel error
492 * @param zoom The zoom factor of the camera, with range (0, infinity), with 1 the default zoom factor
493 * @tparam TAccessorObjectPoints The template type of the accessor for the object points
494 * @tparam TAccessorImagePoints The template type of the accessor for the image points
495 * @tparam tUseBorderDistortionIfOutside True, to apply the camera distortion from the nearest point lying on the frame border if the point lies outside the visible camera area; False, to apply the distortion from the given position
496 */
497 template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside>
498 static void determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError, const Scalar zoom = Scalar(1));
499
500 /**
501 * Determines the unique robust minimal average square error between two 2D points clouds.
502 * This function calls uniqueAveragedRobustErrorInPointCloud(), approximatedAveragedRobustErrorInPointCloud() or ambiguousAveragedRobustErrorInPointCloud() depending on the errorDetermination parameter.<br>
503 * @param imagePoints Image points to determine the minimal errors for, must be valid
504 * @param numberImagePoints Number of given image points, with range [1, numberCandidatePoints]
505 * @param validImagePoints The number of image points which can be expected to have a unique corresponding point inside the candidate set, with range [1, numberImagePoints]
506 * @param candidatePoints Possible candidate image points to be used for finding the minimal error, must be valid
507 * @param numberCandidatePoints Number of given candidate points, with range [1, infinity)
508 * @param errorDetermination Depending on this flag, uniqueAveragedRobustErrorInPointCloud(), approximatedAveragedRobustErrorInPointCloud() or ambiguousAveragedRobustErrorInPointCloud() will be used
509 * @param correspondences Optional resulting point correspondences, for each index of an image point one corresponding candidate point index will be returned
510 * @return Robust averaged square error
511 * @tparam tEstimator Estimator type to be applied
512 * @see uniqueAveragedRobustErrorInPointCloud(), approximatedAveragedRobustErrorInPointCloud(), ambiguousAveragedRobustErrorInPointCloud().
513 */
514 template <Estimator::EstimatorType tEstimator>
515 static Scalar averagedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, const ErrorDetermination errorDetermination, IndexPairs32* correspondences = nullptr);
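// Usage sketch (added for illustration, not part of the original header), assuming a square-error
// estimator Estimator::ET_SQUARE exists in Geometry::Estimator; unique assignment is requested via
// ED_UNIQUE, and the optional correspondences output is filled:
//
//   Vectors2 imagePoints = ...;                    // e.g., detected feature points
//   Vectors2 candidatePoints = ...;                // candidatePoints.size() >= imagePoints.size()
//   IndexPairs32 correspondences;
//   const Scalar robustError = Error::averagedRobustErrorInPointCloud<Estimator::ET_SQUARE>(
//       imagePoints.data(), imagePoints.size(), imagePoints.size(), candidatePoints.data(), candidatePoints.size(), Error::ED_UNIQUE, &correspondences);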
516
517 /**
518 * Determines the unique robust minimal average square error between two 2D points clouds.
519 * The given point clouds may not have the same size, and therefore the point order does not define any correspondences.<br>
520 * However, the number of given image points must be smaller or equal to the number of given candidate points.<br>
521 * The minimal error is found by determining the absolute minimal error between image points and candidate points.<br>
522 *
523 * Candidate points already used will not be used for any further correspondences, creating unique correspondences.<br>
524 * The uniqueness is guaranteed due to a brute force calculation of all possible distances.
525 * @param imagePoints Image points to determine the minimal errors for, must be valid
526 * @param numberImagePoints Number of given image points, with range [1, numberCandidatePoints]
527 * @param validImagePoints The number of image points which can be expected to have a unique corresponding point inside the candidate set, with range [1, numberImagePoints]
528 * @param candidatePoints Possible candidate image points to be used for finding the minimal error, must be valid
529 * @param numberCandidatePoints Number of given candidate points, with range [1, infinity)
530 * @param correspondences Optional resulting point correspondences, for each index of an image point one corresponding candidate point index will be returned
531 * @return Robust averaged minimal square error
532 * @tparam tEstimator Estimator type to be applied
533 * @see approximatedAveragedRobustErrorInPointCloud(), ambiguousAveragedRobustErrorInPointCloud().
534 */
535 template <Estimator::EstimatorType tEstimator>
536 static Scalar uniqueAveragedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, IndexPairs32* correspondences = nullptr);
537
538 /**
539 * Determines the approximated robust minimal average square error between two 2D points clouds.
540 * The given point clouds may not have the same size, and therefore the point order does not define any correspondences.<br>
541 * However, the number of given image points must be smaller or equal to the number of given candidate points.<br>
542 * The minimal error is found by determining the approximated absolute minimal error between image points and candidate points.
543 *
544 * Candidate points already used will not be used for any further correspondences, creating unique correspondences.<br>
545 * Instead of using a brute-force method, this function flags already used candidate points to avoid using them a second time; however, the result may not be the global optimum.
546 * @param imagePoints Image points to determine the minimal errors for, must be valid
547 * @param numberImagePoints Number of given image points, with range [1, numberCandidatePoints]
548 * @param validImagePoints The number of image points which can be expected to have a unique corresponding point inside the candidate set, with range [1, numberImagePoints]
549 * @param candidatePoints Possible candidate image points to be used for finding the minimal error, must be valid
550 * @param numberCandidatePoints Number of given candidate points, with range [1, infinity)
551 * @param correspondences Optional resulting point correspondences, for each index of an image point one corresponding candidate point index will be returned
552 * @return Robust averaged minimal square error
553 * @tparam tEstimator Estimator type to be applied
554 * @see uniqueAveragedRobustErrorInPointCloud(), ambiguousAveragedRobustErrorInPointCloud().
555 */
556 template <Estimator::EstimatorType tEstimator>
557 static Scalar approximatedAveragedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, IndexPairs32* correspondences = nullptr);
558
559 /**
560 * Determines the ambiguous robust minimal average square error between two 2D points clouds.
561 * The given point clouds may not have the same size, and therefore the point order does not define any correspondences.<br>
562 * However, the number of given image points must be smaller or equal to the number of given candidate points.<br>
563 * The minimal error is found by determining the absolute minimal error between image points and candidate points.<br>
564 *
565 * Beware: Candidate points already used may be used for further correspondences as well, creating ambiguous correspondences.<br>
566 * Thus, in the worst case, all image points may be assigned to one single candidate point.
567 * @param imagePoints Image points to determine the minimal errors for, must be valid
568 * @param numberImagePoints Number of given image points, with range [1, numberCandidatePoints]
569 * @param validImagePoints The number of image points which can be expected to have a unique corresponding point inside the candidate set, with range [1, numberImagePoints]
570 * @param candidatePoints Possible candidate image points to be used for finding the minimal error, must be valid
571 * @param numberCandidatePoints Number of given candidate points, with range [1, infinity)
572 * @param correspondences Optional resulting point correspondences, for each index of an image point one corresponding candidate point index will be returned
573 * @return Robust averaged minimal square error
574 * @see uniqueAveragedRobustErrorInPointCloud(), approximatedAveragedRobustErrorInPointCloud().
575 * @tparam tEstimator Estimator type to be applied
576 */
577 template <Estimator::EstimatorType tEstimator>
578 static Scalar ambiguousAveragedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, IndexPairs32* correspondences = nullptr);
579
580 /**
581 * Returns the averaged robust error for a given set of error values using a defined estimator.
582 * @param sqrErrors Specified squared error values to return the averaged error for
583 * @param number The number of given error values
584 * @param explicitWeights Optional additional weight values individual for each error to be applied to the resulting average robust error only
585 * @return Averaged robust error
586 * @tparam tEstimator Robust estimator to be used for error calculation
587 */
588 template <Estimator::EstimatorType tEstimator>
589 static Scalar averagedRobustError(const Scalar* sqrErrors, const size_t number, const Scalar* explicitWeights = nullptr);
590
591 /**
592 * Returns the averaged robust error for a given set of error values using a defined estimator.
593 * @param sqrErrors Specified squared error values to return the averaged error for
594 * @param number The number of given error values
595 * @param estimator Robust estimator to be used for error calculation
596 * @param explicitWeights Optional additional weight values individual for each error to be applied to the resulting average robust error only
597 * @return Averaged robust error
598 */
599 static inline Scalar averagedRobustError(const Scalar* sqrErrors, const size_t number, const Estimator::EstimatorType estimator, const Scalar* explicitWeights = nullptr);
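// Usage sketch (added for illustration, not part of the original header), assuming a Huber
// estimator Estimator::ET_HUBER exists in Geometry::Estimator:
//
//   std::vector<Scalar> sqrErrors = ...;           // e.g., filled by determinePoseError()
//   const Scalar robustError = Error::averagedRobustError(sqrErrors.data(), sqrErrors.size(), Estimator::ET_HUBER);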
600
601 /**
602 * Returns the averaged robust error for a given set of error values using a defined estimator.
603 * Not all error values are used but those defined as indices.
604 * @param sqrErrors Specified squared error values to return the averaged error for
605 * @param indices Indices of the given error values to be used for error calculation
606 * @param numberIndices Number of given indices
607 * @param explicitWeights Optional additional weight values individual for each error to be applied to the resulting average robust error only
608 * @return Averaged robust error
609 * @tparam tEstimator Robust estimator to be used for error calculation
610 */
611 template <Estimator::EstimatorType tEstimator>
612 static Scalar averagedRobustError(const Scalar* sqrErrors, const unsigned int* indices, const size_t numberIndices, const Scalar* explicitWeights = nullptr);
613
614 /**
615 * Returns the averaged robust error for a given set of error values using a defined estimator.
616 * Not all error values are used but those defined as indices.
617 * @param sqrErrors Specified squared error values to return the averaged error for
618 * @param indices Indices of the given error values to be used for error calculation
619 * @param numberIndices Number of given indices
620 * @param estimator Robust estimator to be used for error calculation
621 * @param explicitWeights Optional additional weight values individual for each error to be applied to the resulting average robust error only
622 * @return Averaged robust error
623 */
624 static inline Scalar averagedRobustError(const Scalar* sqrErrors, const unsigned int* indices, const size_t numberIndices, const Estimator::EstimatorType estimator, const Scalar* explicitWeights = nullptr);
625};
626
627inline Error::ErrorElement::ErrorElement(const unsigned int imageIndex, const unsigned int candidateIndex, const Scalar error) :
628 imageIndex_(imageIndex),
629 candidateIndex_(candidateIndex),
630 error_(error)
631{
632 // nothing to do here
633}
634
635inline unsigned int Error::ErrorElement::imageIndex() const
636{
637 return imageIndex_;
638}
639
640inline unsigned int Error::ErrorElement::candidateIndex() const
641{
642 return candidateIndex_;
643}
644
645inline Scalar Error::ErrorElement::error() const
646{
647 return error_;
648}
649
650inline bool Error::ErrorElement::operator<(const ErrorElement& element) const
651{
652 return error_ < element.error_;
653}
654
655inline bool Error::posesAlmostEqual(const HomogenousMatrix4& poseFirst, const HomogenousMatrix4& poseSecond, const Vector3& maxTranslationOffset)
656{
657 const Vector3 poseFirstPosition(poseFirst.translation());
658 const Vector3 poseSecondPosition(poseSecond.translation());
659
660 return Numeric::abs(poseFirstPosition.x() - poseSecondPosition.x()) <= maxTranslationOffset.x()
661 && Numeric::abs(poseFirstPosition.y() - poseSecondPosition.y()) <= maxTranslationOffset.y()
662 && Numeric::abs(poseFirstPosition.z() - poseSecondPosition.z()) <= maxTranslationOffset.z();
663}
664
665inline bool Error::posesAlmostEqual(const HomogenousMatrix4& poseFirst, const HomogenousMatrix4& poseSecond, const Scalar maxOrientationOffset)
666{
667 const Quaternion poseFirstOrientation(poseFirst.rotation());
668 const Quaternion poseSecondOrientation(poseSecond.rotation());
669
670 const Scalar maxOrientationOffsetCos2(Numeric::cos(maxOrientationOffset * Scalar(0.5)));
671
672 return poseFirstOrientation.cos2(poseSecondOrientation) >= maxOrientationOffsetCos2;
673}
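// Note (added for clarity, not part of the original header): Quaternion::cos2() is assumed to return
// the cosine of half the rotation angle between both orientations, so comparing it against
// Numeric::cos(maxOrientationOffset * 0.5) accepts all orientation offsets up to maxOrientationOffset;
// e.g., for maxOrientationOffset = 15 degrees the threshold is cos(7.5 degrees) ~= 0.9914.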
674
675template <typename TAccessorImagePoints, bool tResultingErrors, bool tResultingSqrErrors>
676Scalar Error::determineHomographyError(const SquareMatrix3& points1_H_points0, const TAccessorImagePoints& imagePointAccessor0, const TAccessorImagePoints& imagePointAccessor1, Vector2* errors, Scalar* sqrErrors)
677{
678 ocean_assert(imagePointAccessor0.size() == imagePointAccessor1.size());
679
680 if (imagePointAccessor0.isEmpty())
681 {
682 return Scalar(0);
683 }
684
685 ocean_assert((tResultingErrors && errors != nullptr) || (!tResultingErrors && errors == nullptr));
686 ocean_assert((tResultingSqrErrors && sqrErrors != nullptr) || (!tResultingSqrErrors && sqrErrors == nullptr));
687
688 ocean_assert(!points1_H_points0.isSingular());
689
690 Scalar sqrAveragePixelError = 0;
691
692 for (size_t n = 0; n < imagePointAccessor0.size(); ++n)
693 {
694 Vector2 transformedPoint;
695
696 if (points1_H_points0.multiply(imagePointAccessor0[n], transformedPoint))
697 {
698 const Vector2& measuredPoint = imagePointAccessor1[n];
699
700 const Vector2 difference(transformedPoint - measuredPoint);
701 const Scalar sqrPixelError = difference.sqr();
702
703 sqrAveragePixelError += sqrPixelError;
704
705 if constexpr (tResultingErrors)
706 {
707 errors[n] = difference;
708 }
709
710 if constexpr (tResultingSqrErrors)
711 {
712 sqrErrors[n] = sqrPixelError;
713 }
714 }
715 else
716 {
717 if constexpr (tResultingErrors)
718 {
719 errors[n] = Vector2(Numeric::maxValue(), Numeric::maxValue());
720 }
721
722 if constexpr (tResultingSqrErrors)
723 {
724 sqrErrors[n] = Numeric::maxValue();
725 }
726
727 sqrAveragePixelError = Numeric::maxValue();
728 }
729 }
730
731 ocean_assert(imagePointAccessor0.size() != 0);
732 ocean_assert(sqrAveragePixelError >= 0 && sqrAveragePixelError <= Numeric::maxValue());
733
734 return sqrAveragePixelError / Scalar(imagePointAccessor0.size());
735}
736
737template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside, bool tResultingErrors, bool tResultingSqrErrors>
738inline Scalar Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, const Scalar zoom, Vector2* errors, Scalar* sqrErrors)
739{
740 return determinePoseErrorIF<TAccessorObjectPoints, TAccessorImagePoints, tUseBorderDistortionIfOutside, tResultingErrors, tResultingSqrErrors>(PinholeCamera::standard2InvertedFlipped(world_T_camera), pinholeCamera, objectPointAccessor, imagePointAccessor, useDistortionParameters, zoom, errors, sqrErrors);
741}
742
743template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tResultingErrors, bool tResultingSqrErrors>
744inline Scalar Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& anyCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Vector2* errors, Scalar* sqrErrors)
745{
746 return determinePoseErrorIF<TAccessorObjectPoints, TAccessorImagePoints, tResultingErrors, tResultingSqrErrors>(PinholeCamera::standard2InvertedFlipped(world_T_camera), anyCamera, objectPointAccessor, imagePointAccessor, errors, sqrErrors);
747}
748
749template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tOnlyFrontObjectPoints>
750inline bool Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError)
751{
752 return determinePoseErrorIF<TAccessorObjectPoints, TAccessorImagePoints, tOnlyFrontObjectPoints>(AnyCamera::standard2InvertedFlipped(world_T_camera), camera, objectPointAccessor, imagePointAccessor, sqrAveragePixelError, sqrMinimalPixelError, sqrMaximalPixelError);
753}
754
755template <typename TAccessorObjectPoints, typename TAccessorImagePoints>
756inline void Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError)
757{
758 determinePoseErrorIF<TAccessorObjectPoints, TAccessorImagePoints>(AnyCamera::standard2InvertedFlipped(world_T_camera), camera, objectPointAccessor, imagePointAccessor, sqrAveragePixelError, sqrMinimalPixelError, sqrMaximalPixelError);
759}
760
761template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside>
762inline void Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError, const Scalar zoom)
763{
764 determinePoseErrorIF<TAccessorObjectPoints, TAccessorImagePoints, tUseBorderDistortionIfOutside>(PinholeCamera::standard2InvertedFlipped(world_T_camera), pinholeCamera, objectPointAccessor, imagePointAccessor, useDistortionParameters, sqrAveragePixelError, sqrMinimalPixelError, sqrMaximalPixelError, zoom);
765}
766
767inline Vector2 Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const AnyCamera& camera, const Vector3& objectPoint, const Vector2& imagePoint)
768{
769 return determinePoseErrorIF(PinholeCamera::standard2InvertedFlipped(world_T_camera), camera, objectPoint, imagePoint);
770}
771
772inline Vector2 Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, const Vector3& objectPoint, const Vector2& imagePoint)
773{
774 return camera.projectToImageIF(flippedCamera_T_world, objectPoint) - imagePoint;
775}
776
777inline Vector2 Error::determinePoseError(const HomogenousMatrix4& world_T_camera, const PinholeCamera& pinholeCamera, const Vector3& objectPoint, const Vector2& imagePoint, const bool useDistortionParameters)
778{
779 return determinePoseErrorIF(PinholeCamera::standard2InvertedFlipped(world_T_camera), pinholeCamera, objectPoint, imagePoint, useDistortionParameters);
780}
781
782inline Vector2 Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const PinholeCamera& pinholeCamera, const Vector3& objectPoint, const Vector2& imagePoint, const bool useDistortionParameters)
783{
784 return pinholeCamera.projectToImageIF<true>(flippedCamera_T_world, objectPoint, useDistortionParameters) - imagePoint;
785}
786
787template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside, bool tResultingErrors, bool tResultingSqrErrors>
788Scalar Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, const Scalar zoom, Vector2* errors, Scalar* sqrErrors)
789{
790 ocean_assert(flippedCamera_T_world.isValid());
791 ocean_assert(objectPointAccessor.size() == imagePointAccessor.size());
792 ocean_assert((tResultingErrors && errors != nullptr) || (!tResultingErrors && errors == nullptr));
793 ocean_assert((tResultingSqrErrors && sqrErrors != nullptr) || (!tResultingSqrErrors && sqrErrors == nullptr));
794 ocean_assert(zoom > Numeric::eps());
795
796 if (objectPointAccessor.isEmpty())
797 {
798 return 0;
799 }
800
801 Scalar sqrAveragePixelError = 0;
802
803 if (useDistortionParameters && pinholeCamera.hasDistortionParameters())
804 {
805 for (size_t n = 0; n < objectPointAccessor.size(); ++n)
806 {
807 const Vector2 imagePoint(pinholeCamera.projectToImageIF<true, tUseBorderDistortionIfOutside>(flippedCamera_T_world, objectPointAccessor[n], zoom));
808 const Vector2& measuredImagePoint = imagePointAccessor[n];
809
810 const Vector2 difference(imagePoint - measuredImagePoint);
811 const Scalar sqrPixelError = difference.sqr();
812
813 sqrAveragePixelError += sqrPixelError;
814
815 if constexpr (tResultingErrors)
816 {
817 errors[n] = difference;
818 }
819
820 if constexpr (tResultingSqrErrors)
821 {
822 sqrErrors[n] = sqrPixelError;
823 }
824 }
825 }
826 else
827 {
828 // create one transformation matrix covering the entire pipeline (transformation and then projection)
829 const HomogenousMatrix4 transformationMatrixIF(pinholeCamera.transformationMatrixIF(flippedCamera_T_world, zoom));
830
831#ifdef OCEAN_DEBUG
832 SquareMatrix4 debugCameraMatrix(pinholeCamera.intrinsic());
833 debugCameraMatrix(0, 0) *= zoom;
834 debugCameraMatrix(1, 1) *= zoom;
835 debugCameraMatrix[15] = 1;
836
837 const SquareMatrix4 debugEntireMatrix(debugCameraMatrix * (SquareMatrix4&)flippedCamera_T_world);
838 const HomogenousMatrix4& debugTransformationMatrixIF = (const HomogenousMatrix4&)debugEntireMatrix;
839
840 ocean_assert(transformationMatrixIF == debugTransformationMatrixIF);
841#endif
842
843 for (size_t n = 0; n < objectPointAccessor.size(); ++n)
844 {
845 const Vector3 transformedObjectPoint(transformationMatrixIF * objectPointAccessor[n]);
846
847 ocean_assert(Numeric::isNotEqualEps(transformedObjectPoint.z()));
848 const Scalar factor = Scalar(1) / transformedObjectPoint.z();
849
850 const Vector2 imagePoint(transformedObjectPoint.x() * factor, transformedObjectPoint.y() * factor);
851
852 const Vector2 difference(imagePoint - imagePointAccessor[n]);
853 const Scalar sqrPixelError = difference.sqr();
854
855 sqrAveragePixelError += sqrPixelError;
856
857 if constexpr (tResultingErrors)
858 {
859 errors[n] = difference;
860 }
861
862 if constexpr (tResultingSqrErrors)
863 {
864 sqrErrors[n] = sqrPixelError;
865 }
866 }
867 }
868
869 ocean_assert(objectPointAccessor.size() != 0);
870 return sqrAveragePixelError / Scalar(objectPointAccessor.size());
871}
872
873template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tResultingErrors, bool tResultingSqrErrors>
874Scalar Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& anyCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Vector2* errors, Scalar* sqrErrors)
875{
876 ocean_assert(flippedCamera_T_world.isValid() && anyCamera.isValid());
877 ocean_assert(objectPointAccessor.size() == imagePointAccessor.size());
878
879 ocean_assert((tResultingErrors && errors != nullptr) || (!tResultingErrors && errors == nullptr));
880 ocean_assert((tResultingSqrErrors && sqrErrors != nullptr) || (!tResultingSqrErrors && sqrErrors == nullptr));
881
882 if (objectPointAccessor.isEmpty())
883 {
884 return 0;
885 }
886
887 Scalar sqrAveragePixelError = 0;
888
889 for (size_t n = 0; n < objectPointAccessor.size(); ++n)
890 {
891 const Vector2 imagePoint = anyCamera.projectToImageIF(flippedCamera_T_world, objectPointAccessor[n]);
892
893 const Vector2& measuredImagePoint = imagePointAccessor[n];
894
895 const Vector2 difference(imagePoint - measuredImagePoint);
896 const Scalar sqrPixelError = difference.sqr();
897
898 sqrAveragePixelError += sqrPixelError;
899
900 if constexpr (tResultingErrors)
901 {
902 errors[n] = difference;
903 }
904
905 if constexpr (tResultingSqrErrors)
906 {
907 sqrErrors[n] = sqrPixelError;
908 }
909 }
910
911 ocean_assert(objectPointAccessor.size() != 0);
912 return sqrAveragePixelError / Scalar(objectPointAccessor.size());
913}
914
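// Editor's note: illustrative sketch (not part of the original header) for the accessor-based overload
// above, requesting the individual squared errors in addition to the returned average. ConstArrayAccessor
// (from ocean/base/Accessor.h) and the concrete input values are assumptions made for this example;
// 'camera' and 'world_T_camera' are reused from the sketch further above.
//
//   const Vectors3 objectPoints = { Vector3(0, 0, 0), Vector3(Scalar(0.25), 0, 0) };
//   const Vectors2 imagePoints = { Vector2(641, 359), Vector2(885, 361) };
//
//   const HomogenousMatrix4 flippedCamera_T_world(AnyCamera::standard2InvertedFlipped(world_T_camera));
//
//   Scalars sqrErrors(objectPoints.size());
//   const Scalar sqrAverageError = Geometry::Error::determinePoseErrorIF<ConstArrayAccessor<Vector3>, ConstArrayAccessor<Vector2>, false, true>(
//       flippedCamera_T_world, camera,
//       ConstArrayAccessor<Vector3>(objectPoints.data(), objectPoints.size()),
//       ConstArrayAccessor<Vector2>(imagePoints.data(), imagePoints.size()),
//       nullptr, sqrErrors.data());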
915template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tOnlyFrontObjectPoints>
916bool Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError)
917{
918 ocean_assert(flippedCamera_T_world.isValid());
919 ocean_assert(objectPointAccessor.size() == imagePointAccessor.size());
920
921 sqrAveragePixelError = 0;
922 sqrMinimalPixelError = Numeric::maxValue();
923 sqrMaximalPixelError = 0;
924
925 if (objectPointAccessor.isEmpty())
926 {
927 return true;
928 }
929
930 for (size_t n = 0; n < objectPointAccessor.size(); ++n)
931 {
932 const Vector3& objectPoint = objectPointAccessor[n];
933
934 if constexpr (tOnlyFrontObjectPoints)
935 {
936 if (!AnyCamera::isObjectPointInFrontIF(flippedCamera_T_world, objectPoint))
937 {
938 return false;
939 }
940 }
941
942 const Vector2 projectedImagePoint(camera.projectToImageIF(flippedCamera_T_world, objectPoint));
943 const Vector2& imagePoint = imagePointAccessor[n];
944
945 const Scalar pixelError = projectedImagePoint.sqrDistance(imagePoint);
946
947 sqrAveragePixelError += pixelError;
948
949 if (pixelError < sqrMinimalPixelError)
950 {
951 sqrMinimalPixelError = pixelError;
952 }
953
954 if (pixelError > sqrMaximalPixelError)
955 {
956 sqrMaximalPixelError = pixelError;
957 }
958 }
959
960 ocean_assert(objectPointAccessor.size() != 0);
961 sqrAveragePixelError /= Scalar(objectPointAccessor.size());
962
963 return true;
964}
965
966
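// Editor's note: illustrative sketch (not part of the original header) for the overload above with
// tOnlyFrontObjectPoints = true, which additionally rejects poses for which at least one object point
// lies behind the camera. Accessor types and inputs are the same assumptions as in the sketches above.
//
//   Scalar sqrAverage = 0, sqrMinimal = 0, sqrMaximal = 0;
//
//   const bool allPointsInFront = Geometry::Error::determinePoseErrorIF<ConstArrayAccessor<Vector3>, ConstArrayAccessor<Vector2>, true>(
//       flippedCamera_T_world, camera,
//       ConstArrayAccessor<Vector3>(objectPoints.data(), objectPoints.size()),
//       ConstArrayAccessor<Vector2>(imagePoints.data(), imagePoints.size()),
//       sqrAverage, sqrMinimal, sqrMaximal);
//
//   if (allPointsInFront)
//   {
//       // sqrAverage, sqrMinimal and sqrMaximal hold valid squared pixel errors
//   }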
967template <typename TAccessorObjectPoints, typename TAccessorImagePoints>
968void Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError)
969{
970 constexpr bool tOnlyFrontObjectPoints = false;
971
972 const bool result = determinePoseErrorIF<TAccessorObjectPoints, TAccessorImagePoints, tOnlyFrontObjectPoints>(flippedCamera_T_world, camera, objectPointAccessor, imagePointAccessor, sqrAveragePixelError, sqrMinimalPixelError, sqrMaximalPixelError);
973 ocean_assert_and_suppress_unused(result, result);
974}
975
976template <typename TAccessorObjectPoints, typename TAccessorImagePoints, bool tUseBorderDistortionIfOutside>
977void Error::determinePoseErrorIF(const HomogenousMatrix4& flippedCamera_T_world, const PinholeCamera& pinholeCamera, const TAccessorObjectPoints& objectPointAccessor, const TAccessorImagePoints& imagePointAccessor, const bool useDistortionParameters, Scalar& sqrAveragePixelError, Scalar& sqrMinimalPixelError, Scalar& sqrMaximalPixelError, const Scalar zoom)
978{
979 ocean_assert(flippedCamera_T_world.isValid());
980 ocean_assert(objectPointAccessor.size() == imagePointAccessor.size());
981 ocean_assert(zoom > Numeric::eps());
982
983 sqrAveragePixelError = 0;
984 sqrMinimalPixelError = Numeric::maxValue();
985 sqrMaximalPixelError = 0;
986
987 if (objectPointAccessor.isEmpty())
988 {
989 return;
990 }
991
992 if (useDistortionParameters && pinholeCamera.hasDistortionParameters())
993 {
994 for (size_t n = 0; n < objectPointAccessor.size(); ++n)
995 {
996 const Vector2 imagePoint(pinholeCamera.projectToImageIF<tUseBorderDistortionIfOutside>(flippedCamera_T_world, objectPointAccessor[n], useDistortionParameters, zoom));
997 const Vector2& realImagePoint = imagePointAccessor[n];
998
999 const Scalar pixelError = imagePoint.sqrDistance(realImagePoint);
1000
1001 sqrAveragePixelError += pixelError;
1002
1003 if (pixelError < sqrMinimalPixelError)
1004 {
1005 sqrMinimalPixelError = pixelError;
1006 }
1007
1008 if (pixelError > sqrMaximalPixelError)
1009 {
1010 sqrMaximalPixelError = pixelError;
1011 }
1012 }
1013 }
1014 else
1015 {
1016 // create one transformation matrix covering the entire pipeline (transformation and then projection)
1017 const HomogenousMatrix4 transformationMatrixIF(pinholeCamera.transformationMatrixIF(flippedCamera_T_world, zoom));
1018
1019#ifdef OCEAN_DEBUG
1020 SquareMatrix4 debugCameraMatrix(pinholeCamera.intrinsic());
1021 debugCameraMatrix(0, 0) *= zoom;
1022 debugCameraMatrix(1, 1) *= zoom;
1023 debugCameraMatrix[15] = 1;
1024
1025 const SquareMatrix4 debugEntireMatrix(debugCameraMatrix * (SquareMatrix4&)flippedCamera_T_world);
1026 const HomogenousMatrix4& debugTransformationMatrixIF = (const HomogenousMatrix4&)debugEntireMatrix;
1027
1028 ocean_assert(transformationMatrixIF == debugTransformationMatrixIF);
1029#endif
1030
1031 for (size_t n = 0; n < objectPointAccessor.size(); ++n)
1032 {
1033 const Vector3 transformedObjectPoint(transformationMatrixIF * objectPointAccessor[n]);
1034
1035 ocean_assert(Numeric::isNotEqualEps(transformedObjectPoint.z()));
1036 const Scalar factor = Scalar(1) / transformedObjectPoint.z();
1037
1038 const Vector2 imagePoint(transformedObjectPoint.x() * factor, transformedObjectPoint.y() * factor);
1039
1040 const Scalar pixelError = imagePoint.sqrDistance(imagePointAccessor[n]);
1041
1042 sqrAveragePixelError += pixelError;
1043
1044 if (pixelError < sqrMinimalPixelError)
1045 {
1046 sqrMinimalPixelError = pixelError;
1047 }
1048
1049 if (pixelError > sqrMaximalPixelError)
1050 {
1051 sqrMaximalPixelError = pixelError;
1052 }
1053 }
1054 }
1055
1056 ocean_assert(objectPointAccessor.size() != 0);
1057 sqrAveragePixelError /= Scalar(objectPointAccessor.size());
1058}
1059
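// Editor's note: illustrative sketch (not part of the original header) for the PinholeCamera overload
// above, requesting use of the camera's distortion parameters (if any) and the default zoom factor.
// The pinhole camera object is an assumption for this example; the remaining inputs are reused from
// the sketches further above.
//
//   const PinholeCamera pinholeCamera(1280u, 720u, Numeric::deg2rad(60));
//
//   Scalar sqrAverage = 0, sqrMinimal = 0, sqrMaximal = 0;
//
//   Geometry::Error::determinePoseErrorIF<ConstArrayAccessor<Vector3>, ConstArrayAccessor<Vector2>, false /* tUseBorderDistortionIfOutside */>(
//       PinholeCamera::standard2InvertedFlipped(world_T_camera), pinholeCamera,
//       ConstArrayAccessor<Vector3>(objectPoints.data(), objectPoints.size()),
//       ConstArrayAccessor<Vector2>(imagePoints.data(), imagePoints.size()),
//       true /* useDistortionParameters */, sqrAverage, sqrMinimal, sqrMaximal, Scalar(1) /* zoom */);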
1060template <Estimator::EstimatorType tEstimator>
1061Scalar Error::averagedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, const ErrorDetermination errorDetermination, IndexPairs32* correspondences)
1062{
1063 ocean_assert(imagePoints && candidatePoints);
1064 ocean_assert(numberImagePoints <= numberCandidatePoints);
1065 ocean_assert(validImagePoints <= numberImagePoints);
1066
1067 switch (errorDetermination)
1068 {
1069 case ED_UNIQUE:
1070 return uniqueAveragedRobustErrorInPointCloud<tEstimator>(imagePoints, numberImagePoints, validImagePoints, candidatePoints, numberCandidatePoints, correspondences);
1071
1072 case ED_APPROXIMATED:
1073 return approximatedAveragedRobustErrorInPointCloud<tEstimator>(imagePoints, numberImagePoints, validImagePoints, candidatePoints, numberCandidatePoints, correspondences);
1074
1075 case ED_AMBIGUOUS:
1076 return ambiguousAveragedRobustErrorInPointCloud<tEstimator>(imagePoints, numberImagePoints, validImagePoints, candidatePoints, numberCandidatePoints, correspondences);
1077
1078 default:
1079 break;
1080 }
1081
1082 ocean_assert(false && "Invalid errorDetermination parameter!");
1083 return Numeric::maxValue();
1084}
1085
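// Editor's note: the three strategies dispatched above differ in how image points are matched to
// candidate points (see the implementations below): ED_UNIQUE sorts all image/candidate pairs by
// distance and builds a globally consistent one-to-one assignment, ED_APPROXIMATED greedily assigns
// each image point to its nearest still-unused candidate, and ED_AMBIGUOUS simply uses the nearest
// candidate so that a candidate may be matched several times. The following commented sketch is
// illustrative only; the two point sets are assumptions, with the number of image points not
// exceeding the number of candidate points as required by the asserts.
//
//   const Vectors2 cloudImagePoints = { Vector2(10, 10), Vector2(100, 50) };
//   const Vectors2 cloudCandidatePoints = { Vector2(11, 9), Vector2(99, 52), Vector2(400, 300) };
//
//   IndexPairs32 correspondences;
//   const Scalar averagedSqrError = Geometry::Error::averagedRobustErrorInPointCloud<Estimator::ET_SQUARE>(
//       cloudImagePoints.data(), cloudImagePoints.size(), cloudImagePoints.size(),
//       cloudCandidatePoints.data(), cloudCandidatePoints.size(), Geometry::Error::ED_UNIQUE, &correspondences);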
1086template <Estimator::EstimatorType tEstimator>
1087Scalar Error::uniqueAveragedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, IndexPairs32* correspondences)
1088{
1089 ocean_assert(imagePoints && candidatePoints);
1090 ocean_assert(numberImagePoints <= numberCandidatePoints);
1091 ocean_assert(validImagePoints <= numberImagePoints);
1092
1093 if (numberImagePoints > numberCandidatePoints)
1094 return Numeric::maxValue();
1095
1096 ErrorElements errorElements;
1097 errorElements.reserve(numberImagePoints * numberCandidatePoints);
1098
1099 for (unsigned int i = 0; i < numberImagePoints; ++i)
1100 {
1101 const Vector2& imagePoint(imagePoints[i]);
1102
1103 for (unsigned int c = 0; c < numberCandidatePoints; ++c)
1104 {
1105 errorElements.push_back(ErrorElement(i, c, imagePoint.sqrDistance(candidatePoints[c])));
1106 }
1107 }
1108
1109 std::sort(errorElements.begin(), errorElements.end());
1110
1111 std::vector<unsigned char> usedImagePoints(numberImagePoints, 0u);
1112 std::vector<unsigned char> usedCandidatePoints(numberCandidatePoints, 0u);
1113
1114 if constexpr (Estimator::isStandardEstimator<tEstimator>())
1115 {
1116 size_t numberUsedErrors = 0;
1117 Scalar sqrErrors = 0;
1118
1119 ErrorElements::const_iterator i = errorElements.begin();
1120 while (i != errorElements.end() && numberUsedErrors < validImagePoints)
1121 {
1122 ocean_assert(i != errorElements.end());
1123
1124 if (usedImagePoints[i->imageIndex()] == 0u && usedCandidatePoints[i->candidateIndex()] == 0u)
1125 {
1126 sqrErrors += i->error();
1127 numberUsedErrors++;
1128
1129 usedImagePoints[i->imageIndex()] = 1u;
1130 usedCandidatePoints[i->candidateIndex()] = 1u;
1131
1132 if (correspondences)
1133 {
1134 correspondences->push_back(IndexPair32(i->imageIndex(), i->candidateIndex()));
1135 }
1136 }
1137
1138 ++i;
1139 }
1140
1141 if (numberUsedErrors == 0)
1142 {
1143 return 0;
1144 }
1145
1146 return sqrErrors / Scalar(numberUsedErrors);
1147 }
1148 else
1149 {
1150 Scalars sqrErrors;
1151 sqrErrors.reserve(validImagePoints);
1152
1153 ErrorElements::const_iterator i = errorElements.begin();
1154 while (i != errorElements.end() && sqrErrors.size() < validImagePoints)
1155 {
1156 ocean_assert(i != errorElements.end());
1157
1158 if (usedImagePoints[i->imageIndex()] == 0u && usedCandidatePoints[i->candidateIndex()] == 0u)
1159 {
1160 sqrErrors.push_back(i->error());
1161
1162 usedImagePoints[i->imageIndex()] = 1u;
1163 usedCandidatePoints[i->candidateIndex()] = 1u;
1164
1165 if (correspondences)
1166 {
1167 correspondences->push_back(IndexPair32(i->imageIndex(), i->candidateIndex()));
1168 }
1169 }
1170
1171 ++i;
1172 }
1173
1174 if (sqrErrors.empty())
1175 {
1176 return 0;
1177 }
1178
1179 const size_t numberUsedErrors = min(validImagePoints, sqrErrors.size());
1180
1181 return averagedRobustError<tEstimator>(sqrErrors.data(), numberUsedErrors);
1182 }
1183}
1184
1185template <Estimator::EstimatorType tEstimator>
1186Scalar Error::approximatedAveragedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, IndexPairs32* correspondences)
1187{
1188 if (validImagePoints == 0)
1189 return 0;
1190
1191 ocean_assert(imagePoints && candidatePoints);
1192 ocean_assert(numberImagePoints <= numberCandidatePoints);
1193 ocean_assert(validImagePoints <= numberImagePoints);
1194
1195 if (numberImagePoints > numberCandidatePoints)
1196 {
1197 return Numeric::maxValue();
1198 }
1199
1200 ErrorElements errorElements;
1201 errorElements.reserve(numberImagePoints);
1202
1203 std::vector<unsigned char> usedCandidates(numberCandidatePoints, 0u);
1204
1205 for (unsigned int i = 0u; i < numberImagePoints; ++i)
1206 {
1207 const Vector2& imagePoint(imagePoints[i]);
1208
1209 Scalar minValue = Numeric::maxValue();
1210 unsigned int minIndex = 0xFFFFFFFF;
1211
1212 for (unsigned int c = 0u; c < numberCandidatePoints; ++c)
1213 {
1214 if (usedCandidates[c] == 0u)
1215 {
1216 const Scalar value = imagePoint.sqrDistance(candidatePoints[c]);
1217
1218 if (value < minValue)
1219 {
1220 minValue = value;
1221 minIndex = c;
1222 }
1223 }
1224 }
1225
1226 ocean_assert(minIndex != 0xFFFFFFFF);
1227
1228 errorElements.push_back(ErrorElement(i, minIndex, minValue));
1229
1230 ocean_assert(usedCandidates[minIndex] == 0u);
1231 usedCandidates[minIndex] = 1u;
1232 }
1233
1234 std::sort(errorElements.begin(), errorElements.end());
1235
1236 ocean_assert(!errorElements.empty());
1237
1238 if (errorElements.empty())
1239 {
1240 return 0;
1241 }
1242
1243 const size_t numberUsedErrors = min(validImagePoints, errorElements.size());
1244
1245 if constexpr (Estimator::isStandardEstimator<tEstimator>())
1246 {
1247 Scalar sqrErrors = 0;
1248
1249 if (correspondences)
1250 {
1251 for (size_t n = 0; n < numberUsedErrors; ++n)
1252 {
1253 sqrErrors += errorElements[n].error();
1254 correspondences->push_back(IndexPair32(errorElements[n].imageIndex(), errorElements[n].candidateIndex()));
1255 }
1256 }
1257 else
1258 {
1259 for (size_t n = 0; n < numberUsedErrors; ++n)
1260 {
1261 sqrErrors += errorElements[n].error();
1262 }
1263 }
1264
1265 return sqrErrors / Scalar(numberUsedErrors);
1266 }
1267 else
1268 {
1269 Scalars sqrErrors;
1270 sqrErrors.reserve(numberUsedErrors);
1271
1272 for (unsigned int n = 0; n < numberUsedErrors; ++n)
1273 {
1274 sqrErrors.push_back(errorElements[n].error());
1275
1276 if (correspondences)
1277 {
1278 correspondences->push_back(IndexPair32(errorElements[n].imageIndex(), errorElements[n].candidateIndex()));
1279 }
1280 }
1281
1282 return averagedRobustError<tEstimator>(sqrErrors.data(), numberUsedErrors);
1283 }
1284}
1285
1286template <Estimator::EstimatorType tEstimator>
1287Scalar Error::ambiguousAveragedRobustErrorInPointCloud(const Vector2* imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2* candidatePoints, const size_t numberCandidatePoints, IndexPairs32* correspondences)
1288{
1289 ocean_assert(imagePoints && candidatePoints);
1290 ocean_assert(numberImagePoints <= numberCandidatePoints);
1291 ocean_assert(validImagePoints <= numberImagePoints);
1292
1293 if (numberImagePoints > numberCandidatePoints)
1294 {
1295 return Numeric::maxValue();
1296 }
1297
1298 if (numberImagePoints == 0)
1299 {
1300 return 0;
1301 }
1302
1303 ErrorElements errorElements;
1304 errorElements.reserve(numberImagePoints);
1305
1306 for (unsigned int i = 0; i < numberImagePoints; ++i)
1307 {
1308 const Vector2& imagePoint(imagePoints[i]);
1309
1310 Scalar minValue = Numeric::maxValue();
1311 unsigned int minIndex = 0xFFFFFFFF;
1312
1313 for (unsigned int c = 0; c < numberCandidatePoints; ++c)
1314 {
1315 const Scalar value = imagePoint.sqrDistance(candidatePoints[c]);
1316
1317 if (value < minValue)
1318 {
1319 minValue = value;
1320 minIndex = c;
1321 }
1322 }
1323
1324 ocean_assert(minIndex != 0xFFFFFFFF);
1325
1326 errorElements.push_back(ErrorElement(i, minIndex, minValue));
1327 }
1328
1329 std::sort(errorElements.begin(), errorElements.end());
1330
1331 ocean_assert(!errorElements.empty());
1332
1333 if (errorElements.empty())
1334 {
1335 return 0;
1336 }
1337
1338 const size_t numberUsedErrors = min(validImagePoints, errorElements.size());
1339
1340	 if constexpr (Estimator::isStandardEstimator<tEstimator>())
1341 {
1342 Scalar sqrErrors = 0;
1343
1344 if (correspondences)
1345 {
1346 for (unsigned int n = 0; n < numberUsedErrors; ++n)
1347 {
1348 sqrErrors += errorElements[n].error();
1349 correspondences->push_back(IndexPair32(errorElements[n].imageIndex(), errorElements[n].candidateIndex()));
1350 }
1351 }
1352 else
1353 {
1354 for (unsigned int n = 0; n < numberUsedErrors; ++n)
1355 {
1356 sqrErrors += errorElements[n].error();
1357 }
1358 }
1359
1360 return sqrErrors / Scalar(numberUsedErrors);
1361 }
1362 else
1363 {
1364 Scalars sqrErrors;
1365 sqrErrors.reserve(numberUsedErrors);
1366
1367 if (correspondences)
1368 {
1369 for (unsigned int n = 0; n < numberUsedErrors; ++n)
1370 {
1371 sqrErrors.push_back(errorElements[n].error());
1372 correspondences->push_back(IndexPair32(errorElements[n].imageIndex(), errorElements[n].candidateIndex()));
1373 }
1374 }
1375 else
1376 {
1377 for (unsigned int n = 0; n < numberUsedErrors; ++n)
1378 {
1379 sqrErrors.push_back(errorElements[n].error());
1380 }
1381 }
1382
1383 return Geometry::Error::averagedRobustError<tEstimator>(sqrErrors.data(), numberUsedErrors);
1384 }
1385}
1386
1387template <Estimator::EstimatorType tEstimator>
1388Scalar Error::averagedRobustError(const Scalar* sqrErrors, const size_t number, const Scalar* explicitWeights)
1389{
1390 ocean_assert(sqrErrors);
1391 ocean_assert(number > 0);
1392
1393 if (number == 0)
1394 {
1395 return 0;
1396 }
1397
1398 // the number of model parameters is guessed to be 6
1399 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors, number, 6)) : 0;
1400
1401 Scalar summedError = 0;
1402
1403 if (explicitWeights)
1404 {
1405 const Scalar* const sqrErrorsEnd = sqrErrors + number;
1406 while (sqrErrors != sqrErrorsEnd)
1407 {
1408 summedError += Estimator::robustErrorSquare<tEstimator>(*sqrErrors++, sqrSigma) * *explicitWeights++;
1409 }
1410 }
1411 else
1412 {
1413 const Scalar* const sqrErrorsEnd = sqrErrors + number;
1414 while (sqrErrors != sqrErrorsEnd)
1415 {
1416 summedError += Estimator::robustErrorSquare<tEstimator>(*sqrErrors++, sqrSigma);
1417 }
1418 }
1419
1420 return summedError / Scalar(number);
1421}
1422
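// Editor's note: illustrative sketch (not part of the original header) contrasting the standard square
// estimator with a robust estimator on the same squared errors; the values are assumptions chosen so
// that the last entry acts as an outlier which a robust estimator down-weights.
//
//   const Scalar sampleSqrErrors[] = { Scalar(0.25), Scalar(1), Scalar(0.5), Scalar(2), Scalar(1.5), Scalar(0.75), Scalar(1.25), Scalar(400) };
//
//   const Scalar meanError = Geometry::Error::averagedRobustError<Estimator::ET_SQUARE>(sampleSqrErrors, 8);   // plain mean of the squared errors
//   const Scalar huberError = Geometry::Error::averagedRobustError<Estimator::ET_HUBER>(sampleSqrErrors, 8);   // the outlier contributes sub-quadratically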
1423inline Scalar Error::averagedRobustError(const Scalar* sqrErrors, const size_t number, const Estimator::EstimatorType estimator, const Scalar* explicitWeights)
1424{
1425 switch (estimator)
1426 {
1427		 case Estimator::ET_SQUARE:
1428			 return averagedRobustError<Estimator::ET_SQUARE>(sqrErrors, number, explicitWeights);
1429
1430		 case Estimator::ET_LINEAR:
1431			 return averagedRobustError<Estimator::ET_LINEAR>(sqrErrors, number, explicitWeights);
1432
1433		 case Estimator::ET_HUBER:
1434			 return averagedRobustError<Estimator::ET_HUBER>(sqrErrors, number, explicitWeights);
1435
1436		 case Estimator::ET_CAUCHY:
1437			 return averagedRobustError<Estimator::ET_CAUCHY>(sqrErrors, number, explicitWeights);
1438
1439		 case Estimator::ET_TUKEY:
1440			 return averagedRobustError<Estimator::ET_TUKEY>(sqrErrors, number, explicitWeights);
1441
1442 default:
1443 break;
1444 }
1445
1446 ocean_assert(false && "Invalid estimator type!");
1447 return averagedRobustError<Estimator::ET_SQUARE>(sqrErrors, number, explicitWeights);
1448}
1449
1450template <Estimator::EstimatorType tEstimator>
1451Scalar Error::averagedRobustError(const Scalar* sqrErrors, const unsigned int* indices, const size_t numberIndices, const Scalar* explicitWeights)
1452{
1453 ocean_assert(sqrErrors);
1454 ocean_assert(numberIndices > 0);
1455
1456 if (numberIndices == 0)
1457 {
1458 return 0;
1459 }
1460
1461 // the number of model parameters is guessed to be 6
1462 const Scalar sqrSigma = Estimator::needSigma<tEstimator>() ? Numeric::sqr(Estimator::determineSigmaSquare<tEstimator>(sqrErrors, indices, numberIndices, 6)) : 0;
1463
1464 Scalar summedError = 0;
1465
1466 if (explicitWeights)
1467 {
1468		 const unsigned int* const indicesEnd = indices + numberIndices;
1469		 while (indices != indicesEnd)
1470 {
1471 const unsigned int index = *indices++;
1472 summedError += Estimator::robustErrorSquare<tEstimator>(sqrErrors[index], sqrSigma) * explicitWeights[index];
1473 }
1474 }
1475 else
1476 {
1477		 const unsigned int* const indicesEnd = indices + numberIndices;
1478		 while (indices != indicesEnd)
1479 {
1480 summedError += Estimator::robustErrorSquare<tEstimator>(sqrErrors[*indices++], sqrSigma);
1481 }
1482 }
1483
1484 return summedError / Scalar(numberIndices);
1485}
1486
1487inline Scalar Error::averagedRobustError(const Scalar* sqrErrors, const unsigned int* indices, const size_t numberIndices, const Estimator::EstimatorType estimator, const Scalar* explicitWeights)
1488{
1489 switch (estimator)
1490 {
1491		 case Estimator::ET_SQUARE:
1492			 return averagedRobustError<Estimator::ET_SQUARE>(sqrErrors, indices, numberIndices, explicitWeights);
1493
1494		 case Estimator::ET_LINEAR:
1495			 return averagedRobustError<Estimator::ET_LINEAR>(sqrErrors, indices, numberIndices, explicitWeights);
1496
1497		 case Estimator::ET_HUBER:
1498			 return averagedRobustError<Estimator::ET_HUBER>(sqrErrors, indices, numberIndices, explicitWeights);
1499
1500		 case Estimator::ET_CAUCHY:
1501			 return averagedRobustError<Estimator::ET_CAUCHY>(sqrErrors, indices, numberIndices, explicitWeights);
1502
1503		 case Estimator::ET_TUKEY:
1504			 return averagedRobustError<Estimator::ET_TUKEY>(sqrErrors, indices, numberIndices, explicitWeights);
1505
1506 default:
1507 break;
1508 }
1509
1510 ocean_assert(false && "Invalid estimator type!");
1511 return averagedRobustError<Estimator::ET_SQUARE>(sqrErrors, indices, numberIndices, explicitWeights);
1512}
1513
1514}
1515
1516}
1517
1518#endif // META_OCEAN_GEOMETRY_ERROR_H
This class implements the abstract base class for all AnyCamera objects.
Definition AnyCamera.h:130
virtual VectorT2< T > projectToImageIF(const VectorT3< T > &objectPoint) const =0
Projects a 3D object point into the camera frame.
virtual bool isValid() const =0
Returns whether this camera is valid.
static HomogenousMatrixT4< U > standard2InvertedFlipped(const HomogenousMatrixT4< U > &world_T_camera)
Transforms a standard homogenous 4x4 viewing (extrinsic camera) matrix into an inverted and flipped c...
Definition Camera.h:734
static bool isObjectPointInFrontIF(const HomogenousMatrixT4< T > &flippedCamera_T_world, const VectorT3< T > &objectPoint, const T epsilon=NumericT< T >::eps())
Determines whether a given 3D object point lies in front of a camera while the location of the camera...
Definition Camera.h:811
This class implements an element storing the error between two image points.
Definition Error.h:59
ErrorElement(const unsigned int imageIndex, const unsigned int candidateIndex, const Scalar error)
Creates a new error element.
Definition Error.h:627
unsigned int imageIndex() const
Returns the index of the image point.
Definition Error.h:635
Scalar error_
Error between the two points.
Definition Error.h:104
unsigned int candidateIndex() const
Returns the index of the candidate point.
Definition Error.h:640
bool operator<(const ErrorElement &element) const
Returns whether the left element has a smaller error than the right one.
Definition Error.h:650
Scalar error() const
Returns the error between the two points.
Definition Error.h:645
This class implements functions to determine the error or accuracy of geometric functions and thei...
Definition Error.h:34
static Scalar determineHomographyError(const SquareMatrix3 &points1_H_points0, const TAccessorImagePoints &imagePointAccessor0, const TAccessorImagePoints &imagePointAccessor1, Vector2 *errors=nullptr, Scalar *sqrErrors=nullptr)
Determines the accuracy of a given homography for a set of corresponding image points.
Definition Error.h:676
static Vector2 determinePoseError(const HomogenousMatrix4 &world_T_camera, const AnyCamera &camera, const Vector3 &objectPoint, const Vector2 &imagePoint)
Determines the accuracy of the camera pose based on 2D/3D correspondences.
Definition Error.h:767
static Scalar uniqueAveragedRobustErrorInPointCloud(const Vector2 *imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2 *candidatePoints, const size_t numberCandidatePoints, IndexPairs32 *correspondences=nullptr)
Determines the unique robust minimal average square error between two 2D point clouds.
Definition Error.h:1087
static Scalar determineAverageError(const SquareMatrix3 &firstTransformation, const Vectors2 &firstPoints, const SquareMatrix3 &secondTransformation, const Vectors2 &secondPoints)
Returns the average square error between two sets of 2D positions.
static void determineCameraError(const PinholeCamera &pinholeCamera, const Vector2 *normalizedObjectPoints, const Vector2 *imagePoints, const size_t correspondences, const bool useDistortionParameters, Scalar &sqrAveragePixelError, Scalar &sqrMinimalPixelError, Scalar &sqrMaximalPixelError)
Determines the accuracy of the intrinsic camera matrix (and camera distortion parameters if requested...
static Scalar determineAverageError(const Vectors3 &firstPoints, const Vectors3 &secondPoints)
Returns the average square error between two sets of 3D positions.
static Scalar determineCameraError(const PinholeCamera &pinholeCamera, const Vector2 *normalizedObjectPoints, const Vector2 *imagePoints, const size_t correspondences, const bool useDistortionParameters, Vector2 *errors=nullptr, Scalar *sqrErrors=nullptr)
Determines the accuracy of the intrinsic camera matrix (and camera distortion parameters if requested...
static void determineInvalidParameters(const Scalar *parameters, const size_t number, const Scalar threshold, Indices32 &validIndices)
Determines the indices of a set of given parameter values that are above a provided threshold.
static Scalar determineError(const Vectors2 &firstPoints, const Vectors2 &secondPoints, Scalar &sqrAverageError, Scalar &sqrMinimalError, Scalar &sqrMaximalError)
Determines the average, minimal and maximal square error between two sets of 2D positions.
static Scalar averagedRobustError(const Scalar *sqrErrors, const size_t number, const Scalar *explicitWeights=nullptr)
Returns the averaged robust error for a given set of error values using a defined estimator.
Definition Error.h:1388
static Scalar approximatedAveragedRobustErrorInPointCloud(const Vector2 *imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2 *candidatePoints, const size_t numberCandidatePoints, IndexPairs32 *correspondences=nullptr)
Determines the approximated robust minimal average square error between two 2D point clouds.
Definition Error.h:1186
static bool posesAlmostEqual(const HomogenousMatrix4 &poseFirst, const HomogenousMatrix4 &poseSecond, const Vector3 &maxTranslationOffset=Vector3(Scalar(0.1), Scalar(0.1), Scalar(0.1)), const Scalar maxOrientationOffset=Numeric::deg2rad(15))
Returns whether the offsets between two given 6DOF poses are below specified thresholds.
std::vector< ErrorElement > ErrorElements
Definition of a vector holding error elements.
Definition Error.h:110
static Scalar determineError(const Vectors3 &firstPoints, const Vectors3 &secondPoints, Scalar &sqrAverageError, Scalar &sqrMinimalError, Scalar &sqrMaximalError)
Returns the average, minimal and maximal square error between two sets of 3D positions.
ErrorDetermination
Definition of different error determination stages.
Definition Error.h:41
@ ED_APPROXIMATED
Approximated error determination.
Definition Error.h:47
@ ED_INVALID
Invalid stage.
Definition Error.h:43
@ ED_AMBIGUOUS
Ambiguous error determination.
Definition Error.h:49
@ ED_UNIQUE
Unique error determination.
Definition Error.h:45
static Scalar determineAverageError(const Vectors2 &firstPoints, const Vectors2 &secondPoints, Vector2 *errors=nullptr, Scalar *sqrErrors=nullptr)
Returns the average square error between two sets of 2D positions.
static Scalar averagedRobustErrorInPointCloud(const Vector2 *imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2 *candidatePoints, const size_t numberCandidatePoints, const ErrorDetermination errorDetermination, IndexPairs32 *correspondences=nullptr)
Determines the robust minimal average square error between two 2D point clouds, according to the given error determination strategy.
Definition Error.h:1061
static Scalar ambiguousAveragedRobustErrorInPointCloud(const Vector2 *imagePoints, const size_t numberImagePoints, const size_t validImagePoints, const Vector2 *candidatePoints, const size_t numberCandidatePoints, IndexPairs32 *correspondences=nullptr)
Determines the ambiguous robust minimal average square error between two 2D point clouds.
Definition Error.h:1287
static void determineValidParameters(const Scalar *parameters, const size_t number, const Scalar threshold, Indices32 &validIndices)
Determines the indices of a set of given parameter values that are below or equal to a provided thre...
static Vector2 determinePoseErrorIF(const HomogenousMatrix4 &flippedCamera_T_world, const AnyCamera &camera, const Vector3 &objectPoint, const Vector2 &imagePoint)
Determines the accuracy of the camera pose based on 2D/3D correspondences.
Definition Error.h:772
EstimatorType
Definition of individual robust estimator types.
Definition Estimator.h:34
@ ET_HUBER
The Huber estimator type.
Definition Estimator.h:84
@ ET_TUKEY
The Tukey estimator.
Definition Estimator.h:102
@ ET_SQUARE
The standard square error estimator (L2).
Definition Estimator.h:52
@ ET_LINEAR
The linear estimator (L1).
Definition Estimator.h:66
@ ET_CAUCHY
The Cauchy estimator.
Definition Estimator.h:118
VectorT3< T > translation() const
Returns the translation of the transformation.
Definition HomogenousMatrix4.h:1381
bool isValid() const
Returns whether this matrix is a valid homogeneous transformation.
Definition HomogenousMatrix4.h:1806
QuaternionT< T > rotation() const
Returns the rotation of the transformation as quaternion.
Definition HomogenousMatrix4.h:1388
static T abs(const T value)
Returns the absolute value of a given value.
Definition Numeric.h:1220
static constexpr T eps()
Returns a small epsilon.
static constexpr T sqr(const T value)
Returns the square of a given value.
Definition Numeric.h:1495
static T cos(const T value)
Returns the cosine of a given value.
Definition Numeric.h:1584
static constexpr bool isNotEqualEps(const T value)
Returns whether a value is not smaller than or equal to a small epsilon.
Definition Numeric.h:2237
static constexpr T maxValue()
Returns the max scalar value.
Definition Numeric.h:3244
bool hasDistortionParameters() const
Returns whether this camera object has specified distortion parameters.
Definition PinholeCamera.h:1293
const SquareMatrixT3< T > & intrinsic() const
Returns the intrinsic camera matrix.
Definition PinholeCamera.h:1257
HomogenousMatrixT4< T > transformationMatrixIF(const HomogenousMatrixT4< T > &iFlippedExtrinsic, const T zoom=T(1)) const
Returns a 4x4 homogenous transformation matrix (corresponding to a 3x4 matrix) that covers an extrins...
Definition PinholeCamera.h:1479
VectorT2< T > projectToImageIF(const HomogenousMatrixT4< T > &flippedCamera_T_world, const VectorT3< T > &objectPoint, const bool distortImagePoint, const T zoom=T(1)) const
Projects a 3D object point to the 2D image plane of the camera by a given inverse camera pose.
Definition PinholeCamera.h:1816
T cos2(const QuaternionT< T > &quaternion) const
Returns the cosine value of the half angle between two quaternion rotations.
Definition Quaternion.h:857
bool multiply(const VectorT2< T > &vector, VectorT2< T > &result) const
Multiplies a 2D vector with this matrix (from the right).
Definition SquareMatrix3.h:1708
bool isSingular() const
Returns whether this matrix is singular (and thus cannot be inverted).
Definition SquareMatrix3.h:1341
T sqr() const
Returns the square of the vector length.
Definition Vector2.h:633
T sqrDistance(const VectorT2< T > &right) const
Returns the square distance between this 2D position and a second 2D position.
Definition Vector2.h:645
const T & y() const noexcept
Returns the y value.
Definition Vector3.h:824
const T & x() const noexcept
Returns the x value.
Definition Vector3.h:812
const T & z() const noexcept
Returns the z value.
Definition Vector3.h:836
std::vector< IndexPair32 > IndexPairs32
Definition of a vector holding 32 bit index pairs.
Definition Base.h:144
std::vector< Index32 > Indices32
Definition of a vector holding 32 bit index values.
Definition Base.h:96
std::pair< Index32, Index32 > IndexPair32
Definition of a pair holding 32 bit indices.
Definition Base.h:138
float Scalar
Definition of a scalar type.
Definition Math.h:129
std::vector< Scalar > Scalars
Definition of a vector holding Scalar objects.
Definition Math.h:145
std::vector< Vector2 > Vectors2
Definition of a vector holding Vector2 objects.
Definition Vector2.h:64
std::vector< Vector3 > Vectors3
Definition of a vector holding Vector3 objects.
Definition Vector3.h:65
VectorT2< Scalar > Vector2
Definition of a 2D vector.
Definition Vector2.h:28
The namespace covering the entire Ocean framework.
Definition Accessor.h:15