Ocean
OculusTagTracker.h
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  *
4  * This source code is licensed under the MIT license found in the
5  * LICENSE file in the root directory of this source tree.
6  */
7 
8 #ifndef META_OCEAN_TRACKING_OCULUSTAGS_OCULUSTAGTRACKER_H
9 #define META_OCEAN_TRACKING_OCULUSTAGS_OCULUSTAGTRACKER_H
10 
13 
14 #include "ocean/base/Frame.h"
15 
16 #include "ocean/cv/FramePyramid.h"
17 
18 #include "ocean/math/AnyCamera.h"
19 
21 
23 
24 namespace Ocean
25 {
26 
27 namespace Tracking
28 {
29 
30 namespace OculusTags
31 {
32 
33 /**
34  * This class implements a detector and tracker for Oculus Tags.
35  * @ingroup trackingoculustags
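 *
 * A minimal usage sketch (hedged): the camera models, 8-bit grayscale frames, and device/camera poses used
 * below are assumed to be provided by the caller, e.g., by a headset's sensor pipeline.
 * @code
 * OculusTagTracker tracker;
 *
 * OculusTags tags;
 * if (tracker.trackTagsStereo(anyCameraA, anyCameraB, yFrameA, yFrameB, world_T_device, device_T_cameraA, device_T_cameraB, tags))
 * {
 *     for (const OculusTag& tag : tags)
 *     {
 *         // e.g., use tag.tagID() to identify the tag
 *     }
 * }
 * @endcode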
36  */
37 class OCEAN_TRACKING_OCULUSTAGS_EXPORT OculusTagTracker
38 {
39  public:
40 
41  /**
42  * Definition of the states of a tracked tag
43  */
44  enum TrackingState
45  {
46  /// Unknown or invalid state
47  TS_UNKNOWN_STATE = 0,
48 
49  /// A newly detected tag
50  TS_NEW_DETECTION,
51 
52  /// A tag that is currently being tracked
53  TS_TRACKING,
54 
55  /// Denotes known tags which are currently not tracked, e.g., because they are occluded or out of view
56  TS_NOT_TRACKING
57  };
58 
59  /**
60  * Definition of the motion types of a tracked tag
61  */
62  enum MotionType
63  {
64  /// The motion is not known
65  MT_UNKNOWN = 0,
66 
67  /// The motion is dynamic, i.e., the tag is moving relative to the camera(s) (e.g., a tag on a moving object)
68  MT_DYNAMIC,
69 
70  /// The motion is static, i.e., the tag does not move relative to the camera(s) (e.g., a tag that is glued to a wall)
71  MT_STATIC
72  };
73 
74  /**
75  * Definition of groups of object corners on a tag.
76  *
77  * The corner points of a tag are defined as follows:
78  * @code
79  * 0---+---+---+---+---+---+---+---3
80  * | # | # | # | # | # | # | # | # |
81  * +---4---+---+---+---+---D---+---+
82  * | # |                   | # | # |
83  * +---+   X---X---X---X---C---B---+
84  * | # |   | = | = | = | = |   | # |
85  * +---+   X---X---X---X---X   +---+
86  * | # |   | = | = | = | = |   | # |
87  * +---+   X---X---X---X---X   +---+
88  * | # |   | = | = | = | = |   | # |
89  * +---+   X---X---X---X---X   +---+
90  * | # |   | = | = | = | = |   | # |
91  * +---5---6---X---X---X---9---A---+
92  * | # | # |               | # | # |
93  * +---+---7---+---+---+---8---+---+
94  * | # | # | # | # | # | # | # | # |
95  * 1---+---+---+---+---+---+---+---2
96  * @endcode
97  *
98  * There are additional corners (denoted by `X`) in the data matrix (denoted by `=`); their number and locations
99  * depend on the specific ID of the tag at hand.
100  *
101  * @sa getTagObjectPoints()
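 *
 * A hedged example of requesting the four outer corners (group `TPG_CORNERS_0_TO_3`) for a tag with an
 * assumed edge length of 0.15 meters:
 * @code
 * const Vectors3 outerCorners = OculusTagTracker::getTagObjectPoints(OculusTagTracker::TPG_CORNERS_0_TO_3, Scalar(0.15));
 * // outerCorners holds the object points of the corners 0 through 3
 * @endcode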
102  */
103  enum TagPointGroup : uint8_t
104  {
105  /// The corners 0 through 3
106  TPG_CORNERS_0_TO_3 = 1u << 0u,
107 
108  /// The corners 4 through D
109  TPG_CORNERS_4_TO_D = 1u << 1u,
110 
111  /// The corners 0 through D
112  TPG_CORNERS_0_TO_D = TPG_CORNERS_0_TO_3 | TPG_CORNERS_4_TO_D,
113 
114  /// The corners in the data matrix of a tag (including the corners 6, 9, and C)
115  TPG_CORNERS_IN_DATA_MATRIX = 1u << 2u,
116 
117  /// All available corners
118  TPG_CORNERS_ALL_AVAILABLE = TPG_CORNERS_0_TO_D | TPG_CORNERS_IN_DATA_MATRIX,
119 
120  /// The center locations of the modules in the data matrix (`=` in the figure above)
121  TPG_DATA_MATRIX_MODULE_CENTERS = 1u << 3u,
122 
123  /// The centers of those modules on the outermost border of a tag, which are guaranteed to always have foreground values
124  TPG_FOREGROUND_MODULE_CENTERS = 1u << 4u,
125 
126  /// The centers of those modules between the outermost border of a tag and the data matrix, which are guaranteed to always have background values
127  TPG_BACKGROUND_MODULE_CENTERS = 1u << 5u,
128 
129  /// The centers of those modules that are required to determine the orientation of a tag
130  TPG_ORIENTATION_MODULE_CENTERS = 1u << 6u,
131  };
132 
133 
134  /**
135  * A history of tag observations for one camera
136  * Each observation consists of a camera pose, a set of object points, and a set of corresponding image points
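 *
 * A brief, hedged sketch (the camera, pose, and point sets are assumed to exist already) of adding an
 * observation and refining the tag pose from the accumulated correspondences:
 * @code
 * OculusTagTracker::TagObservationHistory history;
 * history.addObservation(world_T_camera, std::move(objectPoints), std::move(imagePoints), std::move(trackingImagePoints), std::move(trackingObjectPoints));
 *
 * HomogenousMatrix4 optimized_world_T_tag;
 * if (history.optimizePose(anyCamera, unoptimized_world_T_tag, optimized_world_T_tag))
 * {
 *     // optimized_world_T_tag holds the refined tag pose
 * }
 * @endcode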
137  */
138  class OCEAN_TRACKING_OCULUSTAGS_EXPORT TagObservationHistory
139  {
140  public:
141 
142  /**
143  * Adds a single observation of a tag to the collection
144  * @param world_T_camera The world-to-camera transformation, must be valid
145  * @param objectPoints The object points of the tag (in reference frame of the tag), must have at least one element
146  * @param imagePoints The image points corresponding to the object points at the given camera pose, size must be identical to number of object points
147  * @param trackingImagePoints The image points that should be used to track this tag from frame to frame (temporally), must have at least the four outer corners of the tag and the same size as `trackingObjectPoints`
148  * @param trackingObjectPoints The object points that should be used to track this tag from frame to frame (temporally), must have at least the four outer corners of the tag and the same size as `trackingImagePoints`
149  */
150  inline void addObservation(const HomogenousMatrix4& world_T_camera, Vectors3&& objectPoints, Vectors2&& imagePoints, Vectors2&& trackingImagePoints, Vectors3&& trackingObjectPoints);
151 
152  /**
153  * Appends a collection of observations
154  * @param observationHistory The observations that will be appended, note: the other instance will be empty afterwards
155  */
156  inline void append(TagObservationHistory& observationHistory);
157 
158  /**
159  * Removes all observations that do not match the specified pose
160  * This function starts with the newest observation stored in this instance and works its way back in time. It stops
161  * at the first observation that exceeds the maximum projection error and removes it as well as everything before it
162  * (older observations).
163  * @param anyCamera The camera that was used for capturing, must be valid
164  * @param tag_T_world The pose of the tag, given as the transformation which converts points in the world reference frame to the tag reference frame, must be valid
165  * @param maximalError Maximum projection error when applying the above tag pose to all stored observations, range: [0, infinity)
166  * @param maximalOutliersPercent Maximum percentage of points that may exceed the maximum projection error before the entire observation is rejected, range: [0, 1]
167  * @return The number of observations that have been removed, range: [0, size()]
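 *
 * A hedged pruning sketch: `history` is assumed to be a populated TagObservationHistory and `tag_T_world` an
 * already refined tag pose; the 2-pixel threshold is an example value, not a recommendation:
 * @code
 * const size_t numberRemoved = history.removeObservations(anyCamera, tag_T_world, Scalar(2));
 * @endcode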
168  */
169  size_t removeObservations(const AnyCamera& anyCamera, const HomogenousMatrix4& tag_T_world, const Scalar maximalError, const Scalar maximalOutliersPercent = Scalar(0.1));
170 
171  /**
172  * Returns the number of observations
173  * @return The number of observations stored in this instance
174  */
175  inline size_t size() const;
176 
177  /**
178  * Returns true if no observations are stored in this instance
179  * @return True if empty, otherwise false
180  */
181  inline bool empty() const;
182 
183  /**
184  * Removes all stored observations stored in this instance
185  */
186  inline void clear();
187 
188  /**
189  * Returns the world-to-camera transformations
190  * @return A reference to the world-to-camera transformations
191  */
192  inline const HomogenousMatrices4& cameraPoses_world_T_camera() const;
193 
194  /**
195  * Returns the world-to-camera transformations
196  * @return A reference to the world-to-camera transformations
197  */
198  inline HomogenousMatrices4& cameraPoses_world_T_camera();
199 
200  /**
201  * Returns the objects points stored in the instance
202  * @return A reference to the object points
203  */
204  inline const Geometry::ObjectPointGroups& objectPointsGroups() const;
205 
206  /**
207  * Returns the objects points stored in the instance
208  * @return A reference to the object points
209  */
210  inline Geometry::ObjectPointGroups& objectPointsGroups();
211 
212  /**
213  * Returns the image points stored in this instance
214  * @return A reference to the image points
215  */
216  inline const Geometry::ImagePointGroups& imagePointsGroups() const;
217 
218  /**
219  * Returns the image points stored in this instance
220  * @return A reference to the image points
221  */
222  inline Geometry::ImagePointGroups& imagePointsGroups();
223 
224  /**
225  * Returns the tracking object points stored in this instance
226  * @return A reference to the tracking object points
227  */
228  inline const Geometry::ObjectPointGroups& trackingObjectPointsGroups() const;
229 
230  /**
231  * Returns the tracking object points stored in this instance
232  * @return A reference to the tracking object points
233  */
234  inline Geometry::ObjectPointGroups& trackingObjectPointsGroups();
235 
236  /**
237  * Returns the tracking image points stored in this instance
238  * @return A reference to the tracking image points
239  */
240  inline const Geometry::ImagePointGroups& trackingImagePointsGroups() const;
241 
242  /**
243  * Returns the tracking image points stored in this instance
244  * @return A reference to the tracking image points
245  */
246  inline Geometry::ImagePointGroups& trackingImagePointsGroups();
247 
248  /**
249  * Returns the latest object points that should be used for tracking
250  * @return A reference to the latest tracking object points
251  */
252  inline const Vectors3& latestTrackingObjectPoints() const;
253 
254  /**
255  * Returns the latest image points that should be used for tracking
256  * @return A reference to the latest tracking image points
257  */
258  inline const Vectors2& latestTrackingImagePoints() const;
259 
260  /**
261  * Monoscopic optimization of the world-to-tag transformation using the stored object-image point correspondences
262  * @param anyCamera The camera that was used for capturing, must be valid
263  * @param unoptimized_world_T_tag The initial estimate of the world-to-tag transformation, must be valid
264  * @param optimized_world_T_tag The optimized world-to-tag transformation
265  * @return True if the pose optimization was successful, otherwise false
266  */
267  bool optimizePose(const AnyCamera& anyCamera, const HomogenousMatrix4& unoptimized_world_T_tag, HomogenousMatrix4& optimized_world_T_tag);
268 
269  /**
270  * Stereoscopic optimization of the world-to-tag transformation using the stored object-image point correspondences
271  * @param anyCameraA The first camera that captured the first set of observations, must be valid
272  * @param anyCameraB The second camera that captured the second set of observations, must be valid
273  * @param tagObservationHistoryA The first set of observations, must not be empty
274  * @param tagObservationHistoryB The second set of observations, must not be empty
275  * @param unoptimized_world_T_tag The initial estimate of the world-to-tag transformation, must be valid
276  * @param optimized_world_T_tag The optimized world-to-tag transformation
277  * @return True if the pose optimization was successful, otherwise false
278  */
279  static bool optimizePose(const AnyCamera& anyCameraA, const AnyCamera& anyCameraB, const TagObservationHistory& tagObservationHistoryA, const TagObservationHistory& tagObservationHistoryB, const HomogenousMatrix4& unoptimized_world_T_tag, HomogenousMatrix4& optimized_world_T_tag);
280 
281  public:
282 
283  /// The world-to-camera transformations
284  HomogenousMatrices4 cameraPoses_world_T_camera_;
285 
286  /// The object points of the tag (in the reference frame of the tag)
287  Geometry::ObjectPointGroups objectPointsGroups_;
288 
289  /// The image points corresponding to the object points at the given camera pose
290  Geometry::ImagePointGroups imagePointsGroups_;
291 
292  /// The image points that should be used to track the tag from frame to frame (temporally)
293  Geometry::ImagePointGroups trackingImagePointsGroups_;
294 
295  /// The object points that should be used to track the tag from frame to frame (temporally)
296  Geometry::ObjectPointGroups trackingObjectPointsGroups_;
297  };
298 
299  /// A vector of tag observation histories
300  typedef std::vector<TagObservationHistory> TagObservationHistories;
301 
302  /**
303  * This class organizes the information of tracked tags
304  */
305  class OCEAN_TRACKING_OCULUSTAGS_EXPORT TrackedTag
306  {
307  public:
308 
309  /**
310  * Copy constructor
311  * @param otherTrackedTag The other instance that will be cloned
312  */
313  inline TrackedTag(const TrackedTag& otherTrackedTag);
314 
315  /**
316  * Move constructor
317  * @param otherTrackedTag The other instance that will be moved
318  */
319  inline TrackedTag(TrackedTag&& otherTrackedTag);
320 
321  /**
322  * Constructor
323  * @param tag The tag that will be stored and managed, must be valid
324  * @param tagObservationHistoryA The observations from the first camera, must not be empty if `tagObservationHistoryB` is empty
325  * @param tagObservationHistoryB The observations from the second camera, must not be empty if `tagObservationHistoryA` is empty
326  * @param trackingState The tracking state of the tag
327  * @param motionType The motion type of the tag
328  */
329  inline TrackedTag(OculusTag&& tag, TagObservationHistory&& tagObservationHistoryA, TagObservationHistory&& tagObservationHistoryB, const TrackingState trackingState, const MotionType motionType);
330 
331  /**
332  * Smaller-than comparator for tracked tag instances
333  * @param otherTrackedTag The other instance that will be compared against this instance, must be valid
334  * @return True if the ID of tag stored in this instance is smaller than the ID of the tag stored in the other instance, otherwise false
335  */
336  inline bool operator<(const TrackedTag& otherTrackedTag) const;
337 
338  /**
339  * Move-Assign operator
340  * @param otherTrackedTag The other instance of which all data will be transferred to this instance; the other instance will be invalid afterwards
341  */
342  inline TrackedTag& operator=(TrackedTag&& otherTrackedTag);
343 
344  public: // TODO Make the section below protected and add setters/getters (later once API is more stable)
345 
346  /// The tag that is managed
347  OculusTag tag_;
348 
349  /// The observations of `tag_` in the first camera
350  TagObservationHistory tagObservationHistoryA_;
351 
352  /// The observations of `tag_` in the second camera
353  TagObservationHistory tagObservationHistoryB_;
354 
355  /// The tracking state of the managed tag
356  TrackingState trackingState_;
357 
358  /// The motion type of the managed tag
359  MotionType motionType_;
360  };
361 
362  /// A map to store all tracked tags
363  typedef std::unordered_map<uint32_t, TrackedTag> TrackedTagMap;
364 
365  /// A vector of tracked tags.
366  typedef std::vector<TrackedTag> TrackedTags;
367 
368  public:
369 
370  /**
371  * Default constructor.
372  */
373  OculusTagTracker();
374 
375  /**
376  * Move constructor.
377  * @param oculusTagTracker Other instance that will be moved to this instance; the other instance will be invalid afterwards
378  */
379  OculusTagTracker(OculusTagTracker&& oculusTagTracker);
380 
381  /**
382  * Destructor
383  */
384  virtual ~OculusTagTracker();
385 
386  /**
387  * Move-Assign operator
388  * @param oculusTagTracker The other instance of which all data will be transferred to this instance; the other instance will be invalid afterwards
389  */
390  OculusTagTracker& operator=(OculusTagTracker&& oculusTagTracker);
391 
392  /**
393  * Tracks tags in the current stereo frames
394  * @param anyCameraA The first camera with which the first input image has been recorded, must be valid
395  * @param anyCameraB The second camera with which the second input image has been recorded, must be valid
396  * @param yFrameA The first input image in which Oculus tags will be searched, must be valid
397  * @param yFrameB The second input image in which Oculus tags will be searched, must be valid
398  * @param world_T_device The transformation that converts device points to world points, must be valid
399  * @param device_T_cameraA The transformation that converts points in the first camera to device points, must be valid
400  * @param device_T_cameraB The transformation that converts points in the second camera to device points, must be valid
401  * @param tags The vector of tags that are visible in the provided stereo frames
402  * @return True if tags were found in the provided stereo frames, otherwise false
403  */
404  bool trackTagsStereo(const AnyCamera& anyCameraA, const AnyCamera& anyCameraB, const Frame& yFrameA, const Frame& yFrameB, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_cameraA, const HomogenousMatrix4& device_T_cameraB, OculusTags& tags);
405 
406  /**
407  * Returns the map of tracked tags
408  * @return A reference to the map of tracked tags
409  */
410  inline const TrackedTagMap& trackedTagMap() const;
411 
412  /**
413  * Creates a rectified image of a tag for visualization
414  * @param anyCameraA The first camera with which the first input image has been recorded, must be valid
415  * @param anyCameraB The second camera with which the second input image has been recorded, must be valid
416  * @param yFrameA The first input image in which Oculus tags will be searched, must be valid
417  * @param yFrameB The second input image in which Oculus tags will be searched, must be valid
418  * @param world_T_device The transformation that converts device points to world points, must be valid
419  * @param device_T_cameraA The transformation that converts points in the first camera to device points, must be valid
420  * @param device_T_cameraB The transformation that converts points in the second camera to device points, must be valid
421  * @param tag The tag for which the image will be rectified
422  * @param rectifiedFrame The frame that will hold the rectified image of the tag; it will have the same pixel format as the input and the size `rectifiedFrameSize` x `rectifiedFrameSize`
423  * @param rectifiedFrameSize Optional size of the rectified frame, range: [1, infinity)
424  * @return True if the creation of the rectified frame was successful, otherwise false
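 *
 * A hedged sketch (cameras, frames, poses, and a detected tag are assumed to be available) extracting the
 * default 128 x 128 rectified view of a tag, e.g., for debugging overlays:
 * @code
 * Frame rectifiedFrame;
 * if (OculusTagTracker::extractRectifiedTagImage(anyCameraA, anyCameraB, yFrameA, yFrameB, world_T_device, device_T_cameraA, device_T_cameraB, tag, rectifiedFrame))
 * {
 *     // rectifiedFrame now holds a fronto-parallel view of the tag
 * }
 * @endcode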
425  */
426  static bool extractRectifiedTagImage(const AnyCamera& anyCameraA, const AnyCamera& anyCameraB, const Frame& yFrameA, const Frame& yFrameB, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_cameraA, const HomogenousMatrix4& device_T_cameraB, const OculusTag& tag, Frame& rectifiedFrame, const uint32_t rectifiedFrameSize = 128u);
427 
428  /**
429  * Detects tags in a grayscale frame
430  * @param anyCamera The camera with which the input image has been recorded, must be valid
431  * @param yFrame The input image in which Oculus tags will be searched, must be valid
432  * @param world_T_device The transformation that converts device points to world points, must be valid
433  * @param device_T_camera The transformation that converts camera points to device points, must be valid
434  * @param defaultTagSize The edge length of all detected tags that are not specified in `tagSizeMap`, range: (0, infinity)
435  * @param tagSizeMap Optional mapping of tag IDs to specific tag sizes, range of tag IDs (key): [0, 1024), range of tag sizes (value): (0, infinity)
436  * @param tagObservationHistories Optional return value holding the tag observations (2D-3D point correspondences)
437  * @return The detected tags
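 *
 * A hedged single-camera detection sketch; the camera, frame, and poses are assumed inputs, and 0.15 is an
 * assumed default tag edge length in meters:
 * @code
 * const OculusTags tags = OculusTagTracker::detectTagsMono(anyCamera, yFrame, world_T_device, device_T_camera, Scalar(0.15));
 * @endcode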
438  */
439  static OculusTags detectTagsMono(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_camera, const Scalar defaultTagSize, const TagSizeMap& tagSizeMap = TagSizeMap(), TagObservationHistories* tagObservationHistories = nullptr);
440 
441  /**
442  * Locates a detected tag in a different camera image, e.g., the second camera of a stereo camera
443  * @note The camera specified here must be mounted to the same device, i.e., it must use the same `world_T_device` transformation as the tag
444  * @param anyCamera The camera with which the input image has been recorded, must be valid
445  * @param yFrame The input image in which the Oculus tag will be searched, must be valid
446  * @param world_T_device The transformation that converts device points to world points, must be valid
447  * @param device_T_camera The transformation that converts camera points to device points, must be valid
448  * @param tag The tag that has been detected in a different camera and which will be located in the given camera if possible, must be valid
449  * @param tagObservationHistory The result holding the observation of the tag for the provided camera
450  * @return True if the tag has been located in the provided camera, otherwise false
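 *
 * A hedged sketch locating a tag (e.g., detected via the first camera) in the second camera of a stereo pair;
 * all inputs are assumed to exist:
 * @code
 * OculusTagTracker::TagObservationHistory observationHistoryB;
 * if (OculusTagTracker::locateTagInCamera(anyCameraB, yFrameB, world_T_device, device_T_cameraB, tag, observationHistoryB))
 * {
 *     // the tag is also visible in the second camera
 * }
 * @endcode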
451  */
452  static bool locateTagInCamera(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_camera, const OculusTag& tag, TagObservationHistory& tagObservationHistory);
453 
454  /**
455  * Assembles specific points on a tag in the tag object space
456  * @param tagPointGroup Indicates which points should be returned
457  * @param tagSize The size of the tag for which the corners are computed, range: (0, infinity)
458  * @param dataMatrix The data matrix of the tag; only needs to be specified if corners inside the area of the data matrix should be generated (can otherwise be ignored)
459  * @return The generated corners in the object space
460  */
461  static Vectors3 getTagObjectPoints(const TagPointGroup tagPointGroup, const Scalar tagSize, const OculusTag::DataMatrix dataMatrix = 0u);
462 
463  /**
464  * Expands the data matrix of a tag to a vector of binary module values
465  * @param dataMatrix The data matrix of a tag that will be expanded
466  * @return A vector containing the values of all 64 modules of a tag
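 *
 * A hedged sketch; `dataMatrix` is assumed to be the 16-bit payload of a previously read tag:
 * @code
 * const std::vector<uint8_t> moduleValues = OculusTagTracker::generateModuleValues(dataMatrix);
 * // moduleValues contains one binary value (0 or 1) for each of the 8 x 8 = 64 modules
 * @endcode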
467  */
468  static std::vector<uint8_t> generateModuleValues(const OculusTag::DataMatrix& dataMatrix);
469 
470  /**
471  * Checks if a valid tag is fully visible in the frame of a camera
472  * @param anyCamera The camera for which it will be checked whether the tag is fully visible, must be valid
473  * @param tag_T_camera The transformation that maps tag object points to camera object coordinates, must be valid
474  * @param tagSize The size of that tag, range: (0, infinity)
475  * @param signedBorder The optional border increasing or decreasing the rectangle in which the tag corners must be located inside the camera frame, in pixels, range: (-infinity, std::min(anyCamera.width(), anyCamera.height()) / 2)
476  * @return True if the tag is valid and fully visible in the frame of the camera, otherwise false
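 *
 * A hedged visibility check; `tag_T_camera` and `tagSize` are assumed to come from a previous detection, and
 * a signed border of 10 pixels is an arbitrary example value:
 * @code
 * if (OculusTagTracker::isTagVisible(anyCamera, tag_T_camera, tagSize, Scalar(10)))
 * {
 *     // the tag projects fully into the camera frame with respect to the given border
 * }
 * @endcode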
477  */
478  static bool isTagVisible(const AnyCamera& anyCamera, const HomogenousMatrix4& tag_T_camera, const Scalar tagSize, const Scalar signedBorder);
479 
480  protected:
481 
482  /**
483  * Tracks 2D-3D correspondences temporally from frame to frame
484  * @param anyCamera The camera with which the current and previous frame (pyramids) have been recorded, must be valid
485  * @param framePyramid The frame pyramid of the current frame, must be valid
486  * @param previousFramePyramid The frame pyramid of the previous frame, must be valid
487  * @param world_T_camera The transformation that converts camera points to world points at the current time stamp, must be valid
488  * @param previous_world_T_tag The transformation that converts tag points to world points at the previous time stamp, must be valid
489  * @param objectPoints The object points that correspond to the previous image points, must have the same size as `previousImagePoints`
490  * @param previousImagePoints The image location of the tag at the previous time stamp, must have at least four elements (the outer corners)
491  * @param imagePoints The image locations of the tracked points at the current time stamp, will have the same size as `previousImagePoints`
492  * @return True if the points were tracked successfully, otherwise false
493  */
494  static bool trackTagCornersTemporally(const AnyCamera& anyCamera, const CV::FramePyramid& framePyramid, const CV::FramePyramid& previousFramePyramid, const HomogenousMatrix4& world_T_camera, const HomogenousMatrix4& previous_world_T_tag, const Vectors3& objectPoints, const Vectors2& previousImagePoints, Vectors2& imagePoints);
495 
496  /**
497  * Detects Oculus Tags in stereo images.
498  * @param anyCameras The camera models that correspond to the input images, must have two valid elements
499  * @param yFrames The 8-bit grayscale images in which the tags will be detected, must have two valid elements
500  * @param world_T_device The world pose of the device, must be valid
501  * @param device_T_cameras The device poses of all cameras, must have two valid elements
502  * @return The detected tags.
503  */
504  static TrackedTags detectTagsStereo(const SharedAnyCameras& anyCameras, const Frames& yFrames, const HomogenousMatrix4& world_T_device, const HomogenousMatrices4& device_T_cameras);
505 
506  /**
507  * Reads the tag information from an image given the locations of its four outer corners
508  * @param anyCamera The camera with which the input image has been recorded, must be valid
509  * @param yFrame The frame from which the tag will be read, must be valid
510  * @param unorientedQuad The four corners of the tag, must be in counter-clockwise order and fully inside the image
511  * @param world_T_device The transformation that converts device points to world points, must be valid
512  * @param device_T_camera The transformation from the camera to the device, must be valid
513  * @param defaultTagSize The default edge length of the detected tag, range: (0, infinity)
514  * @param tag The tag instance initialized with the information from the tag in the image, only valid if this function returns `true`
515  * @param quad The four outer corners of the tag in counter-clockwise order starting with the top-left corner
516  * @param tagSizeMap Optional mapping of tag IDs to specific tag sizes, range of tag IDs (key): [0, 1024), range of tag sizes (value): (0, infinity)
517  * @return True, if a tag at the specified location could be read, otherwise false
518  */
519  static bool readTag(const AnyCamera& anyCamera, const Frame& yFrame, const QuadDetector::Quad& unorientedQuad, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_camera, const Scalar defaultTagSize, OculusTag& tag, QuadDetector::Quad& quad, const TagSizeMap& tagSizeMap = TagSizeMap());
520 
521  /**
522  * Determines the reflectance type of a tag candidate and the intensity threshold between foreground and background
523  * @param anyCamera The camera with which the input image has been recorded, must be valid
524  * @param yFrame The grayscale image in which the tag candidate has been found, must be valid
525  * @param tag_T_camera The 6DOF pose of the tag relative to the camera coordinates at detection time
526  * @param tagSize The size of the corresponding tag, range: (0, infinity)
527  * @param reflectanceType The reflectance type of the tag candidate that has been determined
528  * @param intensityThreshold The intensity value that can be used to separate foreground and background pixel values
529  * @param moduleValueDark The binary value of dark modules, can be `1` or `0`
530  * @return True if the computation was successful, otherwise false
531  */
532  static bool determineReflectanceTypeAndIntensityThreshold(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& tag_T_camera, const Scalar tagSize, OculusTag::ReflectanceType& reflectanceType, uint8_t& intensityThreshold, uint8_t& moduleValueDark);
533 
534  /**
535  * Determines the top-left corner of the tag candidate
536  * @param anyCamera The camera with which the input image has been recorded, must be valid
537  * @param yFrame The grayscale image in which the tag candidate has been found, must be valid
538  * @param unorientedQuad The boundary pattern/tag candidate
539  * @param unorientedTag_T_camera The 6DOF pose of the tag relative to the camera coordinates at detection time
540  * @param tagSize The size of the corresponding tag, range: (0, infinity)
541  * @param orientedQuad Rotated version of `unorientedQuad` where the first element corresponds to the top-left corner of the tag candidate
542  * @param orientedTag_T_camera The correctly rotated (oriented) 6DOF pose of the tag relative to the camera so that the origin is in the top-left corner of the tag
543  * @param intensityThreshold The intensity value that can be used to separate foreground and background pixel values
544  * @param binaryModuleValueDark The binary value of dark modules, can be `1` or `0`
545  * @return True if the computation was successful, otherwise false
546  */
547  static bool determineOrientation(const AnyCamera& anyCamera, const Frame& yFrame, const QuadDetector::Quad& unorientedQuad, const HomogenousMatrix4& unorientedTag_T_camera, const Scalar tagSize, QuadDetector::Quad& orientedQuad, HomogenousMatrix4& orientedTag_T_camera, const uint8_t& intensityThreshold, const uint8_t& binaryModuleValueDark);
548 
549  /**
550  * Reads the modules from the data matrix of a tag
551  * @param anyCamera The camera with which the input image has been recorded, must be valid
552  * @param yFrame The grayscale image in which the tag candidate has been found, must be valid
553  * @param tag_T_camera The 6DOF pose of the tag relative to the camera coordinates at detection time
554  * @param tagSize The size of the corresponding tag, range: (0, infinity)
555  * @param intensityThreshold The intensity value that can be used to separate foreground and background pixel values
556  * @param binaryModuleValueDark The binary value of dark modules, can be `1` or `0`
557  * @param binaryModuleValueLight The binary value of light modules, can be `1` or `0`
558  * @param dataMatrix Will store the modules that were read from a tag
559  * @return True if the computation was successful, otherwise false
560  */
561  static bool readDataMatrix(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& tag_T_camera, const Scalar tagSize, const uint8_t& intensityThreshold, const uint8_t& binaryModuleValueDark, const uint8_t& binaryModuleValueLight, OculusTag::DataMatrix& dataMatrix);
562 
563  /**
564  * Determines a set of 2D-3D corner correspondences of a tag
565  * @param anyCamera The camera with which the input image has been recorded, must be valid
566  * @param yFrame The grayscale image in which the tag candidate has been found, must be valid
567  * @param world_T_device The transformation that converts device points to world points, must be valid
568  * @param device_T_camera The transformation that converts points in the camera to device points, must be valid
569  * @param tag The tag for which the observation corners will be determined, must be valid
570  * @param quad The image locations of the four outer corners of the tag, must be fully inside the image
571  * @param tagObservationHistory The observations to which the new observation will be added
572  * @return True if a set of correspondences was added, otherwise false
573  */
574  static bool addTagObservation(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_camera, const OculusTag& tag, const QuadDetector::Quad& quad, TagObservationHistory& tagObservationHistory);
575 
576  /**
577  * Determines 2D-3D corner correspondences of a tag and optimizes the tag pose based on them
578  * @note The optimization will use all the observations stored in `tagObservationHistory` for the pose optimization
579  * @param anyCamera The camera with which the input image has been recorded, must be valid
580  * @param yFrame The grayscale image in which the tag candidate has been found, must be valid
581  * @param world_T_device The transformation that converts device points to world points, must be valid
582  * @param device_T_camera The transformation that converts points in the camera to device points, must be valid
583  * @param tag The tag for which the observation corners will be determined, must be valid
584  * @param quad The image locations of the four outer corners of the tag, must be fully inside the image
585  * @param tagObservationHistory The observations to which the new observation will be added
586  * @return True if a set of correspondences was added, otherwise false
587  */
588  static bool addTagObservationAndOptimize(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_camera, OculusTag& tag, const QuadDetector::Quad& quad, TagObservationHistory& tagObservationHistory);
589 
590  /**
591  * Computes the 6DOF pose of the tag relative to the location of the camera using 3D-to-2D point correspondences
592  * @param anyCamera The camera with which the input image has been recorded, must be valid
593  * @param imagePoints The 2D image points, size: [4, infinity)
594  * @param objectPoints The corresponding 3D object points in the reference frame of the tag, size: must be identical to `imagePoints`
595  * @param object_T_camera The 6DOF pose of the tag relative to the camera
596  * @param minPoints Minimum number of points used in each RANSAC iteration, range: [4, imagePoints.size())
597  * @return True if the pose has been successfully computed, otherwise false
598  */
599  static bool computePose(const AnyCamera& anyCamera, const Vectors2& imagePoints, const Vectors3& objectPoints, HomogenousMatrix4& object_T_camera, const uint32_t minPoints = 4u);
600 
601  /**
602  * Confirms tag in image given a valid tag instance
603  * @param anyCamera The camera with which the input image has been recorded, must be valid
604  * @param yFrame The grayscale image in which the tag will be confirmed, must be valid
605  * @param world_T_device The transformation that converts device points to world points, must be valid
606  * @param device_T_camera The transformation that converts points in the camera to device points, must be valid
607  * @param tag The tag which will be confirmed in the input image, must be valid
608  * @return True if the tag was found at the specified location in the image, otherwise false
609  */
610  static bool confirmDetectionInFrame(const AnyCamera& anyCamera, const Frame& yFrame, const HomogenousMatrix4& world_T_device, const HomogenousMatrix4& device_T_camera, const OculusTag& tag);
611 
612  /**
613  * Creates an image pyramid from a frame with padding
614  * @param yFrame The frame for which a frame pyramid will be created, must be valid
615  * @param layers The number of layers that the newly created pyramid should have, range: [1, infinity)
616  * @return The newly created image pyramid
617  */
618  static CV::FramePyramid createFramePyramid(const Frame& yFrame, const uint32_t layers);
619 
620  protected:
621 
622  /// A frame counter
623  uint32_t frameCounter_;
624 
625  /// A map of tags that are (known and) tracked
626  TrackedTagMap trackedTagMap_;
627 
628  /// The input frames of the previous tracking iteration
629  Frame previousYFrames_[2];
630 
631  /// The frame pyramids of the previous tracking iteration
632  CV::FramePyramid previousFramePyramids_[2];
633 
634  /// The previous pose of the device
635  HomogenousMatrix4 previous_world_T_device_;
636 
637  /// The poses of the input cameras of the previous tracking iteration
638  HomogenousMatrix4 previousDevice_T_cameras_[2];
639 
640  /// The border area along the inside of the image which will be ignored completely (in pixels), range: [0, min(imageWidth, imageHeight))
641  static constexpr uint32_t frameBorder_ = 10u;
642 
643  /// The minimum absolute difference between foreground and background color in order to count as a transition
644  static constexpr uint32_t minIntensityThreshold_ = 10u;
645 
646  /// The minimum required number of observations before the motion of a tag can be declared static (i.e. it doesn't move in the world), range: [1, infinity)
647  static constexpr size_t numberRequiredObservationForStatic_ = 5;
648 
649  /// The maximum number of observations per tag that will be stored, range: [1, infinity)
650  static constexpr size_t numberMaxAllowedObservations_ = 15;
651 
652  /// The number of frames after which the detector is run to detect new tags, range: [1, infinity)
653  static constexpr unsigned int detectionCadence_ = 15u;
654 
655  /// The maximum projection error in pixels, range: [0, infinity)
656  static constexpr Scalar maxAllowedProjectionError_ = Scalar(0.5);
657 
658  /// The number of layers used for the frame pyramids, maximum supported pixel motion: 2^LAYERS, range: [1, infinity)
659  static constexpr unsigned int numberFrameLayers_ = 6u;
660 };
661 
662 inline void OculusTagTracker::TagObservationHistory::addObservation(const HomogenousMatrix4& world_T_camera, Vectors3&& objectPoints, Vectors2&& imagePoints, Vectors2&& trackingImagePoints, Vectors3&& trackingObjectPoints)
663 {
664  ocean_assert(world_T_camera.isValid());
665  ocean_assert(objectPoints.empty() == false);
666  ocean_assert(objectPoints.size() == imagePoints.size());
667  ocean_assert(trackingImagePoints.size() >= 4);
668  ocean_assert(trackingImagePoints.size() == trackingObjectPoints.size());
669 
670  cameraPoses_world_T_camera_.emplace_back(world_T_camera);
671  objectPointsGroups_.emplace_back(std::move(objectPoints));
672  imagePointsGroups_.emplace_back(std::move(imagePoints));
673  trackingImagePointsGroups_.emplace_back(std::move(trackingImagePoints));
674  trackingObjectPointsGroups_.emplace_back(std::move(trackingObjectPoints));
675 }
676 
677 inline void OculusTagTracker::TagObservationHistory::append(TagObservationHistory& otherObservationHistory)
678 {
679  if (otherObservationHistory.empty())
680  {
681  return;
682  }
683 
684  HomogenousMatrices4 otherCameraPoses_world_T_camera = std::move(otherObservationHistory.cameraPoses_world_T_camera());
685  Geometry::ObjectPointGroups otherObjectPointsGroup = std::move(otherObservationHistory.objectPointsGroups());
686  Geometry::ImagePointGroups otherImagePointsGroup = std::move(otherObservationHistory.imagePointsGroups());
687  Geometry::ImagePointGroups otherTrackingImagePointsGroup = std::move(otherObservationHistory.trackingImagePointsGroups());
688  Geometry::ObjectPointGroups otherTrackingObjectPointsGroup = std::move(otherObservationHistory.trackingObjectPointsGroups());
689 
690  ocean_assert(otherObservationHistory.empty());
691  ocean_assert(otherCameraPoses_world_T_camera.size() != 0);
692  ocean_assert(otherCameraPoses_world_T_camera.size() == otherObjectPointsGroup.size());
693  ocean_assert(otherCameraPoses_world_T_camera.size() == otherImagePointsGroup.size());
694  ocean_assert(otherCameraPoses_world_T_camera.size() == otherTrackingImagePointsGroup.size());
695  ocean_assert(otherCameraPoses_world_T_camera.size() == otherTrackingObjectPointsGroup.size());
696 
697  for (size_t i = 0; i < otherCameraPoses_world_T_camera.size(); ++i)
698  {
699  addObservation(otherCameraPoses_world_T_camera[i], std::move(otherObjectPointsGroup[i]), std::move(otherImagePointsGroup[i]), std::move(otherTrackingImagePointsGroup[i]), std::move(otherTrackingObjectPointsGroup[i]));
700  }
701 }
702 
703 inline size_t OculusTagTracker::TagObservationHistory::size() const
704 {
705  ocean_assert(cameraPoses_world_T_camera_.size() == objectPointsGroups_.size());
706  ocean_assert(cameraPoses_world_T_camera_.size() == imagePointsGroups_.size());
707  ocean_assert(cameraPoses_world_T_camera_.size() == trackingImagePointsGroups_.size());
708  ocean_assert(cameraPoses_world_T_camera_.size() == trackingObjectPointsGroups_.size());
709 
710  return cameraPoses_world_T_camera_.size();
711 }
712 
713 inline bool OculusTagTracker::TagObservationHistory::empty() const
714 {
715  ocean_assert(cameraPoses_world_T_camera_.size() == objectPointsGroups_.size());
716  ocean_assert(cameraPoses_world_T_camera_.size() == imagePointsGroups_.size());
717  ocean_assert(cameraPoses_world_T_camera_.size() == trackingImagePointsGroups_.size());
718  ocean_assert(cameraPoses_world_T_camera_.size() == trackingObjectPointsGroups_.size());
719 
720  return cameraPoses_world_T_camera_.empty();
721 }
722 
723 inline void OculusTagTracker::TagObservationHistory::clear()
724 {
725  cameraPoses_world_T_camera_.clear();
726  objectPointsGroups_.clear();
727  imagePointsGroups_.clear();
728  trackingImagePointsGroups_.clear();
729  trackingObjectPointsGroups_.clear();
730 }
731 
732 inline const HomogenousMatrices4& OculusTagTracker::TagObservationHistory::cameraPoses_world_T_camera() const
733 {
734  return cameraPoses_world_T_camera_;
735 }
736 
737 inline HomogenousMatrices4& OculusTagTracker::TagObservationHistory::cameraPoses_world_T_camera()
738 {
739  return cameraPoses_world_T_camera_;
740 }
741 
742 inline const Geometry::ObjectPointGroups& OculusTagTracker::TagObservationHistory::objectPointsGroups() const
743 {
744  return objectPointsGroups_;
745 }
746 
747 inline Geometry::ObjectPointGroups& OculusTagTracker::TagObservationHistory::objectPointsGroups()
748 {
749  return objectPointsGroups_;
750 }
751 
752 inline const Geometry::ImagePointGroups& OculusTagTracker::TagObservationHistory::imagePointsGroups() const
753 {
754  return imagePointsGroups_;
755 }
756 
757 inline Geometry::ImagePointGroups& OculusTagTracker::TagObservationHistory::imagePointsGroups()
758 {
759  return imagePointsGroups_;
760 }
761 
762 inline const Geometry::ObjectPointGroups& OculusTagTracker::TagObservationHistory::trackingObjectPointsGroups() const
763 {
764  return trackingObjectPointsGroups_;
765 }
766 
767 inline Geometry::ObjectPointGroups& OculusTagTracker::TagObservationHistory::trackingObjectPointsGroups()
768 {
769  return trackingObjectPointsGroups_;
770 }
771 
772 inline const Geometry::ImagePointGroups& OculusTagTracker::TagObservationHistory::trackingImagePointsGroups() const
773 {
774  return trackingImagePointsGroups_;
775 }
776 
777 inline Geometry::ImagePointGroups& OculusTagTracker::TagObservationHistory::trackingImagePointsGroups()
778 {
779  return trackingImagePointsGroups_;
780 }
781 
782 inline const Vectors3& OculusTagTracker::TagObservationHistory::latestTrackingObjectPoints() const
783 {
784  ocean_assert(empty() == false);
785  ocean_assert(trackingObjectPointsGroups_.back().size() == trackingImagePointsGroups_.back().size());
786 
787  return trackingObjectPointsGroups_.back();
788 }
789 
790 inline const Vectors2& OculusTagTracker::TagObservationHistory::latestTrackingImagePoints() const
791 {
792  ocean_assert(empty() == false);
793  ocean_assert(trackingObjectPointsGroups_.back().size() == trackingImagePointsGroups_.back().size());
794 
795  return trackingImagePointsGroups_.back();
796 }
797 
798 inline OculusTagTracker::TrackedTag::TrackedTag(const TrackedTag& otherTrackedTag)
799 {
800  tag_ = otherTrackedTag.tag_;
801  tagObservationHistoryA_ = otherTrackedTag.tagObservationHistoryA_;
802  tagObservationHistoryB_ = otherTrackedTag.tagObservationHistoryB_;
803  trackingState_ = otherTrackedTag.trackingState_;
804  motionType_ = otherTrackedTag.motionType_;
805 }
806 
807 inline OculusTagTracker::TrackedTag::TrackedTag(TrackedTag&& otherTrackedTag)
808 {
809  *this = std::move(otherTrackedTag);
810 }
811 
812 inline OculusTagTracker::TrackedTag::TrackedTag(OculusTag&& tag, TagObservationHistory&& tagObservationHistoryA, TagObservationHistory&& tagObservationHistoryB, const TrackingState trackingState, const MotionType motionType) :
813  tag_(std::move(tag)),
814  tagObservationHistoryA_(std::move(tagObservationHistoryA)),
815  tagObservationHistoryB_(std::move(tagObservationHistoryB)),
816  trackingState_(trackingState),
817  motionType_(motionType)
818 {
819  ocean_assert(tag_.isValid());
820  ocean_assert(tagObservationHistoryA_.size() != 0 || tagObservationHistoryB_.size() != 0);
821 }
822 
823 inline bool OculusTagTracker::TrackedTag::operator<(const TrackedTag& otherTrackedTag) const
824 {
825  ocean_assert(tag_.isValid() && otherTrackedTag.tag_.isValid());
826  return tag_.tagID() < otherTrackedTag.tag_.tagID();
827 }
828 
829 inline OculusTagTracker::TrackedTag& OculusTagTracker::TrackedTag::operator=(TrackedTag&& otherTrackedTag)
830 {
831  if (this != &otherTrackedTag)
832  {
833  tag_ = std::move(otherTrackedTag.tag_);
834  tagObservationHistoryA_ = std::move(otherTrackedTag.tagObservationHistoryA_);
835  tagObservationHistoryB_ = std::move(otherTrackedTag.tagObservationHistoryB_);
836  trackingState_ = otherTrackedTag.trackingState_;
837  motionType_ = otherTrackedTag.motionType_;
838  }
839 
840  return *this;
841 }
842 
843 inline const OculusTagTracker::TrackedTagMap& OculusTagTracker::trackedTagMap() const
844 {
845  return trackedTagMap_;
846 }
847 
848 } // namespace OculusTags
849 
850 } // namespace Tracking
851 
852 } // namespace Ocean
853 
854 #endif // META_OCEAN_TRACKING_OCULUSTAGS_OCULUSTAGTRACKER_H
This class implements the abstract base class for all AnyCamera objects.
Definition: AnyCamera.h:130
This class implements a frame pyramid.
Definition: FramePyramid.h:37
This class implements Ocean's image class.
Definition: Frame.h:1760
bool isValid() const
Returns whether this matrix is a valid homogeneous transformation.
Definition: HomogenousMatrix4.h:1806
This class implements a Oculus tag.
Definition: OculusTag.h:27
bool isValid() const
Returns true if the tag is valid.
uint16_t DataMatrix
Data structure for the payload of the code: 4 x 4 = 16 bits.
Definition: OculusTag.h:46
ReflectanceType
Reflectance types that the tag can have.
Definition: OculusTag.h:34
uint32_t tagID() const
Return the ID of this tag.
Definition: OculusTag.h:144
A history of tag observations for one camera. Each observation consists of a camera pose,...
Definition: OculusTagTracker.h:139
const Geometry::ImagePointGroups & imagePointsGroups() const
Returns the image points stored in this instance.
Definition: OculusTagTracker.h:752
size_t removeObservations(const AnyCamera &anyCamera, const HomogenousMatrix4 &tag_T_world, const Scalar maximalError, const Scalar maximalOutliersPercent=Scalar(0.1))
Removes all observations that do not match the specified pose. This function starts with the newest ...
static bool optimizePose(const AnyCamera &anyCameraA, const AnyCamera &anyCameraB, const TagObservationHistory &tagObservationHistoryA, const TagObservationHistory &tagObservationHistoryB, const HomogenousMatrix4 &unoptimized_world_T_tag, HomogenousMatrix4 &optimized_world_T_tag)
Stereoscopic optimization of the world-to-tag transformation using the stored object-image point corr...
void addObservation(const HomogenousMatrix4 &world_T_camera, Vectors3 &&objectPoints, Vectors2 &&imagePoints, Vectors2 &&trackingImagePoints, Vectors3 &&trackingObjectPoints)
Adds a single observation of a tag to the collection.
Definition: OculusTagTracker.h:662
void append(TagObservationHistory &observationHistory)
Appends a collection of observations.
Definition: OculusTagTracker.h:677
bool empty() const
Returns true if no observations are stored in this instance.
Definition: OculusTagTracker.h:713
bool optimizePose(const AnyCamera &anyCamera, const HomogenousMatrix4 &unoptimized_world_T_tag, HomogenousMatrix4 &optimized_world_T_tag)
Monoscopic optimization of the world-to-tag transformation using the stored object-image point corres...
size_t size() const
Returns the number of observations.
Definition: OculusTagTracker.h:703
HomogenousMatrices4 cameraPoses_world_T_camera_
The world-to-camera transformations.
Definition: OculusTagTracker.h:284
const Geometry::ObjectPointGroups & trackingObjectPointsGroups() const
Returns the tracking object points stored in this instance.
Definition: OculusTagTracker.h:762
Geometry::ImagePointGroups trackingImagePointsGroups_
The image points that should be used to track the tag from frame to frame (temporally)
Definition: OculusTagTracker.h:293
const HomogenousMatrices4 & cameraPoses_world_T_camera() const
Returns the world-to-camera transformations.
Definition: OculusTagTracker.h:732
const Vectors3 & latestTrackingObjectPoints() const
Returns the latest object points that should be used for tracking.
Definition: OculusTagTracker.h:782
Geometry::ObjectPointGroups trackingObjectPointsGroups_
The object points that should be used to track the tag from frame to frame (temporally)
Definition: OculusTagTracker.h:296
void clear()
Removes all stored observations stored in this instance.
Definition: OculusTagTracker.h:723
Geometry::ImagePointGroups imagePointsGroups_
The image points corresponding to the object points at the given camera pose.
Definition: OculusTagTracker.h:290
const Vectors2 & latestTrackingImagePoints() const
Returns the latest image points that should be used for tracking.
Definition: OculusTagTracker.h:790
Geometry::ObjectPointGroups objectPointsGroups_
The object points of the tag (in reference frame of the tag)
Definition: OculusTagTracker.h:287
const Geometry::ObjectPointGroups & objectPointsGroups() const
Returns the object points stored in this instance.
Definition: OculusTagTracker.h:742
const Geometry::ImagePointGroups & trackingImagePointsGroups() const
Returns the tracking image points stored in this instance.
Definition: OculusTagTracker.h:772
This class organizes the information of tracked tags.
Definition: OculusTagTracker.h:306
TagObservationHistory tagObservationHistoryA_
Definition: OculusTagTracker.h:350
TrackedTag(const TrackedTag &otherTrackedTag)
Copy constructor.
Definition: OculusTagTracker.h:798
OculusTag tag_
The tag that is managed.
Definition: OculusTagTracker.h:347
TagObservationHistory tagObservationHistoryB_
The observations of tag_ in the second camera.
Definition: OculusTagTracker.h:353
bool operator<(const TrackedTag &otherTrackedTag) const
Smaller-than comparator for tracked tag instances.
Definition: OculusTagTracker.h:823
MotionType motionType_
The motion type of the managed tag.
Definition: OculusTagTracker.h:359
TrackedTag & operator=(TrackedTag &&otherTrackedTag)
Move-Assign operator.
Definition: OculusTagTracker.h:829
TrackingState trackingState_
The tracking state of the managed tag.
Definition: OculusTagTracker.h:356
This class implements a detector and tracker for Oculus Tags.
Definition: OculusTagTracker.h:38
OculusTagTracker(OculusTagTracker &&oculusTagTracker)
Move constructor.
HomogenousMatrix4 previous_world_T_device_
The previous pose of the device.
Definition: OculusTagTracker.h:635
static TrackedTags detectTagsStereo(const SharedAnyCameras &anyCameras, const Frames &yFrames, const HomogenousMatrix4 &world_T_device, const HomogenousMatrices4 &device_T_cameras)
Detects Oculus Tags in stereo images.
static Vectors3 getTagObjectPoints(const TagPointGroup tagPointGroup, const Scalar tagSize, const OculusTag::DataMatrix dataMatrix=0u)
Assembles specific points on a tag in the tag object space.
static std::vector< uint8_t > generateModuleValues(const OculusTag::DataMatrix &dataMatrix)
Expands the data matrix of a tag to a vector of binary module values.
TagPointGroup
Definition of groups of object corners on a tag.
Definition: OculusTagTracker.h:104
static bool determineReflectanceTypeAndIntensityThreshold(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &tag_T_camera, const Scalar tagSize, OculusTag::ReflectanceType &reflectanceType, uint8_t &intensityThreshold, uint8_t &moduleValueDark)
Determines the reflectance type of a tag candidate and the intensity threshold between foreground an...
static bool locateTagInCamera(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_camera, const OculusTag &tag, TagObservationHistory &tagObservationHistory)
Locates a detected tag in a different camera image, e.g., the second camera of a stereo camera.
TrackedTagMap trackedTagMap_
A map of tags that are (known and) tracked.
Definition: OculusTagTracker.h:626
static bool determineOrientation(const AnyCamera &anyCamera, const Frame &yFrame, const QuadDetector::Quad &unorientedQuad, const HomogenousMatrix4 &unorientedTag_T_camera, const Scalar tagSize, QuadDetector::Quad &orientedQuad, HomogenousMatrix4 &orientedTag_T_camera, const uint8_t &intensityThreshold, const uint8_t &binaryModuleValueDark)
Determines the top-left corner of the tag candidate.
static bool isTagVisible(const AnyCamera &anyCamera, const HomogenousMatrix4 &tag_T_camera, const Scalar tagSize, const Scalar signedBorder)
Checks if a valid tag is fully visible in the frame of a camera.
MotionType
Definition of the motion types of a tracked tag.
Definition: OculusTagTracker.h:63
@ MT_DYNAMIC
Definition: OculusTagTracker.h:68
@ MT_STATIC
The motion is static, i.e., the tag does not move relative to the camera(s) (e.g.,...
Definition: OculusTagTracker.h:71
static bool trackTagCornersTemporally(const AnyCamera &anyCamera, const CV::FramePyramid &framePyramid, const CV::FramePyramid &previousFramePyramid, const HomogenousMatrix4 &world_T_camera, const HomogenousMatrix4 &previous_world_T_tag, const Vectors3 &objectPoints, const Vectors2 &previousImagePoints, Vectors2 &imagePoints)
Tracks 2D-3D correspondences temporally from frame to frame.
static bool extractRectifiedTagImage(const AnyCamera &anyCameraA, const AnyCamera &anyCameraB, const Frame &yFrameA, const Frame &yFrameB, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_cameraA, const HomogenousMatrix4 &device_T_cameraB, const OculusTag &tag, Frame &rectifiedFrame, const uint32_t rectifiedFrameSize=128u)
Creates a rectified image of a tag for visualization.
OculusTagTracker & operator=(OculusTagTracker &&oculusTagTracker)
Move-Assign operator.
static OculusTags detectTagsMono(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_camera, const Scalar defaultTagSize, const TagSizeMap &tagSizeMap=TagSizeMap(), TagObservationHistories *tagObservationHistories=nullptr)
Detects tags in a grayscale frame.
std::vector< TagObservationHistory > TagObservationHistories
A vector of tag observation histories.
Definition: OculusTagTracker.h:300
uint32_t frameCounter_
A frame counter.
Definition: OculusTagTracker.h:623
bool trackTagsStereo(const AnyCamera &anyCameraA, const AnyCamera &anyCameraB, const Frame &yFrameA, const Frame &yFrameB, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_cameraA, const HomogenousMatrix4 &device_T_cameraB, OculusTags &tags)
Tracks tags in the current stereo frames.
static bool readDataMatrix(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &tag_T_camera, const Scalar tagSize, const uint8_t &intensityThreshold, const uint8_t &binaryModuleValueDark, const uint8_t &binaryModuleValueLight, OculusTag::DataMatrix &dataMatrix)
Reads the modules from the data matrix of a tag.
static bool computePose(const AnyCamera &anyCamera, const Vectors2 &imagePoints, const Vectors3 &objectPoints, HomogenousMatrix4 &object_T_camera, const uint32_t minPoints=4u)
Computes the 6DOF pose of the tag relative to the location of the camera using 3D-to-2D point corresp...
static bool confirmDetectionInFrame(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_camera, const OculusTag &tag)
Confirms tag in image given a valid tag instance.
static CV::FramePyramid createFramePyramid(const Frame &yFrame, const uint32_t layers)
Creates an image pyramid from a frame with padding.
TrackingState
Definition of the states of a tracked tag.
Definition: OculusTagTracker.h:45
@ TS_TRACKING
A tag that is currently being tracked.
Definition: OculusTagTracker.h:53
@ TS_NEW_DETECTION
Definition: OculusTagTracker.h:50
std::vector< TrackedTag > TrackedTags
A vector of tracked tags.
Definition: OculusTagTracker.h:366
const TrackedTagMap & trackedTagMap() const
Returns the map of tracked tags.
Definition: OculusTagTracker.h:843
std::unordered_map< uint32_t, TrackedTag > TrackedTagMap
A map to store all tracked tags.
Definition: OculusTagTracker.h:363
static bool addTagObservationAndOptimize(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_camera, OculusTag &tag, const QuadDetector::Quad &quad, TagObservationHistory &tagObservationHistory)
Determines 2D-3D corner correspondences of a tag and optimizes the tag pose based on them.
static bool addTagObservation(const AnyCamera &anyCamera, const Frame &yFrame, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_camera, const OculusTag &tag, const QuadDetector::Quad &quad, TagObservationHistory &tagObservationHistory)
Determines a set of 2D-3D corner correspondences of a tag.
static bool readTag(const AnyCamera &anyCamera, const Frame &yFrame, const QuadDetector::Quad &unorientedQuad, const HomogenousMatrix4 &world_T_device, const HomogenousMatrix4 &device_T_camera, const Scalar defaultTagSize, OculusTag &tag, QuadDetector::Quad &quad, const TagSizeMap &tagSizeMap=TagSizeMap())
Reads the tag information from an image given the locations of its four outer corners.
std::array< Vector2, 4 > Quad
Definition of a quadrilateral.
Definition: QuadDetector.h:40
std::vector< Frame > Frames
Definition of a vector holding padding frames.
Definition: Frame.h:1723
std::vector< ObjectPoints > ObjectPointGroups
Definition of a vector holding object points, so we have groups of object points.
Definition: geometry/Geometry.h:135
std::vector< ImagePoints > ImagePointGroups
Definition of a vector holding image points, so we have groups of image points.
Definition: geometry/Geometry.h:141
float Scalar
Definition of a scalar type.
Definition: Math.h:128
std::vector< HomogenousMatrix4 > HomogenousMatrices4
Definition of a vector holding HomogenousMatrix4 objects.
Definition: HomogenousMatrix4.h:73
std::vector< Vector2 > Vectors2
Definition of a vector holding Vector2 objects.
Definition: Vector2.h:64
SharedAnyCamerasT< Scalar > SharedAnyCameras
Definition of a vector holding AnyCamera objects.
Definition: AnyCamera.h:90
std::vector< Vector3 > Vectors3
Definition of a vector holding Vector3 objects.
Definition: Vector3.h:65
std::unordered_map< uint32_t, Scalar > TagSizeMap
A data structure to map tag IDs to tag sizes.
Definition: OculusTag.h:142
std::vector< OculusTag > OculusTags
A vector of Oculus tags.
Definition: OculusTag.h:136
The namespace covering the entire Ocean framework.
Definition: Accessor.h:15