Ocean
Loading...
Searching...
No Matches
RMVFeatureTracker6DOF.h
Go to the documentation of this file.
1/*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 *
4 * This source code is licensed under the MIT license found in the
5 * LICENSE file in the root directory of this source tree.
6 */
7
8#ifndef META_OCEAN_TRACKING_RMV_RMV_TRACKER_6DOF_H
9#define META_OCEAN_TRACKING_RMV_RMV_TRACKER_6DOF_H
10
#include "ocean/tracking/rmv/RMV.h"
#include "ocean/tracking/rmv/PoseProjection.h"
#include "ocean/tracking/rmv/RMVFeatureDetector.h"
#include "ocean/tracking/rmv/RMVFeatureMap.h"

#include "ocean/base/Accessor.h"
#include "ocean/base/Lock.h"
#include "ocean/base/RandomGenerator.h"
#include "ocean/base/Signal.h"
#include "ocean/base/Thread.h"

#include "ocean/math/AnyCamera.h"
#include "ocean/math/HomogenousMatrix4.h"

#include "ocean/tracking/VisualTracker.h"
27namespace Ocean
28{
29
30namespace Tracking
31{
32
33namespace RMV
34{
35
36/**
37 * This class implements a RMV feature tracker.
38 * @ingroup trackingrmv
39 */
40class OCEAN_TRACKING_RMV_EXPORT RMVFeatureTracker6DOF :
41 public VisualTracker,
42 private Thread
43{
44 public:
45
46 /**
47 * Creates a new RMV feature tracker object.
48 * @param detectorType The type of the feature detector to be sued
49 */
51
52 /**
53 * Destructs a RMV feature tracker object.
54 */
56
57 /**
58 * Returns the feature detector type of this tracker.
59 * @return Detector type
60 */
61 inline RMVFeatureDetector::DetectorType detectorType() const;
62
63 /**
64 * Returns the feature map of this tracker.
65 * @return Tracker feature map
66 */
67 inline const RMVFeatureMap& featureMap() const;
68
69 /**
70 * Sets or changes the feature map for this tracker.
71 * @param featureMap Feature map to be set
72 * @param randomGenerator Random number generator to be used
73 * @param autoUpdateMaxPositionOffset State determining whether the maximal position offset between two frame will be adjusted to the feature map size automatically
74 */
75 void setFeatureMap(const RMVFeatureMap& featureMap, RandomGenerator& randomGenerator, const bool autoUpdateMaxPositionOffset = true);
76
77 /**
78 * Executes the tracking step for a collection of frames and corresponding cameras
79 * @see VisualTracker::determinePoses().
80 */
81 bool determinePoses(const Frames& frames, const SharedAnyCameras& anyCameras, TransformationSamples& transformations, const Quaternion& world_R_camera = Quaternion(false), Worker* worker = nullptr) override;
82
83 /**
84 * Deprecated.
85 *
86 * Executes the tracking for a given frame.
87 * @see VisualTracker::determinePoses().
88 */
89 bool determinePoses(const Frame& frame, const PinholeCamera& camera, const bool frameIsUndistorted, TransformationSamples& transformations, const Quaternion& previousCamera_R_camera = Quaternion(false), Worker* worker = nullptr) override;
90
91 private:
92
93 /**
94 * Determines the camera pose for a given frame (should be the current frame).
95 * @param frame The frame which will be used for pose determination, must be valid
96 * @param camera The camera profile defining the projection, with same dimension as the given frame
97 * @param frameIsUndistorted True, if the original input frame is undistorted and thus feature must not be undistorted explicitly
98 * @param world_T_camera The resulting camera pose
99 * @param worker Optional worker object to distribute the computation
100 * @return True, if succeeded
101 */
102 bool internDeterminePose(const Frame& frame, const AnyCamera& camera, const bool frameIsUndistorted, HomogenousMatrix4& world_T_camera, Worker* worker);
103
104 /**
105 * Determines the camera pose if no reliable knowledge is given from any previous camera frame.
106 * This function applies a multi-scale/pyramid approach for pose estimation.
107 * @param frame The frame which will be used for pose determination, must be valid
108 * @param camera The camera profile defining the projection, must be valid
109 * @param world_T_camera The resulting camera pose if the pose could be determined
110 * @param worker Optional worker object to distribute the computation
111 * @return True, if succeeded
112 */
113 bool determinePoseWithoutKnowledgePyramid(const Frame& frame, const AnyCamera& camera, HomogenousMatrix4& world_T_camera, Worker* worker);
114
115 /**
116 * Determines the camera pose if no reliable knowledge is given from any previous camera frame.
117 * This function does not apply any multi-scale/pyramid approach for pose estimation.
118 * @param camera The camera profile defining the projection, must be valid
119 * @param imagePoints The feature points that have been determined in the current camera frame, should be sorted according to the strength of the features, at least 20
120 * @param world_T_camera The resulting camera pose if the pose could be determined
121 * @param worker Optional worker object to distribute the computation
122 * @return True, if succeeded
123 */
124 bool determinePoseWithoutKnowledgeDefault(const AnyCamera& camera, const Vectors2& imagePoints, HomogenousMatrix4& world_T_camera, Worker* worker);
125
126 /**
127 * Determines a precise pose based on a rough camera pose and mainly based on strong feature correspondences from the previous frame.
128 * @param world_T_roughCamera The already known rough camera pose for the current frame, must be valid
129 * @param camera The camera profile defining the projection, must be valid
130 * @param imagePoints The image points (strong feature points) that has been determined in the current camera frame, at least 10, will be empty if this function succeeds
131 * @param world_T_camera The resulting precise pose, which will be very accurate
132 * @param worker Optional worker object to distribute the computation
133 * @return True, if succeeded
134 * @see determinePoseWithAnyPreviousCorrespondences().
135 */
136 bool determinePoseWithStrongPreviousCorrespondences(const HomogenousMatrix4& world_T_roughCamera, const AnyCamera& camera, Vectors2& imagePoints, HomogenousMatrix4& world_T_camera, Worker* worker);
137
138 /**
139 * Determines a precise pose based on a rough camera pose and mainly based on any feature correspondences from the previous frame.
140 * This function does not take strong correspondences (from the previous frame) but any as this function should be the backup solution for the strong-correspondence-solution.
141 * @param world_T_roughCamera The already known rough camera pose for the current frame, must be valid
142 * @param camera The camera profile defining the projection, must be valid
143 * @param imagePoints The image points (strong feature points) that has been determined in the current camera frame, at least 10, will be empty if this function succeeds
144 * @param world_T_camera The resulting precise pose, which will be very accurate
145 * @param worker Optional worker object to distribute the computation
146 * @return True, if succeeded
147 * @see determinePoseWithStrongPreviousCorrespondences().
148 */
149 bool determinePoseWithAnyPreviousCorrespondences(const HomogenousMatrix4& world_T_roughCamera, const AnyCamera& camera, Vectors2& imagePoints, HomogenousMatrix4& world_T_camera, Worker* worker);
150
151 /**
152 * Determines the precise camera pose based on an already known rough camera pose.
153 * @param world_T_roughCamera The already known rough camera pose, must be valid
154 * @param camera The camera profile defining the projection, must be valid
155 * @param imagePoints The image points (strong feature points) for which the precise camera pose will be determined, at least 5, will be empty if this function succeeds
156 * @param world_T_camera The resulting precise pose
157 * @param worker Optional worker object to distribute the computation
158 * @return True, if succeeded
159 */
160 bool determinePoseWithRoughPose(const HomogenousMatrix4& world_T_roughCamera, const AnyCamera& camera, Vectors2& imagePoints, HomogenousMatrix4& world_T_camera, Worker* worker);
161
162 /**
163 * Refines a rough camera pose by application of strong feature correspondences from the previous frame.
164 * This function tries to re-find/determine strong and reliable 2D/3D feature correspondences from the previous frame within two stages.<br>
165 * The resulting pose will be significantly better/more precise than provided rough pose but still need improvements.
166 * @param roughFlippedCamera_T_world The already known rough inverted and flipped camera pose for the current frame, must be valid
167 * @param camera The camera profile defining the projection, must be valid
168 * @param imagePoints The image points (strong feature points) that has been determined in the current camera frame, at least 10
169 * @param flippedCamera_T_world The resulting improved inverted and flipped pose, which may still not be very accurate
170 * @return True, if succeeded
171 */
172 bool refinePoseWithStrongPreviousCorrespondencesIF(const HomogenousMatrix4& roughFlippedCamera_T_world, const AnyCamera& camera, const Vectors2& imagePoints, HomogenousMatrix4& flippedCamera_T_world);
173
174 /**
175 * Detects feature points in a given frame optional within a defined sub-region.
176 * @param yFrame The frame in which the feature points will be detected, must have pixel format FORMAT_Y8
177 * @param frameIsUndistorted True, if the original input frame is undistorted and thus feature must not be undistorted explicitly
178 * @param boundingBox The bounding box defining the sub-region inside the frame in which the feature points will be detected, with range (-infinity, infinity)x(-infinity, infinity), and invalid bounding box to detect features in the entire frame
179 * @param worker Optional worker object to distribute the computation
180 */
181 Vectors2 detectFeatures(const Frame& yFrame, const bool frameIsUndistorted, const Box2& boundingBox = Box2(), Worker* worker = nullptr);
182
183 /**
184 * Returns whether the data that is processed asynchronously (between two successive camera frame - directly after a camera pose could be determined) is available already.
185 * @return True, if so
186 * @see startAsynchronousDataProcessingIF().
187 */
189
190 /**
191 * Starts the asynchronous data processing for precise (inverted and flipped) pose an the corresponding image points.
192 * The process is asynchronous as the resulting information is necessary for the next camera frame (to simplify the tracking).<br>
193 * Due to performance reasons, the processing is not done in the moment the next frame is handled but in the meantime.
194 * @param flippedCamera_T_world The precise inverted and flipped camera pose to be used for the data processing
195 * @param camera The camera profile defining the projection
196 * @param imagePoints The image points to be used for the data processing, will be moved
197 * @see asynchronousDataProcessed().
198 */
199 void startAsynchronousDataProcessingIF(const HomogenousMatrix4& flippedCamera_T_world, const AnyCamera& camera, Vectors2&& imagePoints);
200
201 /**
202 * Thread run function.
203 */
204 void threadRun() override;
205
206 /**
207 * Determines the used 3D object points using the final most accurate pose possible.
208 * @param fineFlippedCamera_T_world The precise inverted and flipped camera pose to be used
209 * @param camera The camera object to be used
210 * @param imagePoints Current detected image points
211 * @param minimalStrongObjectPoints The minimal number of strong object points that need to be found to avoid the determination of semi-strong features, with range [1, infinity)
212 * @param strongObjectPointIndices Resulting indices of valid and strong object points from the feature map (strongly matching to the image points and the given pose), with ascending order
213 * @param moderateObjectPointIndices Optional resulting indices of valid and moderate/semi-strong object points from the feature map (only in the case not enough strong features could be found regarding to the specified minimal number), with ascending order
214 * @param usedObjectPointIndices Resulting indices of valid object points from the feature map (weakly matching to the image points and the given pose), with ascending order
215 * @return True, if succeeded
216 */
217 bool determineUsedFeaturesIF(const HomogenousMatrix4& fineFlippedCamera_T_world, const AnyCamera& camera, const Vectors2& imagePoints, const size_t minimalStrongObjectPoints, Indices32& strongObjectPointIndices, Indices32& moderateObjectPointIndices, Indices32& usedObjectPointIndices);
218
219 /**
220 * Adds unique and reliable 2D/3D correspondences based on known reliable object points (e.g., from the previous frame) and a uniqueness term.
221 * @param roughFlippedCamera_T_world The already known rough inverted and flipped camera pose for the current frame, must be valid
222 * @param camera The camera profile defining the projection, must be valid
223 * @param imagePoints The image points (strong feature points) that has been determined in the current camera frame, must be valid
224 * @param numberImagePoints The number of provided image points, with range [1, infinity)
225 * @param objectPointAccessor The accessor providing all reliable object points for which the corresponding image points will be determined, at least one
226 * @param searchWindow Size of the search window (as 'radius') in which possible candidate points will be investigated, in pixel, with range (0, infinity)
227 * @param uniquenessSqrFactor Factor to distinguish between a unique and non-unique correspondence, with range (0, infinity)
228 * @param resultingObjectPoints The object points to which the new determined object points (from unique correspondences) will be added
229 * @param resultingImagePoints the image points to which the new determined image points (from unique correspondences) will be added
230 */
231 static void addUniqueCorrespondencesIF(const HomogenousMatrix4& roughFlippedCamera_T_world, const AnyCamera& camera, const Vector2* imagePoints, const size_t numberImagePoints, const ConstIndexedAccessor<Vector3>& objectPointAccessor, const Scalar searchWindow, const Scalar uniquenessSqrFactor, Vectors3& resultingObjectPoints, Vectors2& resultingImagePoints);
232
233 /**
234 * Refines a rough camera pose by application of guessed feature correspondences between projected object points and image points visible in the current camera frame.
235 * The feature correspondences a guessed due to uniqueness terms.
236 * @param roughFlippedCamera_T_world The already known rough inverted flipped camera pose for the current frame, must be valid
237 * @param camera The camera profile defining the projection, must be valid
238 * @param imagePoints The image points (strong feature points) that has been determined in the current camera frame, at least 10
239 * @param objectPoints The object points that will be used for pose determination, can be the entire set of object points from the feature map or a subset with special properties (e.g., very strong ones), at least 10
240 * @param flippedCamera_T_world The resulting inverted flipped precise camera pose
241 * @param useNumberImagePoints The maximal number of image points that will be used for tracking - so that the (first) strongest image points will be used only, with range [10, infinity)
242 * @param useNumberObjectPoints The maximal number of object points that will be used for tracking, so that the (first) strong object points will be used only, with range [10, infinity)
243 * @param searchWindow Size of the search window (as 'radius') in which possible candidate points will be investigated, in pixel, with range (0, infinity)
244 * @param uniquenessSqrFactor Factor to distinguish between a unique and non-unique correspondence, with range (0, infinity)
245 * @param maxSqrDistance The maximal square distance for a unique correspondence, with range [0, infinity)
246 * @param worker Optional worker object to distribute the computation
247 * @return The number of points correspondences that have been used during the pose refinement, with range [5, infinity), 0 if the pose refinement failed
248 */
249 static unsigned int refinePoseIF(const HomogenousMatrix4& roughFlippedCamera_T_world, const AnyCamera& camera, const Vectors2& imagePoints, const Vectors3& objectPoints, HomogenousMatrix4& flippedCamera_T_world, const unsigned int useNumberImagePoints, const unsigned int useNumberObjectPoints, const Scalar searchWindow, const Scalar uniquenessSqrFactor, const Scalar maxSqrDistance = Numeric::maxValue(), Worker* worker = nullptr);
250
251 /**
252 * Returns a size factor that reflects the size of the current camera resolution.
253 * The larger the camera dimension the larger the factor, the default camera dimension is 640x480 with a resulting factor of 1.
254 * @return The size factor, with range (0, infinity)
255 */
256 static inline Scalar cameraResolutionFactor(const AnyCamera& camera);
257
258 private:
259
260 /// Feature map of this tracker.
262
263 /// Pose projection set.
265
266 /// The type of the feature detector to be used.
268
269 /// The current strength threshold for the feature tracker, will be adjusted as time goes by, with range [0, infinity)
271
272 /// Maximal number of feature points to be used for the pose projections, with range [10, infinity)
274
275 /// The radius defining the circle around each strong feature point not containing more than one projected object point so that a point correspondence counts as strong, in pixels, with range (trackerSemiStrongCorrespondencesEmptyAreaRadius_, infinity)
277
278 /// The radius defining the circle around each semi-strong feature point not containing more than one projected object point so that a point correspondence counts as semi-strong, in pixels, with range [0, trackerStrongCorrespondencesEmptyAreaRadius_)
280
281 /// The random number generator object to be used.
283
284 /// True, if the asynchronous data processing function is currently active.
286
287 /// The inverted and flipped camera pose to be used during the asynchronous data processing function.
289
290 /// The camera profile to be used during the asynchronous data processing function.
292
293 /// The image points to be used during the asynchronous data processing function.
295
296 /// The signal used to invoke the asynchronous data processing function.
298
299 /// The lock for the asynchronous data processing function.
301};
302
307
309{
310 return trackerFeatureMap_;
311}
312
314{
315 ocean_assert(camera.isValid());
316
317 return Vector2(Scalar(camera.width()), Scalar(camera.height())).length() * Scalar(0.00125); // 1/800 = sqrt(640^2 + 480^2)
318}
319
320}
321
322}
323
324}
325
326#endif // META_OCEAN_TRACKING_RMV_RMV_TRACKER_6DOF_H
This class implements the abstract base class for all AnyCamera objects.
Definition AnyCamera.h:131
virtual unsigned int width() const =0
Returns the width of the camera image.
virtual unsigned int height() const =0
Returns the height of the camera image.
virtual bool isValid() const =0
Returns whether this camera is valid.
This class implements a base class for all indexed-based accessors allowing a constant reference acce...
Definition Accessor.h:241
This class implements Ocean's image class.
Definition Frame.h:1879
This class implements a recursive lock object.
Definition Lock.h:31
This class implements a generator for random numbers.
Definition RandomGenerator.h:42
This class implements a signal.
Definition Signal.h:31
This class implements a thread.
Definition Thread.h:115
This class implements a set of pose projections.
Definition PoseProjection.h:121
DetectorType
Definition of individual feature detectors.
Definition RMVFeatureDetector.h:41
This class implements a feature map.
Definition RMVFeatureMap.h:33
This class implements a RMV feature tracker.
Definition RMVFeatureTracker6DOF.h:43
bool determinePoseWithRoughPose(const HomogenousMatrix4 &world_T_roughCamera, const AnyCamera &camera, Vectors2 &imagePoints, HomogenousMatrix4 &world_T_camera, Worker *worker)
Determines the precise camera pose based on an already known rough camera pose.
bool determinePoseWithStrongPreviousCorrespondences(const HomogenousMatrix4 &world_T_roughCamera, const AnyCamera &camera, Vectors2 &imagePoints, HomogenousMatrix4 &world_T_camera, Worker *worker)
Determines a precise pose based on a rough camera pose and mainly based on strong feature corresponde...
bool determinePoseWithAnyPreviousCorrespondences(const HomogenousMatrix4 &world_T_roughCamera, const AnyCamera &camera, Vectors2 &imagePoints, HomogenousMatrix4 &world_T_camera, Worker *worker)
Determines a precise pose based on a rough camera pose and mainly based on any feature correspondence...
Signal trackerAsynchronousSignal_
The signal used to invoke the asynchronous data processing function.
Definition RMVFeatureTracker6DOF.h:297
Scalar trackerFeatureDetectorStrength_
The current strength threshold for the feature tracker, will be adjusted as time goes by,...
Definition RMVFeatureTracker6DOF.h:270
static unsigned int refinePoseIF(const HomogenousMatrix4 &roughFlippedCamera_T_world, const AnyCamera &camera, const Vectors2 &imagePoints, const Vectors3 &objectPoints, HomogenousMatrix4 &flippedCamera_T_world, const unsigned int useNumberImagePoints, const unsigned int useNumberObjectPoints, const Scalar searchWindow, const Scalar uniquenessSqrFactor, const Scalar maxSqrDistance=Numeric::maxValue(), Worker *worker=nullptr)
Refines a rough camera pose by application of guessed feature correspondences between projected objec...
static void addUniqueCorrespondencesIF(const HomogenousMatrix4 &roughFlippedCamera_T_world, const AnyCamera &camera, const Vector2 *imagePoints, const size_t numberImagePoints, const ConstIndexedAccessor< Vector3 > &objectPointAccessor, const Scalar searchWindow, const Scalar uniquenessSqrFactor, Vectors3 &resultingObjectPoints, Vectors2 &resultingImagePoints)
Adds unique and reliable 2D/3D correspondences based on known reliable object points (e....
RMVFeatureMap trackerFeatureMap_
Feature map of this tracker.
Definition RMVFeatureTracker6DOF.h:261
RMVFeatureTracker6DOF(const RMVFeatureDetector::DetectorType detectorType)
Creates a new RMV feature tracker object.
RandomGenerator trackerRandomGenerator_
The random number generator object to be used.
Definition RMVFeatureTracker6DOF.h:282
const RMVFeatureMap & featureMap() const
Returns the feature map of this tracker.
Definition RMVFeatureTracker6DOF.h:308
bool internDeterminePose(const Frame &frame, const AnyCamera &camera, const bool frameIsUndistorted, HomogenousMatrix4 &world_T_camera, Worker *worker)
Determines the camera pose for a given frame (should be the current frame).
RMVFeatureDetector::DetectorType trackerDetectorType_
The type of the feature detector to be used.
Definition RMVFeatureTracker6DOF.h:267
Vectors2 trackerAsynchronousDataProcessingImagePoints_
The image points to be used during the asynchronous data processing function.
Definition RMVFeatureTracker6DOF.h:294
bool determinePoses(const Frames &frames, const SharedAnyCameras &anyCameras, TransformationSamples &transformations, const Quaternion &world_R_camera=Quaternion(false), Worker *worker=nullptr) override
Executes the tracking step for a collection of frames and corresponding cameras.
size_t trackerMaximalPoseProjectionFeatureNumber_
Maximal number of feature points to be used for the pose projections, with range [10,...
Definition RMVFeatureTracker6DOF.h:273
bool determinePoseWithoutKnowledgePyramid(const Frame &frame, const AnyCamera &camera, HomogenousMatrix4 &world_T_camera, Worker *worker)
Determines the camera pose if no reliable knowledge is given from any previous camera frame.
HomogenousMatrix4 trackerAsynchronousDataProcessingFlippedCamera_T_world_
The inverted and flipped camera pose to be used during the asynchronous data processing function.
Definition RMVFeatureTracker6DOF.h:288
bool refinePoseWithStrongPreviousCorrespondencesIF(const HomogenousMatrix4 &roughFlippedCamera_T_world, const AnyCamera &camera, const Vectors2 &imagePoints, HomogenousMatrix4 &flippedCamera_T_world)
Refines a rough camera pose by application of strong feature correspondences from the previous frame.
bool determineUsedFeaturesIF(const HomogenousMatrix4 &fineFlippedCamera_T_world, const AnyCamera &camera, const Vectors2 &imagePoints, const size_t minimalStrongObjectPoints, Indices32 &strongObjectPointIndices, Indices32 &moderateObjectPointIndices, Indices32 &usedObjectPointIndices)
Determines the used 3D object points using the final most accurate pose possible.
bool asynchronousDataProcessed()
Returns whether the data that is processed asynchronously (between two successive camera frame - dire...
bool determinePoses(const Frame &frame, const PinholeCamera &camera, const bool frameIsUndistorted, TransformationSamples &transformations, const Quaternion &previousCamera_R_camera=Quaternion(false), Worker *worker=nullptr) override
Deprecated.
Lock trackerAsynchronousDataProcessingLock_
The lock for the asynchronous data processing function.
Definition RMVFeatureTracker6DOF.h:300
Vectors2 detectFeatures(const Frame &yFrame, const bool frameIsUndistorted, const Box2 &boundingBox=Box2(), Worker *worker=nullptr)
Detects feature points in a given frame optional within a defined sub-region.
Scalar trackerSemiStrongCorrespondencesEmptyAreaRadius_
The radius defining the circle around each semi-strong feature point not containing more than one pro...
Definition RMVFeatureTracker6DOF.h:279
void startAsynchronousDataProcessingIF(const HomogenousMatrix4 &flippedCamera_T_world, const AnyCamera &camera, Vectors2 &&imagePoints)
Starts the asynchronous data processing for the precise (inverted and flipped) pose and the corresponding ...
PoseProjectionSet trackerPoseProjectionSet_
Pose projection set.
Definition RMVFeatureTracker6DOF.h:264
void setFeatureMap(const RMVFeatureMap &featureMap, RandomGenerator &randomGenerator, const bool autoUpdateMaxPositionOffset=true)
Sets or changes the feature map for this tracker.
static Scalar cameraResolutionFactor(const AnyCamera &camera)
Returns a size factor that reflects the size of the current camera resolution.
Definition RMVFeatureTracker6DOF.h:313
bool trackerAsynchronousDataProcessingActive_
True, if the asynchronous data processing function is currently active.
Definition RMVFeatureTracker6DOF.h:285
SharedAnyCamera trackerAsynchronousDataProcessingCamera_
The camera profile to be used during the asynchronous data processing function.
Definition RMVFeatureTracker6DOF.h:291
Scalar trackerStrongCorrespondencesEmptyAreaRadius_
The radius defining the circle around each strong feature point not containing more than one projecte...
Definition RMVFeatureTracker6DOF.h:276
bool determinePoseWithoutKnowledgeDefault(const AnyCamera &camera, const Vectors2 &imagePoints, HomogenousMatrix4 &world_T_camera, Worker *worker)
Determines the camera pose if no reliable knowledge is given from any previous camera frame.
void threadRun() override
Thread run function.
RMVFeatureDetector::DetectorType detectorType() const
Returns the feature detector type of this tracker.
Definition RMVFeatureTracker6DOF.h:303
~RMVFeatureTracker6DOF()
Destructs a RMV feature tracker object.
This class implements a base class for all visual tracker objects.
Definition tracking/VisualTracker.h:45
std::vector< TransformationSample > TransformationSamples
Definition of a vector holding a transformation sample object.
Definition tracking/VisualTracker.h:98
T length() const
Returns the length of the vector.
Definition Vector2.h:627
This class implements a worker able to distribute function calls over different threads.
Definition Worker.h:33
std::vector< Index32 > Indices32
Definition of a vector holding 32 bit index values.
Definition Base.h:96
std::vector< Frame > Frames
Definition of a vector holding padding frames.
Definition Frame.h:1842
std::vector< Vector2 > Vectors2
Definition of a vector holding Vector2 objects.
Definition Vector2.h:64
float Scalar
Definition of a scalar type.
Definition Math.h:129
std::shared_ptr< AnyCamera > SharedAnyCamera
Definition of a shared pointer holding an AnyCamera object with Scalar precision.
Definition AnyCamera.h:61
std::vector< Vector3 > Vectors3
Definition of a vector holding Vector3 objects.
Definition Vector3.h:65
VectorT2< Scalar > Vector2
Definition of a 2D vector.
Definition Vector2.h:28
SharedAnyCamerasT< Scalar > SharedAnyCameras
Definition of a vector holding AnyCamera objects.
Definition AnyCamera.h:91
The namespace covering the entire Ocean framework.
Definition Accessor.h:15