8 #ifndef META_OCEAN_CV_FRAME_CONVERTER_Y_10_PACKED_H
9 #define META_OCEAN_CV_FRAME_CONVERTER_Y_10_PACKED_H
19 #include <unordered_map>
80 static inline void convertY10_PackedToY8Linear(
const uint8_t*
const source, uint8_t*
const target,
const unsigned int width,
const unsigned int height,
const ConversionFlag flag,
const unsigned int sourcePaddingElements,
const unsigned int targetPaddingElements,
Worker* worker =
nullptr);
98 static inline void convertY10_PackedToY8GammaLUT(
const uint8_t*
const source, uint8_t*
const target,
const unsigned int width,
const unsigned int height,
const ConversionFlag flag,
const float gamma,
const unsigned int sourcePaddingElements,
const unsigned int targetPaddingElements,
Worker* worker =
nullptr);
116 static void convertY10_PackedToY8GammaApproximated(
const uint8_t*
const source, uint8_t*
const target,
const unsigned int width,
const unsigned int height,
const ConversionFlag flag,
const float gamma,
const unsigned int sourcePaddingElements,
const unsigned int targetPaddingElements,
Worker* worker =
nullptr);
129 static inline void convertY10_PackedToY10(
const uint8_t*
const source, uint16_t*
const target,
const unsigned int width,
const unsigned int height,
const ConversionFlag flag,
const unsigned int sourcePaddingElements,
const unsigned int targetPaddingElements,
Worker* worker =
nullptr);
142 static inline void convertY10_PackedToBGR24(
const uint8_t*
const source, uint8_t*
const target,
const unsigned int width,
const unsigned int height,
const ConversionFlag flag,
const unsigned int sourcePaddingElements,
const unsigned int targetPaddingElements,
Worker* worker =
nullptr);
155 static inline void convertY10_PackedToRGB24(
const uint8_t*
const source, uint8_t*
const target,
const unsigned int width,
const unsigned int height,
const ConversionFlag flag,
const unsigned int sourcePaddingElements,
const unsigned int targetPaddingElements,
Worker* worker =
nullptr);
/// Converts a Y10_Packed row to a Y8 row by applying gamma compression/correction with a 3-step linear interpolation.
/// @tparam tStep01 The 10-bit boundary between the first and second linear segment, with range (0, tStep12)
/// @tparam tStep12 The 10-bit boundary between the second and third linear segment, with range (tStep01, 1023)
/// @param source The source row, must be valid
/// @param target The target row, must be valid
/// @param size The number of pixels in the row, with range [4, infinity), must be a multiple of 4
/// @param parameters The five int32 interpolation coefficients (m0*256, m1*256, m2*256, c1, c2), must be valid
template <unsigned int tStep01, unsigned int tStep12>
static void convertRowY10_PackedToY8GammaApproximated(const uint8_t* source, uint8_t* target, const size_t size, const void* parameters);
/// Converts a Y10_Packed row to a Y10 row (unpacks the 10-bit values into uint16_t elements).
/// @param source The source row, must be valid
/// @param target The target row, must be valid
/// @param size The number of pixels in the row, with range [4, infinity), must be a multiple of 4
/// @param unusedParameters Unused parameter, present to match the generic row-conversion signature
static void convertRowY10_PackedToY10(const uint8_t* source, uint16_t* target, const size_t size, const void* unusedParameters = nullptr);
#if defined(OCEAN_HARDWARE_NEON_VERSION) && OCEAN_HARDWARE_NEON_VERSION >= 10

/// Converts 16 pixels (20 elements) of a Y10_Packed buffer to 16 Y8 pixels by applying a linear conversion, NEON implementation.
/// @param source The 20 packed source elements, must be valid
/// @param target The 16 resulting Y8 pixels, must be valid
static OCEAN_FORCE_INLINE void convert16PixelY10_PackedToY8LinearNEON(const uint8_t* const source, uint8_t* const target);

/// Converts 16 pixels (20 elements) of a Y10_Packed buffer to 16 Y8 pixels by applying gamma compression/correction
/// with a 3-step linear interpolation, NEON implementation.
/// @tparam tStep01 The 10-bit boundary between the first and second linear segment, with range (0, tStep12)
/// @tparam tStep12 The 10-bit boundary between the second and third linear segment, with range (tStep01, 1023)
/// @param source The 20 packed source elements, must be valid
/// @param m0_256_s_16x4 The first slope multiplied by 256, duplicated in all four lanes
/// @param m1_256_s_16x4 The second slope multiplied by 256, duplicated in all four lanes
/// @param m2_256_s_16x4 The third slope multiplied by 256, duplicated in all four lanes
/// @param c1_s_16x8 The second segment's offset, duplicated in all eight lanes
/// @param c2_s_16x8 The third segment's offset, duplicated in all eight lanes
/// @param target The 16 resulting Y8 pixels, must be valid
template <unsigned int tStep01, unsigned int tStep12>
static OCEAN_FORCE_INLINE void convert16PixelY10_PackedToY8ApproximatedNEON(const uint8_t* const source, const int16x4_t& m0_256_s_16x4, const int16x4_t& m1_256_s_16x4, const int16x4_t& m2_256_s_16x4, const int16x8_t& c1_s_16x8, const int16x8_t& c2_s_16x8, uint8_t* const target);

#endif // defined(OCEAN_HARDWARE_NEON_VERSION) && OCEAN_HARDWARE_NEON_VERSION >= 10
235 ocean_assert(source !=
nullptr && target !=
nullptr);
236 ocean_assert(width >= 4u && height >= 1u);
237 ocean_assert(width % 4u == 0u);
239 const unsigned int sourceStrideElements = width * 5u / 4u + sourcePaddingElements;
240 const unsigned int targetStrideElements = width * 1u + targetPaddingElements;
242 constexpr
void* options =
nullptr;
244 const bool areContinuous = sourcePaddingElements == 0u && targetPaddingElements == 0u;
246 FrameConverter::convertGenericPixelFormat(source, target, width, height, sourceStrideElements, targetStrideElements, flag,
convertRowY10_PackedToY8Linear, CV::FrameChannels::reverseRowPixelOrderInPlace<uint8_t, 1u>, areContinuous, options, worker);
251 ocean_assert(source !=
nullptr && target !=
nullptr);
252 ocean_assert(width >= 4u && height >= 1u);
253 ocean_assert(width % 4u == 0u);
255 ocean_assert(gamma > 0.0f && gamma < 2.0f);
257 const unsigned int sourceStrideElements = width * 5u / 4u + sourcePaddingElements;
258 const unsigned int targetStrideElements = width * 1u + targetPaddingElements;
262 const bool areContinuous = sourcePaddingElements == 0u && targetPaddingElements == 0u;
264 FrameConverter::convertGenericPixelFormat(source, target, width, height, sourceStrideElements, targetStrideElements, flag,
convertRowY10_PackedToY8GammaLUT, CV::FrameChannels::reverseRowPixelOrderInPlace<uint8_t, 1u>, areContinuous, options, worker);
269 ocean_assert(source !=
nullptr && target !=
nullptr);
270 ocean_assert(width >= 4u && height >= 1u);
271 ocean_assert(width % 4u == 0u);
273 const unsigned int sourceStrideElements = width * 5u / 4u + sourcePaddingElements;
274 const unsigned int targetStrideElements = width * 1u + targetPaddingElements;
276 constexpr
void* options =
nullptr;
278 const bool areContinuous = sourcePaddingElements == 0u && targetPaddingElements == 0u;
280 FrameConverter::convertGenericPixelFormat(source, target, width, height, sourceStrideElements, targetStrideElements, flag,
convertRowY10_PackedToY10, CV::FrameChannels::reverseRowPixelOrderInPlace<uint16_t, 1u>, areContinuous, options, worker);
285 convertY10_PackedToRGB24(source, target, width, height, flag, sourcePaddingElements, targetPaddingElements, worker);
290 ocean_assert(source !=
nullptr && target !=
nullptr);
291 ocean_assert(width >= 4u && height >= 1u);
292 ocean_assert(width % 4u == 0u);
294 const unsigned int sourceStrideElements = width * 5u / 4u + sourcePaddingElements;
295 const unsigned int targetStrideElements = width * 3u + targetPaddingElements;
297 constexpr
void* options =
nullptr;
299 const bool areContinuous = sourcePaddingElements == 0u && targetPaddingElements == 0u;
301 FrameConverter::convertGenericPixelFormat(source, target, width, height, sourceStrideElements, targetStrideElements, flag,
convertRowY10_PackedToYYY24Linear, CV::FrameChannels::reverseRowPixelOrderInPlace<uint8_t, 3u>, areContinuous, options, worker);
// Converts one Y10_Packed row to Y8 by applying a 3-step piecewise-linear gamma approximation:
// result = m0*x for x <= tStep01, m1*x + c1 for x <= tStep12, else m2*x + c2 (slopes scaled by 256).
// NOTE(review): this span is an extraction fragment — the function signature, braces, several
// declarations (e.g. the x[4] array, result256) and the pointer increments were lost; do not
// assume the visible lines are complete.
304 template <
unsigned int tStep01,
unsigned int tStep12>
// the two segment boundaries must be strictly ordered within the 10-bit value range
307 static_assert(0u < tStep01 && tStep01 < tStep12 && tStep12 < 1023u,
"Invalid steps");
309 ocean_assert(source !=
nullptr && target !=
nullptr);
// rows are processed in groups of four pixels (five packed bytes)
310 ocean_assert(size >= 4 && size % 4 == 0);
311 ocean_assert(parameters !=
nullptr);
// parameters carries the five interpolation coefficients as int32: m0*256, m1*256, m2*256, c1, c2
316 const int* coefficients =
reinterpret_cast<const int*
>(parameters);
318 const int32_t m0_256 = coefficients[0];
319 const int32_t m1_256 = coefficients[1];
320 const int32_t m2_256 = coefficients[2];
322 const int32_t c1 = coefficients[3];
323 const int32_t c2 = coefficients[4];
// number of 4-pixel blocks handled by the scalar fallback loop below
325 size_t blocks4 = size / size_t(4);
327 #if defined(OCEAN_HARDWARE_NEON_VERSION) && OCEAN_HARDWARE_NEON_VERSION >= 10
// NEON path: process 16 pixels (20 packed bytes) at a time
329 const size_t blocks16 = size / size_t(16);
// broadcast the slope coefficients into 4-lane vectors for vmull_s16
331 const int16x4_t m0_256_s_16x4 = vdup_n_s16(int16_t(m0_256));
332 const int16x4_t m1_256_s_16x4 = vdup_n_s16(int16_t(m1_256));
333 const int16x4_t m2_256_s_16x4 = vdup_n_s16(int16_t(m2_256));
// broadcast the offsets into 8-lane vectors
335 const int16x8_t c1_s_16x8 = vdupq_n_s16(int16_t(c1));
336 const int16x8_t c2_s_16x8 = vdupq_n_s16(int16_t(c2));
338 for (
size_t n = 0; n < blocks16; ++n)
// NOTE(review): the loop body's source/target pointer advances (presumably +20/+16) are missing from this extract
340 convert16PixelY10_PackedToY8ApproximatedNEON<tStep01, tStep12>(source, m0_256_s_16x4, m1_256_s_16x4, m2_256_s_16x4, c1_s_16x8, c2_s_16x8, target);
// the scalar loop only handles the remainder not covered by the 16-pixel NEON blocks
346 blocks4 = (size - blocks16 * size_t(16)) /
size_t(4);
347 ocean_assert(blocks4 <= size /
size_t(4));
// pre-scale the offsets so the scalar path works entirely in the *256 fixed-point domain
353 const int32_t c1_256 = c1 * 256;
354 const int32_t c2_256 = c2 * 256;
356 for (
size_t n = 0; n < blocks4; ++n)
// unpack four 10-bit values: byte i holds the 8 MSBs of pixel i, byte 4 holds the four 2-bit LSB pairs
360 int32_t(uint16_t(source[0]) << uint16_t(2) | (uint16_t(source[4]) & uint16_t(0b00000011))),
361 int32_t(uint16_t(source[1]) << uint16_t(2) | ((uint16_t(source[4]) & uint16_t(0b00001100)) >> uint16_t(2))),
362 int32_t(uint16_t(source[2]) << uint16_t(2) | ((uint16_t(source[4]) & uint16_t(0b00110000)) >> uint16_t(4))),
363 int32_t(uint16_t(source[3]) << uint16_t(2) | (uint16_t(source[4]) >> uint16_t(6)))
366 for (
unsigned int i = 0u; i < 4u; ++i)
368 const uint32_t& xx = x[i];
// segment 0: x <= tStep01 (the `if` line is missing from this extract)
372 result256 = (m0_256 * xx);
374 else if (xx <= tStep12)
// segment 1: tStep01 < x <= tStep12
376 result256 = (m1_256 * xx + c1_256);
// segment 2: x > tStep12
380 result256 = (m2_256 * xx + c2_256);
// result256 is the 8-bit result in *256 fixed point
383 ocean_assert(0 <= result256 && result256 <= 255 * 256);
// round to nearest and drop the fixed-point scaling
// NOTE(review): the value is in [0, 255], so the int8_t cast looks wrong — likely should be
// uint8_t (verify against the original header)
385 target[i] = int8_t((uint32_t(result256) + 128u) >> 8u);
// NEON conversion of 16 Y10_Packed pixels (20 bytes) to 16 Y8 pixels: gathers the 16 MSB bytes
// of the packed layout (every 5th byte — the LSB byte — is skipped) and stores them directly.
// NOTE(review): this span is an extraction fragment — the function signature and the
// architecture-selection preprocessor lines (presumably #if __aarch64__ / #else / #endif)
// separating the two paths below are missing; the duplicated variable names prove it.
393 #if defined(OCEAN_HARDWARE_NEON_VERSION) && OCEAN_HARDWARE_NEON_VERSION >= 10
// --- path A (vqtbl1q_u8 is AArch64-only) ---
399 const uint8x16_t packedA_u_8x16 = vld1q_u8(source);
400 const uint8x8_t packedB_u_8x8 = vld1_u8(source + 12);
// pick the MSB bytes of the first 12 pixels (indices skipping bytes 4 and 9); index 16 yields 0
404 constexpr uint8x16_t shuffle_u_8x16 = {16u, 16u, 16u, 16u, 0u, 1u, 2u, 3u, 5u, 6u, 7u, 8u, 10u, 11u, 12u, 13u};
405 const uint8x16_t intermediateA_u_8x16 = vqtbl1q_u8(packedA_u_8x16, shuffle_u_8x16);
// rotate the tail load so the last four MSB bytes line up for the final vextq
407 const uint8x8_t intermediateB_u_8x8 = vext_u8(packedB_u_8x8, packedB_u_8x8, 3);
409 const uint8x16_t target_u_8x16 = vextq_u8(intermediateA_u_8x16, vcombine_u8(intermediateB_u_8x8, intermediateB_u_8x8), 4);
// --- path B (ARMv7 fallback using 8-byte vtbl1_u8) ---
// mask selecting the last three result bytes, which come from the separate tail load
413 constexpr uint8x16_t mask_u_8x16 = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0xFFu, 0xFFu, 0xFFu};
415 const uint8x16_t packedA_u_8x16 = vld1q_u8(source);
416 const uint8x8_t packedB_u_8x8 = vld1_u8(source + 11);
418 const uint8x8_t packedAA_u_8x8 = vget_low_u8(packedA_u_8x16);
419 const uint8x8_t packedAB_u_8x8 = vget_high_u8(packedA_u_8x16);
// per-half shuffles skipping the LSB bytes; out-of-range index 8 yields 0 with vtbl1_u8
421 constexpr uint8x8_t shuffleA_u_8x8 = {8u, 0u, 1u, 2u, 3u, 5u, 6u, 7u};
422 constexpr uint8x8_t shuffleB_u_8x8 = {0u, 2u, 3u, 4u, 5u, 7u, 8u, 8u};
423 const uint8x16_t intermediateA_u_8x16 = vextq_u8(vcombine_u8(vtbl1_u8(packedAA_u_8x8, shuffleA_u_8x8), vtbl1_u8(packedAB_u_8x8, shuffleB_u_8x8)), mask_u_8x16, 1);
// merge in the tail bytes selected by the mask
425 const uint8x16_t intermediateB_u_8x16 = vcombine_u8(vget_low_u8(mask_u_8x16), vand_u8(packedB_u_8x8, vget_high_u8(mask_u_8x16)));
427 const uint8x16_t target_u_8x16 = vorrq_u8(intermediateA_u_8x16, intermediateB_u_8x16);
// store the 16 Y8 result pixels
431 vst1q_u8(target, target_u_8x16);
// NEON conversion of 16 Y10_Packed pixels (20 bytes) to 16 Y8 pixels via the 3-step
// piecewise-linear gamma approximation: unpack to 10-bit, compute all three linear segments,
// then blend the results with per-lane comparison masks.
// NOTE(review): this span is an extraction fragment — the function signature, braces and the
// architecture #if/#else/#endif lines are missing, and the slope vectors used below
// (m0_s_16x4, m1_s_16x4, m2_s_16x4) are referenced without visible definitions (presumably
// derived from the m*_256_s_16x4 parameters); do not assume the visible lines are complete.
434 template <
unsigned int tStep01,
unsigned int tStep12>
437 static_assert(0u < tStep01 && tStep01 < tStep12 && tStep12 < 1023u,
"Invalid steps");
// per-byte left shifts aligning each pixel's MSB/LSB pair within a 16-bit lane
439 constexpr int8x16_t leftShifts_s_8x16 = {6, 0, 4, 0, 2, 0, 0, 0, 6, 0, 4, 0, 2, 0, 0, 0};
// negative shift counts = right shift with vshlq_u16, normalizing each lane to the 10-bit value
440 constexpr int16x8_t rightShifts_s_16x8 = {-6, -6, -6, -6, -6, -6, -6, -6};
// --- AArch64 path (vqtbl1q_u8) ---
444 const uint8x16_t packedAB_u_8x16 = vld1q_u8(source);
445 const uint8x16_t packedCD_u_8x16 = vld1q_u8(source + 4);
// interleave each group's LSB byte (index 4 resp. 9) with the four MSB bytes
449 constexpr uint8x16_t shuffleAB_u_8x16 = {4u, 0u, 4u, 1u, 4u, 2u, 4u, 3u, 9u, 5u, 9u, 6u, 9u, 7u, 9u, 8u};
450 const uint8x16_t intermediateAB_u_8x16 = vqtbl1q_u8(packedAB_u_8x16, shuffleAB_u_8x16);
452 constexpr uint8x16_t shuffleCD_u_8x16 = {10u, 6u, 10u, 7u, 10u, 8u, 10u, 9u, 15u, 11u, 15u, 12u, 15u, 13u, 15u, 14u};
453 const uint8x16_t intermediateCD_u_8x16 = vqtbl1q_u8(packedCD_u_8x16, shuffleCD_u_8x16);
// --- ARMv7 fallback path (8-byte vtbl1_u8 lookups) ---
457 constexpr uint8x8_t shuffleAB_u_8x8 = {4u, 0u, 4u, 1u, 4u, 2u, 4u, 3u};
458 constexpr uint8x8_t shuffleC_u_8x8 = {6u, 2u, 6u, 3u, 6u, 4u, 6u, 5u};
459 constexpr uint8x8_t shuffleD_u_8x8 = {7u, 3u, 7u, 4u, 7u, 5u, 7u, 6u};
461 const uint8x16_t packedAB_u_8x16 = vld1q_u8(source);
462 const uint8x8_t packedForD_u_8x8 = vld1_u8(source + 12);
464 const uint8x8_t packedForA_u_8x8 = vget_low_u8(packedAB_u_8x16);
465 const uint8x8_t packedForB_u_8x8 = vget_low_u8(vextq_u8(packedAB_u_8x16, packedAB_u_8x16, 5));
466 const uint8x8_t packedForC_u_8x8 = vget_high_u8(packedAB_u_8x16);
468 const uint8x16_t intermediateAB_u_8x16 = vcombine_u8(vtbl1_u8(packedForA_u_8x8, shuffleAB_u_8x8), vtbl1_u8(packedForB_u_8x8, shuffleAB_u_8x8));
469 const uint8x16_t intermediateCD_u_8x16 = vcombine_u8(vtbl1_u8(packedForC_u_8x8, shuffleC_u_8x8), vtbl1_u8(packedForD_u_8x8, shuffleD_u_8x8));
// shift the LSB byte of each lane into position …
476 const uint16x8_t intermediateAB_u_16x8 = vreinterpretq_u16_u8(vshlq_u8(intermediateAB_u_8x16, leftShifts_s_8x16));
477 const uint16x8_t intermediateCD_u_16x8 = vreinterpretq_u16_u8(vshlq_u8(intermediateCD_u_8x16, leftShifts_s_8x16));
// … then shift the whole lane right by 6, leaving the plain 10-bit value per 16-bit lane
482 const uint16x8_t unpackedAB_u_16x8 = vshlq_u16(intermediateAB_u_16x8, rightShifts_s_16x8);
483 const uint16x8_t unpackedCD_u_16x8 = vshlq_u16(intermediateCD_u_16x8, rightShifts_s_16x8);
// broadcast the two segment boundaries for the lane-wise comparisons
492 constexpr int16x8_t step01_s_16x8 = {int32_t(tStep01), int32_t(tStep01), int32_t(tStep01), int32_t(tStep01), int32_t(tStep01), int32_t(tStep01), int32_t(tStep01), int32_t(tStep01)};
493 constexpr int16x8_t step12_s_16x8 = {int32_t(tStep12), int32_t(tStep12), int32_t(tStep12), int32_t(tStep12), int32_t(tStep12), int32_t(tStep12), int32_t(tStep12), int32_t(tStep12)};
// mask of lanes falling into segment 0 (x <= tStep01)
497 const uint16x8_t isWithin0AB_u_16x8 = vcleq_s16(vreinterpretq_s16_u16(unpackedAB_u_16x8), step01_s_16x8);
498 const uint16x8_t isWithin0CD_u_16x8 = vcleq_s16(vreinterpretq_s16_u16(unpackedCD_u_16x8), step01_s_16x8);
499 const uint8x16_t isWithin0_u_8x16 = vcombine_u8(vmovn_u16(isWithin0AB_u_16x8), vmovn_u16(isWithin0CD_u_16x8));
// mask of lanes falling into segment 2 (x > tStep12)
501 const uint16x8_t isWithin2AB_u_16x8 = vcgtq_s16(vreinterpretq_s16_u16(unpackedAB_u_16x8), step12_s_16x8);
502 const uint16x8_t isWithin2CD_u_16x8 = vcgtq_s16(vreinterpretq_s16_u16(unpackedCD_u_16x8), step12_s_16x8);
503 const uint8x16_t isWithin2_u_8x16 = vcombine_u8(vmovn_u16(isWithin2AB_u_16x8), vmovn_u16(isWithin2CD_u_16x8));
// segment 1 is everything not in segment 0 or 2
505 const uint8x16_t isWithin1_u_8x16 = vmvnq_u8(vorrq_u8(isWithin0_u_8x16, isWithin2_u_8x16));
// split the unpacked values into 4-lane halves for the widening multiplies
508 const int16x4_t unpackedA_s_16x4 = vreinterpret_s16_u16(vget_low_u8(unpackedAB_u_16x8));
509 const int16x4_t unpackedB_s_16x4 = vreinterpret_s16_u16(vget_high_u8(unpackedAB_u_16x8));
510 const int16x4_t unpackedC_s_16x4 = vreinterpret_s16_u16(vget_low_u8(unpackedCD_u_16x8));
511 const int16x4_t unpackedD_s_16x4 = vreinterpret_s16_u16(vget_high_u8(unpackedCD_u_16x8));
// segment 0 result: (m0 * x) with rounding narrow shift dropping the *256 fixed-point scale
514 const uint16x8_t resultAB0_u_16x8 = vcombine_u16(vqrshrun_n_s32(vmull_s16(m0_s_16x4, unpackedA_s_16x4), 8), vqrshrun_n_s32(vmull_s16(m0_s_16x4, unpackedB_s_16x4), 8));
515 const uint16x8_t resultCD0_u_16x8 = vcombine_u16(vqrshrun_n_s32(vmull_s16(m0_s_16x4, unpackedC_s_16x4), 8), vqrshrun_n_s32(vmull_s16(m0_s_16x4, unpackedD_s_16x4), 8));
// segment 1 result: (m1 * x) >> 8 + c1
518 const int16x8_t resultAB1_s_16x8 = vaddq_s16(c1_s_16x8, vcombine_s16(vrshrn_n_s32(vmull_s16(m1_s_16x4, unpackedA_s_16x4), 8), vrshrn_n_s32(vmull_s16(m1_s_16x4, unpackedB_s_16x4), 8)));
519 const int16x8_t resultCD1_s_16x8 = vaddq_s16(c1_s_16x8, vcombine_s16(vrshrn_n_s32(vmull_s16(m1_s_16x4, unpackedC_s_16x4), 8), vrshrn_n_s32(vmull_s16(m1_s_16x4, unpackedD_s_16x4), 8)));
// segment 2 result: (m2 * x) >> 8 + c2
522 const int16x8_t resultAB2_s_16x8 = vaddq_s16(c2_s_16x8, vcombine_s16(vrshrn_n_s32(vmull_s16(m2_s_16x4, unpackedA_s_16x4), 8), vrshrn_n_s32(vmull_s16(m2_s_16x4, unpackedB_s_16x4), 8)));
523 const int16x8_t resultCD2_s_16x8 = vaddq_s16(c2_s_16x8, vcombine_s16(vrshrn_n_s32(vmull_s16(m2_s_16x4, unpackedC_s_16x4), 8), vrshrn_n_s32(vmull_s16(m2_s_16x4, unpackedD_s_16x4), 8)));
// saturate each segment's result to 8 bit
525 const uint8x16_t result0_u_8x16 = vcombine_u8(vqmovn_u16(resultAB0_u_16x8), vqmovn_u16(resultCD0_u_16x8));
526 const uint8x16_t result1_u_8x16 = vcombine_u8(vqmovun_s16(resultAB1_s_16x8), vqmovun_s16(resultCD1_s_16x8));
527 const uint8x16_t result2_u_8x16 = vcombine_u8(vqmovun_s16(resultAB2_s_16x8), vqmovun_s16(resultCD2_s_16x8));
// select each lane's result from the segment it belongs to
531 const uint8x16_t result_u_8x16 = vorrq_u8(vorrq_u8(vandq_u8(result0_u_8x16, isWithin0_u_8x16), vandq_u8(result1_u_8x16, isWithin1_u_8x16)), vandq_u8(result2_u_8x16, isWithin2_u_8x16));
// store the 16 Y8 result pixels
533 vst1q_u8(target, result_u_8x16);
This is the base class for all frame converter classes.
Definition: FrameConverter.h:32
ConversionFlag
Definition of individual conversion flags.
Definition: FrameConverter.h:39
static void convertGenericPixelFormat(const TSource *source, TTarget *target, const unsigned int width, const unsigned int height, const unsigned int sourceStrideElements, const unsigned int targetStrideElements, const ConversionFlag flag, const RowConversionFunction< TSource, TTarget > rowConversionFunction, const RowReversePixelOrderInPlaceFunction< TTarget > targetReversePixelOrderInPlaceFunction, const bool areContinuous, const void *options, Worker *worker)
Converts a frame with generic pixel format (e.g., RGBA32, BGR24, YUV24, ...) to a frame with generic ...
Definition: FrameConverter.h:3211
This class implements the manager for lookup tables.
Definition: FrameConverterY10_Packed.h:39
const uint8_t * lookupTable(const float gamma)
Returns the lookup table for a gamma compression/correction function.
std::unordered_map< float, Memory > LookupTables
Definition of a map mapping gamma values to the memory of lookup tables.
Definition: FrameConverterY10_Packed.h:43
LookupTables lookupTables_
The lookup tables.
Definition: FrameConverterY10_Packed.h:61
Lock lock_
The lock of the manager.
Definition: FrameConverterY10_Packed.h:64
This class provides functions to convert frames with Y10_PACKED pixel format.
Definition: FrameConverterY10_Packed.h:32
static void convertY10_PackedToBGR24(const uint8_t *const source, uint8_t *const target, const unsigned int width, const unsigned int height, const ConversionFlag flag, const unsigned int sourcePaddingElements, const unsigned int targetPaddingElements, Worker *worker=nullptr)
Converts a Y10_PACKED frame to a BGR24 frame.
Definition: FrameConverterY10_Packed.h:283
static void convertY10_PackedToY10(const uint8_t *const source, uint16_t *const target, const unsigned int width, const unsigned int height, const ConversionFlag flag, const unsigned int sourcePaddingElements, const unsigned int targetPaddingElements, Worker *worker=nullptr)
Converts a Y10_PACKED frame to a Y10 frame, so that this function simply unpacks the 10 bits.
Definition: FrameConverterY10_Packed.h:267
static void convertY10_PackedToY8GammaApproximated(const uint8_t *const source, uint8_t *const target, const unsigned int width, const unsigned int height, const ConversionFlag flag, const float gamma, const unsigned int sourcePaddingElements, const unsigned int targetPaddingElements, Worker *worker=nullptr)
Converts a Y10_PACKED frame to a Y8 frame by applying gamma compression/correction using a 3-step lin...
static void convertRowY10_PackedToY8GammaLUT(const uint8_t *source, uint8_t *target, const size_t size, const void *parameters)
Converts a Y10_Packed row to a Y8 row by applying gamma compression/correction with a lookup table.
static OCEAN_FORCE_INLINE void convert16PixelY10_PackedToY8LinearNEON(const uint8_t *const source, uint8_t *const target)
Converts 16 pixels (20 elements) of a Y10_Packed buffer to 16 Y8 pixels by applying a linear conversio...
Definition: FrameConverterY10_Packed.h:395
static void convertRowY10_PackedToY10(const uint8_t *source, uint16_t *target, const size_t size, const void *unusedParameters=nullptr)
Converts a Y10_Packed row to a Y10 row.
static void convertRowY10_PackedToY8Linear(const uint8_t *source, uint8_t *target, const size_t size, const void *unusedParameters=nullptr)
Converts a Y10_Packed row to a Y8 row.
static OCEAN_FORCE_INLINE void convert16PixelY10_PackedToY8ApproximatedNEON(const uint8_t *const source, const int16x4_t &m0_256_s_16x4, const int16x4_t &m1_256_s_16x4, const int16x4_t &m2_256_s_16x4, const int16x8_t &c1_s_16x8, const int16x8_t &c2_s_16x8, uint8_t *const target)
Converts 16 pixels (20 elements) of a Y10_Packed buffer to 16 Y8 pixels by applying gamma compression...
Definition: FrameConverterY10_Packed.h:435
static void convertY10_PackedToY8Linear(const uint8_t *const source, uint8_t *const target, const unsigned int width, const unsigned int height, const ConversionFlag flag, const unsigned int sourcePaddingElements, const unsigned int targetPaddingElements, Worker *worker=nullptr)
Converts a Y10_PACKED frame to a Y8 frame.
Definition: FrameConverterY10_Packed.h:233
static void convertY10_PackedToY8GammaLUT(const uint8_t *const source, uint8_t *const target, const unsigned int width, const unsigned int height, const ConversionFlag flag, const float gamma, const unsigned int sourcePaddingElements, const unsigned int targetPaddingElements, Worker *worker=nullptr)
Converts a Y10_PACKED frame to a Y8 frame by applying gamma compression/correction using a lookup tab...
Definition: FrameConverterY10_Packed.h:249
static void convertRowY10_PackedToYYY24Linear(const uint8_t *source, uint8_t *target, const size_t size, const void *unusedParameters=nullptr)
Converts a Y10_Packed row to a RGB24 row, or BGR24 row.
static void convertRowY10_PackedToY8GammaApproximated(const uint8_t *source, uint8_t *target, const size_t size, const void *parameters)
Converts a Y10_Packed row to a Y8 row by applying gamma compression/correction with a 3-step linear i...
Definition: FrameConverterY10_Packed.h:305
static void convertY10_PackedToRGB24(const uint8_t *const source, uint8_t *const target, const unsigned int width, const unsigned int height, const ConversionFlag flag, const unsigned int sourcePaddingElements, const unsigned int targetPaddingElements, Worker *worker=nullptr)
Converts a Y10_PACKED frame to a RGB24 frame.
Definition: FrameConverterY10_Packed.h:288
This class implements a recursive lock object.
Definition: Lock.h:31
This template class is the base class for all singleton objects.
Definition: Singleton.h:71
static LookupTableManager & get()
Returns a reference to the unique object.
Definition: Singleton.h:115
This class implements a worker able to distribute function calls over different threads.
Definition: Worker.h:33
The namespace covering the entire Ocean framework.
Definition: Accessor.h:15