Commit bae04ee8 authored by Damien Fouilleul

- video_chroma: added support for IA-32/64 SSE2 acceleration (128-bit vector integer registers), needs LOADS of testing
parent 9af41291
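For orientation: the conversion this commit accelerates packs two rows of planar 4:2:0 YUV into packed YUYV using 128-bit XMM registers. A minimal sketch of that interleave written with <emmintrin.h> intrinsics follows (illustrative only; the commit itself uses inline assembly, and the function name and signature here are made up for the example):

/* Illustrative sketch, not part of this commit: roughly what the new
 * SSE2_YUV420_YUYV_* asm macros do, expressed with SSE2 intrinsics. */
#include <emmintrin.h>
#include <stdint.h>

static void yuv420_to_yuyv_16px_sse2( const uint8_t *p_y1, const uint8_t *p_y2,
                                      const uint8_t *p_u,  const uint8_t *p_v,
                                      uint8_t *p_line1, uint8_t *p_line2 )
{
    /* 16 luma samples per row share 8 Cb and 8 Cr samples (4:2:0) */
    __m128i y1 = _mm_loadu_si128( (const __m128i *)p_y1 );
    __m128i y2 = _mm_loadu_si128( (const __m128i *)p_y2 );
    __m128i u  = _mm_loadl_epi64( (const __m128i *)p_u );
    __m128i v  = _mm_loadl_epi64( (const __m128i *)p_v );

    /* u0 v0 u1 v1 .. u7 v7, same effect as "punpcklbw %%xmm2, %%xmm1" below */
    __m128i uv = _mm_unpacklo_epi8( u, v );

    /* interleave luma with chroma into Y0 U0 Y1 V0 .. (YUYV byte order),
     * 32 output bytes per row for 16 input luma bytes */
    _mm_storeu_si128( (__m128i *)(p_line1     ), _mm_unpacklo_epi8( y1, uv ) );
    _mm_storeu_si128( (__m128i *)(p_line1 + 16), _mm_unpackhi_epi8( y1, uv ) );
    _mm_storeu_si128( (__m128i *)(p_line2     ), _mm_unpacklo_epi8( y2, uv ) );
    _mm_storeu_si128( (__m128i *)(p_line2 + 16), _mm_unpackhi_epi8( y2, uv ) );
}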
@@ -1269,22 +1269,14 @@ MMXEXT_MODULES="memcpymmxext"
#MMXEXT_MODULES="${MMXEXT_MODULES} idctmmxext motionmmxext"
THREEDNOW_MODULES="memcpy3dn"
SSE_MODULES=""
SSE2_MODULES=""
ALTIVEC_MODULES="memcpyaltivec i420_yuy2_altivec"
#ALTIVEC_MODULES="${ALTIVEC_MODULES} idctaltivec motionaltivec"
if test "${enable_gprof}" != "yes" -a "${enable_debug}" != "yes"
then
MMX_MODULES="${MMX_MODULES} i420_yuy2_mmx"
fi
AC_CACHE_CHECK([if \$CC groks MMX inline assembly],
[ac_cv_mmx_inline],
[CFLAGS="${CFLAGS_save}"
AC_TRY_COMPILE(,[void *p;asm volatile("packuswb %%mm1,%%mm2"::"r"(p));],
ac_cv_mmx_inline=yes, ac_cv_mmx_inline=no)])
if test "${ac_cv_mmx_inline}" != "no"; then
AC_DEFINE(CAN_COMPILE_MMX, 1, Define if \$CC groks MMX inline assembly.)
ACCEL_MODULES="${ACCEL_MODULES} ${MMX_MODULES}"
SSE2_MODULES="${SSE2_MODULES} i420_yuy2_sse2"
fi
dnl Check for fully working MMX intrinsics
@@ -1312,6 +1304,41 @@ if test "${ac_cv_c_mmx_intrinsics}" != "no"; then
VLC_ADD_CFLAGS([i420_rgb_mmx],[-mmmx])
fi
dnl Check for fully working SSE2 intrinsics
dnl We need support for -msse2, we need <emmintrin.h>, and we also need a
dnl working compiler (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=23963)
AC_CACHE_CHECK([if \$CC groks SSE2 intrinsics],
[ac_cv_c_sse2_intrinsics],
[CFLAGS="${CFLAGS_save} -O -msse2"
AC_TRY_COMPILE([#include <emmintrin.h>
#include <stdint.h>
uint64_t frobzor;],
[__m128i a, b, c;
a = b = c = _mm_set1_epi64((__m64)frobzor);
a = _mm_slli_epi16(a, 3);
a = _mm_adds_epi16(a, b);
c = _mm_srli_epi16(c, 8);
c = _mm_slli_epi16(c, 3);
b = _mm_adds_epi16(b, c);
a = _mm_unpacklo_epi8(a, b);
frobzor = (uint64_t)_mm_movepi64_pi64(a);],
[ac_cv_c_sse2_intrinsics=yes],
[ac_cv_c_sse2_intrinsics=no])])
if test "${ac_cv_c_sse2_intrinsics}" != "no"; then
AC_DEFINE(HAVE_SSE2_INTRINSICS, 1, Define if SSE2 intrinsics are available.)
dnl VLC_ADD_CFLAGS([i420_rgb_sse2],[-msse2])
fi
AC_CACHE_CHECK([if \$CC groks MMX inline assembly],
[ac_cv_mmx_inline],
[CFLAGS="${CFLAGS_save}"
AC_TRY_COMPILE(,[void *p;asm volatile("packuswb %%mm1,%%mm2"::"r"(p));],
ac_cv_mmx_inline=yes, ac_cv_mmx_inline=no)])
if test "${ac_cv_mmx_inline}" != "no"; then
AC_DEFINE(CAN_COMPILE_MMX, 1, Define if \$CC groks MMX inline assembly.)
ACCEL_MODULES="${ACCEL_MODULES} ${MMX_MODULES}"
fi
AC_CACHE_CHECK([if \$CC groks MMX EXT inline assembly],
[ac_cv_mmxext_inline],
[CFLAGS="${CFLAGS_save}"
@@ -1342,6 +1369,16 @@ if test "${ac_cv_sse_inline}" != "no" -a "${SYS}" != "solaris"; then
ACCEL_MODULES="${ACCEL_MODULES} ${SSE_MODULES}"
fi
AC_CACHE_CHECK([if \$CC groks SSE2 inline assembly],
[ac_cv_sse2_inline],
[CFLAGS="${CFLAGS_save}"
AC_TRY_COMPILE(,[void *p;asm volatile("punpckhqdq %%xmm1,%%xmm2"::"r"(p));],
ac_cv_sse2_inline=yes, ac_cv_sse2_inline=no)])
if test "${ac_cv_sse2_inline}" != "no" -a "${SYS}" != "solaris"; then
AC_DEFINE(CAN_COMPILE_SSE2, 1, Define if \$CC groks SSE2 inline assembly.)
ACCEL_MODULES="${ACCEL_MODULES} ${SSE2_MODULES}"
fi
if test "${SYS}" != "mingw32" -a "${SYS}" != "mingwce"; then
AC_CACHE_CHECK([if \$CC groks AltiVec inline assembly],
[ac_cv_altivec_inline],
@@ -1494,6 +1531,11 @@ then
ARCH="${ARCH} mmx"
VLC_ADD_BUILTINS([${ACCEL_MODULES}])
fi
if test "${host_cpu}" = "i686" -o "${host_cpu}" = "x86_64"
then
ARCH="${ARCH} sse sse2"
VLC_ADD_BUILTINS([${ACCEL_MODULES}])
fi
dnl
dnl Memory usage
......
@@ -23,6 +23,11 @@ SOURCES_i420_yuy2_mmx = \
i420_yuy2.h \
$(NULL)
SOURCES_i420_yuy2_sse2 = \
i420_yuy2.c \
i420_yuy2.h \
$(NULL)
SOURCES_i420_yuy2_altivec = \
i420_yuy2.c \
i420_yuy2.h \
......
@@ -5,6 +5,7 @@
* $Id$
*
* Authors: Samuel Hocevar <sam@zoy.org>
* Damien Fouilleul <damien@videolan.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,6 +43,8 @@
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,IUYV,cyuv,Y211"
#elif defined (MODULE_NAME_IS_i420_yuy2_mmx)
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,IUYV,cyuv"
#elif defined (MODULE_NAME_IS_i420_yuy2_sse2)
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,IUYV,cyuv"
#elif defined (MODULE_NAME_IS_i420_yuy2_altivec)
# define DEST_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422"
#endif
@@ -63,8 +66,9 @@ static void I420_Y211 ( vout_thread_t *, picture_t *, picture_t * );
#endif
#ifdef MODULE_NAME_IS_i420_yuy2_mmx
static uint64_t i_00ffw;
static uint64_t i_80w;
/* Initialize MMX-specific constants */
static const uint64_t i_00ffw = 0x00ff00ff00ff00ffULL;
static const uint64_t i_80w = 0x0000000080808080ULL;
#endif
/*****************************************************************************
@@ -78,9 +82,10 @@ vlc_module_begin();
set_description( _("MMX conversions from " SRC_FOURCC " to " DEST_FOURCC) );
set_capability( "chroma", 100 );
add_requirement( MMX );
/* Initialize MMX-specific constants */
i_00ffw = 0x00ff00ff00ff00ffULL;
i_80w = 0x0000000080808080ULL;
#elif defined (MODULE_NAME_IS_i420_yuy2_sse2)
set_description( _("SSE2 conversions from " SRC_FOURCC " to " DEST_FOURCC) );
set_capability( "chroma", 120 );
add_requirement( SSE2 );
#elif defined (MODULE_NAME_IS_i420_yuy2_altivec)
set_description(
_("AltiVec conversions from " SRC_FOURCC " to " DEST_FOURCC) );
@@ -125,7 +130,6 @@ static int Activate( vlc_object_t *p_this )
case VLC_FOURCC('Y','4','2','2'):
p_vout->chroma.pf_convert = I420_UYVY;
break;
#if !defined (MODULE_NAME_IS_i420_yuy2_altivec)
case VLC_FOURCC('I','U','Y','V'):
p_vout->chroma.pf_convert = I420_IUYV;
@@ -256,6 +260,7 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
@@ -265,20 +270,23 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
p_y2 += p_source->p[Y_PLANE].i_pitch;
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
for( i_x = p_vout->render.i_width / 2 ; i_x-- ; )
for( i_x = p_vout->render.i_width / 8; i_x-- ; )
{
C_YUV420_YUYV( );
C_YUV420_YUYV( );
C_YUV420_YUYV( );
C_YUV420_YUYV( );
}
#else
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
{
MMX_CALL( MMX_YUV420_YUYV );
}
#endif
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
#endif
p_y1 += i_source_margin;
p_y2 += i_source_margin;
@@ -288,9 +296,77 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
p_line2 += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i420_yuy2_mmx)
__asm__ __volatile__("emms" :: );
#endif
#if defined (MODULE_NAME_IS_i420_yuy2_altivec)
}
#endif
#else // defined(MODULE_NAME_IS_i420_yuy2_sse2)
/*
** SSE2 128-bit fetch/store instructions are faster
** when memory accesses are 16-byte aligned
*/
if( 0 == (15 & (p_source->p[Y_PLANE].i_pitch|p_dest->p->i_pitch|
((int)p_line2|(int)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YUYV_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
#endif // defined(MODULE_NAME_IS_i420_yuy2_sse2)
}
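The alignment test above works because OR-ing both pitches and both row pointers and masking with 15 leaves zero only when every one of them is a multiple of 16, which is what the aligned movdqa variants require. A minimal restatement of that predicate (illustrative only, not part of the diff; the helper name is made up and uintptr_t is used for the pointer cast):

#include <stdint.h>

/* Illustrative helper: any set bit in the low nibble of a pitch or pointer
 * means some 16-byte access would be misaligned, so the movdqu path is used. */
static int use_aligned_sse2( const uint8_t *p_dst, int i_dst_pitch,
                             const uint8_t *p_src, int i_src_pitch )
{
    return 0 == ( 15 & ( (uintptr_t)p_dst | (uintptr_t)p_src |
                         (uintptr_t)i_dst_pitch | (uintptr_t)i_src_pitch ) );
}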
/*****************************************************************************
@@ -393,6 +469,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
@@ -420,9 +497,78 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i420_yuy2_mmx)
__asm__ __volatile__("emms" :: );
#endif
#if defined (MODULE_NAME_IS_i420_yuy2_altivec)
}
#endif
#else // defined(MODULE_NAME_IS_i420_yuy2_sse2)
/*
** SSE2 128-bit fetch/store instructions are faster
** when memory accesses are 16-byte aligned
*/
if( 0 == (15 & (p_source->p[Y_PLANE].i_pitch|p_dest->p->i_pitch|
((int)p_line2|(int)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YVYU_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YVYU_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
#endif // defined(MODULE_NAME_IS_i420_yuy2_sse2)
}
/*****************************************************************************
@@ -525,6 +671,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
@@ -564,6 +711,70 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
#if defined (MODULE_NAME_IS_i420_yuy2_altivec)
}
#endif
#else // defined(MODULE_NAME_IS_i420_yuy2_sse2)
/*
** SSE2 128-bit fetch/store instructions are faster
** when memory accesses are 16-byte aligned
*/
if( 0 == (15 & (p_source->p[Y_PLANE].i_pitch|p_dest->p->i_pitch|
((int)p_line2|(int)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
#endif // defined(MODULE_NAME_IS_i420_yuy2_sse2)
}
#if !defined (MODULE_NAME_IS_i420_yuy2_altivec)
@@ -601,6 +812,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 -= 3 * p_dest->p->i_pitch;
@@ -611,7 +823,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i420_yuy2)
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_UYVY( );
C_YUV420_UYVY( );
C_YUV420_UYVY( );
@@ -628,6 +840,74 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
#if defined (MODULE_NAME_IS_i420_yuy2_mmx)
__asm__ __volatile__("emms" :: );
#endif
#else // defined(MODULE_NAME_IS_i420_yuy2_sse2)
/*
** SSE2 128-bit fetch/store instructions are faster
** when memory accesses are 16-byte aligned
*/
if( 0 == (15 & (p_source->p[Y_PLANE].i_pitch|p_dest->p->i_pitch|
((int)p_line2|(int)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
p_y1 += i_source_margin;
p_y2 += i_source_margin;
p_u += i_source_margin_c;
p_v += i_source_margin_c;
p_line1 += i_dest_margin;
p_line2 += i_dest_margin;
}
}
#endif // defined(MODULE_NAME_IS_i420_yuy2_sse2)
}
#endif // !defined (MODULE_NAME_IS_i420_yuy2_altivec)
@@ -675,4 +955,3 @@ static void I420_Y211( vout_thread_t *p_vout, picture_t *p_source,
}
}
#endif
@@ -5,6 +5,7 @@
* $Id$
*
* Authors: Samuel Hocevar <sam@zoy.org>
* Damien Fouilleul <damien@videolan.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,7 +33,7 @@
: "r" (p_line1), "r" (p_line2), "r" (p_y1), "r" (p_y2), \
"r" (p_u), "r" (p_v) ); \
p_line1 += 16; p_line2 += 16; p_y1 += 8; p_y2 += 8; p_u += 4; p_v += 4; \
} while(0); \
} while(0)
#define MMX_YUV420_YUYV " \n\
movq (%2), %%mm0 # Load 8 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
@@ -111,7 +112,135 @@ punpcklbw %%mm2, %%mm1 # v2 Y6 u2 Y4 v0 Y2 u0 Y0 \n\
movq %%mm1, (%1) # Store YUYV \n\
"
#else
#elif defined( MODULE_NAME_IS_i420_yuy2_sse2 )
/* SSE2 */
#define SSE2_CALL(SSE2_INSTRUCTIONS) \
do { \
__asm__ __volatile__( \
".p2align 3 \n\t" \
SSE2_INSTRUCTIONS \
: \
: "r" (p_line1), "r" (p_line2), "r" (p_y1), "r" (p_y2), \
"r" (p_u), "r" (p_v) ); \
p_line1 += 32; p_line2 += 32; p_y1 += 16; p_y2 += 16; \
p_u += 8; p_v += 8; \
} while(0)
#define SSE2_YUV420_YUYV_ALIGNED " \n\
movdqa (%2), %%xmm0 # Load 16 Y y15 y14 y13 .. y2 y1 y0 \n\
movq (%4), %%xmm1 # Load 8 Cb u7 u6 u5 u4 u3 u2 u1 u0 \n\
movq (%5), %%xmm2 # Load 8 Cr v7 v6 v5 v4 v3 v2 v1 v0 \n\
punpcklbw %%xmm2, %%xmm1 # v7 u7 v6 u6 .. u1 v0 u0 \n\
movdqa %%xmm0, %%xmm2 # y15 y14 y13 .. y2 y1 y0 \n\
punpcklbw %%xmm1, %%xmm2 # v3 y7 u3 .. v0 y1 u0 y0 \n\
movdqa %%xmm2, (%0) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # v3 y7 u3 y6 v2 y5 u2 y4 \n\
movdqa %%xmm0, 16(%0) # Store high YUYV \n\
movdqa (%3), %%xmm0 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movdqa %%xmm0, %%xmm2 # Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
punpcklbw %%xmm1, %%xmm2 # v1 Y3 u1 Y2 v0 Y1 u0 Y0 \n\
movdqa %%xmm2, (%1) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # v3 Y7 u3 Y6 v2 Y5 u2 Y4 \n\
movdqa %%xmm0, 16(%1) # Store high YUYV \n\
"
#define SSE2_YUV420_YUYV_UNALIGNED " \n\
movdqu (%2), %%xmm0 # Load 16 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movq (%4), %%xmm1 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%5), %%xmm2 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%xmm2, %%xmm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movdqa %%xmm0, %%xmm2 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%xmm1, %%xmm2 # v1 y3 u1 y2 v0 y1 u0 y0 \n\
movdqu %%xmm2, (%0) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # v3 y7 u3 y6 v2 y5 u2 y4 \n\
movdqu %%xmm0, 16(%0) # Store high YUYV \n\
movdqu (%3), %%xmm0 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movdqa %%xmm0, %%xmm2 # Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
punpcklbw %%xmm1, %%xmm2 # v1 Y3 u1 Y2 v0 Y1 u0 Y0 \n\
movdqu %%xmm2, (%1) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # v3 Y7 u3 Y6 v2 Y5 u2 Y4 \n\
movdqu %%xmm0, 16(%1) # Store high YUYV \n\
"
#define SSE2_YUV420_YVYU_ALIGNED " \n\
movdqa (%2), %%xmm0 # Load 16 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movq (%4), %%xmm2 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%5), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%xmm2, %%xmm1 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
movdqa %%xmm0, %%xmm2 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%xmm1, %%xmm2 # u1 y3 v1 y2 u0 y1 v0 y0 \n\
movdqa %%xmm2, (%0) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # u3 y7 v3 y6 u2 y5 v2 y4 \n\
movdqa %%xmm0, 16(%0) # Store high YUYV \n\
movdqa (%3), %%xmm0 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movdqa %%xmm0, %%xmm2 # Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
punpcklbw %%xmm1, %%xmm2 # u1 Y3 v1 Y2 u0 Y1 v0 Y0 \n\
movdqa %%xmm2, (%1) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # u3 Y7 v3 Y6 u2 Y5 v2 Y4 \n\
movdqa %%xmm0, 16(%1) # Store high YUYV \n\
"
#define SSE2_YUV420_YVYU_UNALIGNED " \n\
movdqu (%2), %%xmm0 # Load 16 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movq (%4), %%xmm2 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%5), %%xmm1 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%xmm2, %%xmm1 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
movdqu %%xmm0, %%xmm2 # y7 y6 y5 y4 y3 y2 y1 y0 \n\
punpcklbw %%xmm1, %%xmm2 # u1 y3 v1 y2 u0 y1 v0 y0 \n\
movdqu %%xmm2, (%0) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # u3 y7 v3 y6 u2 y5 v2 y4 \n\
movdqu %%xmm0, 16(%0) # Store high YUYV \n\
movdqu (%3), %%xmm0 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movdqu %%xmm0, %%xmm2 # Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
punpcklbw %%xmm1, %%xmm2 # u1 Y3 v1 Y2 u0 Y1 v0 Y0 \n\
movdqu %%xmm2, (%1) # Store low YUYV \n\
punpckhbw %%xmm1, %%xmm0 # u3 Y7 v3 Y6 u2 Y5 v2 Y4 \n\
movdqu %%xmm0, 16(%1) # Store high YUYV \n\
"
#define SSE2_YUV420_UYVY_ALIGNED " \n\
movdqa (%2), %%xmm0 # Load 16 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movdqa (%3), %%xmm3 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq (%4), %%xmm1 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%5), %%xmm2 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%xmm2, %%xmm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movdqa %%xmm1, %%xmm2 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
punpcklbw %%xmm0, %%xmm2 # y3 v1 y2 u1 y1 v0 y0 u0 \n\
movdqa %%xmm2, (%0) # Store low UYVY \n\
movdqa %%xmm1, %%xmm2 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
punpckhbw %%xmm0, %%xmm2 # y3 v1 y2 u1 y1 v0 y0 u0 \n\
movdqa %%xmm2, 16(%0) # Store high UYVY \n\
movdqa %%xmm1, %%xmm2 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
punpcklbw %%xmm3, %%xmm2 # Y3 v1 Y2 u1 Y1 v0 Y0 u0 \n\
movdqa %%xmm2, (%1) # Store low UYVY \n\
punpckhbw %%xmm3, %%xmm1 # Y7 v3 Y6 u3 Y5 v2 Y4 u2 \n\
movdqa %%xmm1, 16(%1) # Store high UYVY \n\
"
#define SSE2_YUV420_UYVY_UNALIGNED " \n\
movdqu (%2), %%xmm0 # Load 16 Y y7 y6 y5 y4 y3 y2 y1 y0 \n\
movdqu (%3), %%xmm3 # Load 16 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 \n\
movq (%4), %%xmm1 # Load 8 Cb 00 00 00 00 u3 u2 u1 u0 \n\
movq (%5), %%xmm2 # Load 8 Cr 00 00 00 00 v3 v2 v1 v0 \n\
punpcklbw %%xmm2, %%xmm1 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
movdqu %%xmm1, %%xmm2 # v3 u3 v2 u2 v1 u1 v0 u0 \n\
punpcklbw %%xmm0, %%xmm2 # y3 v1 y2 u1 y1 v0 y0 u0 \n\
movdqu %%xmm2, (%0) # Store low UYVY \n\
movdqu %%xmm1, %%xmm2 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
punpckhbw %%xmm0, %%xmm2 # y3 v1 y2 u1 y1 v0 y0 u0 \n\
movdqu %%xmm2, 16(%0) # Store high UYVY \n\
movdqu %%xmm1, %%xmm2 # u3 v3 u2 v2 u1 v1 u0 v0 \n\
punpcklbw %%xmm3, %%xmm2 # Y3 v1 Y2 u1 Y1 v0 Y0 u0 \n\
movdqu %%xmm2, (%1) # Store low UYVY \n\
punpckhbw %%xmm3, %%xmm1 # Y7 v3 Y6 u3 Y5 v2 Y4 u2 \n\
movdqu %%xmm1, 16(%1) # Store high UYVY \n\
"
#endif
/* Used in both accelerated and C modules */
#define C_YUV420_YVYU( ) \
*(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++; \
@@ -127,9 +256,7 @@ movq %%mm1, (%1) # Store YUYV \n\
*(p_line2)++ = *(p_y2); p_y2 += 2; \
*(p_line1)++ = *(p_line2)++ = *(p_v) - 0x80; p_v += 2; \
#endif
/* Used in both MMX and C modules */
#define C_YUV420_YUYV( ) \
*(p_line1)++ = *(p_y1)++; *(p_line2)++ = *(p_y2)++; \
*(p_line1)++ = *(p_line2)++ = *(p_u)++; \
......