Commit 2e2178f7 authored by Antoine Cellerier

Chroma API change. Chromas are now normal video filters (almost).

parent efc57198
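For context (this sketch is not part of the diff): the gist of the API change is that a chroma converter no longer hangs off vout_thread_t as a vout_chroma_t with its own pf_convert and chroma_sys_t; it is opened as a regular filter_t and installs the new pf_video_filter_io callback, which converts into a caller-preallocated picture. Below is a minimal sketch of the new module shape, distilled from the hunks that follow; the Dummy* names are illustrative and do not appear in this commit.

/* Sketch only: mirrors the OpenChroma pattern further down in this diff.
 * The Dummy* names are placeholders, not code from the commit. */
static void DummyConvert( filter_t *p_filter,
                          picture_t *p_src, picture_t *p_dest );

static int DummyOpenChroma( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;   /* was a vout_thread_t */

    /* formats now come from the filter, not from p_vout->render/output */
    if( p_filter->fmt_in.video.i_chroma == p_filter->fmt_out.video.i_chroma )
        return VLC_EGENERIC;

    /* chromas fill a preallocated output picture, hence the _io callback */
    p_filter->pf_video_filter_io = DummyConvert;
    return VLC_SUCCESS;
}

The actual OpenChroma/Activate functions in the ffmpeg and i420_rgb hunks below follow this pattern, additionally allocating a filter_sys_t in p_filter->p_sys.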
......@@ -221,7 +221,6 @@ typedef struct aout_filter_t aout_filter_t;
/* Video */
typedef struct vout_thread_t vout_thread_t;
typedef struct vout_sys_t vout_sys_t;
typedef struct chroma_sys_t chroma_sys_t;
typedef video_format_t video_frame_format_t;
typedef struct picture_t picture_t;
......
......@@ -28,6 +28,10 @@
#ifndef _VLC_ES_H
#define _VLC_ES_H 1
/* FIXME: i'm not too sure about this include but it fixes compilation of
* video chromas -- dionoea */
#include "vlc_common.h"
/**
* \file
* This file defines the elementary streams format types
......@@ -130,6 +134,9 @@ struct video_format_t
unsigned int i_frame_rate_base; /**< frame rate denominator */
int i_rmask, i_gmask, i_bmask; /**< color masks for RGB chroma */
int i_rrshift, i_lrshift;
int i_rgshift, i_lgshift;
int i_rbshift, i_lbshift;
video_palette_t *p_palette; /**< video palette from demuxer */
};
......
......@@ -60,6 +60,7 @@ struct filter_t
config_chain_t * p_cfg;
picture_t * ( * pf_video_filter ) ( filter_t *, picture_t * );
void ( * pf_video_filter_io ) ( filter_t *, picture_t *, picture_t * ); /* Used by video filters with a preallocated output buffer (ie chroma conversion modules) */
block_t * ( * pf_audio_filter ) ( filter_t *, block_t * );
void ( * pf_video_blend ) ( filter_t *, picture_t *,
picture_t *, picture_t *,
......
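For context (not part of the diff): the two callbacks above differ in who owns the output picture. pf_video_filter allocates and returns its output, while the new pf_video_filter_io writes into a picture the caller already holds, which is what chroma conversion into the vout's preallocated direct buffers needs. A hedged caller-side sketch, with variable names that are illustrative only:

/* Illustrative caller-side sketch; names are not from this commit. */
picture_t *p_out;

/* regular video filter: the filter allocates the output picture */
p_out = p_filter->pf_video_filter( p_filter, p_in );

/* chroma conversion: the output picture is preallocated by the caller */
p_filter->pf_video_filter_io( p_filter, p_in, p_preallocated_out );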
......@@ -68,6 +68,7 @@
#define VLC_OBJECT_HTTPD_HOST (-30)
#define VLC_OBJECT_INTERACTION (-32)
#define VLC_OBJECT_CHROMA (-33)
#define VLC_OBJECT_GENERIC (-666)
......
/*****************************************************************************
* vlc_video.h: common video definitions
*****************************************************************************
* Copyright (C) 1999 - 2005 the VideoLAN team
* Copyright (C) 1999 - 2008 the VideoLAN team
* $Id$
*
* Authors: Vincent Seguin <seguin@via.ecp.fr>
......@@ -365,31 +365,6 @@ VLC_EXPORT( int, __vout_AllocatePicture,( vlc_object_t *p_this, picture_t *p_pic
* @{
*/
/**
* Chroma conversion function
*
* This is the prototype common to all conversion functions.
* \param p_vout video output thread
* \param p_source source picture
* \param p_dest destination picture
* Picture width and source dimensions must be multiples of 16.
*/
typedef void (vout_chroma_convert_t)( vout_thread_t *,
picture_t *, picture_t * );
typedef struct vout_chroma_t
{
/** conversion functions */
vout_chroma_convert_t *pf_convert;
/** Private module-dependent data */
chroma_sys_t * p_sys; /* private data */
/** Plugin used and shortcuts to access its capabilities */
module_t * p_module;
} vout_chroma_t;
/** Maximum numbers of video filters2 that can be attached to a vout */
#define MAX_VFILTERS 10
......@@ -462,7 +437,7 @@ struct vout_thread_t
picture_heap_t render; /**< rendered pictures */
picture_heap_t output; /**< direct buffers */
bool b_direct; /**< rendered are like direct ? */
vout_chroma_t chroma; /**< translation tables */
filter_t *p_chroma; /**< translation tables */
video_format_t fmt_render; /* render format (from the decoder) */
video_format_t fmt_in; /* input (modified render) format */
......
......@@ -57,7 +57,7 @@ static void ChromaConversion( vout_thread_t *, picture_t *, picture_t * );
* This structure is part of the chroma transformation descriptor, it
* describes the chroma plugin specific properties.
*****************************************************************************/
struct chroma_sys_t
struct filter_sys_t
{
int i_src_vlc_chroma;
int i_src_ffmpeg_chroma;
......@@ -74,50 +74,53 @@ struct chroma_sys_t
*****************************************************************************/
int OpenChroma( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
int i_ffmpeg_chroma[2], i_vlc_chroma[2], i;
/*
* Check the source chroma first, then the destination chroma
*/
i_vlc_chroma[0] = p_vout->render.i_chroma;
i_vlc_chroma[1] = p_vout->output.i_chroma;
i_vlc_chroma[0] = p_filter->fmt_in.video.i_chroma;
i_vlc_chroma[1] = p_filter->fmt_out.video.i_chroma;
for( i = 0; i < 2; i++ )
{
i_ffmpeg_chroma[i] = GetFfmpegChroma( i_vlc_chroma[i] );
if( i_ffmpeg_chroma[i] < 0 ) return VLC_EGENERIC;
}
p_vout->chroma.pf_convert = ChromaConversion;
p_filter->pf_video_filter_io = ChromaConversion;
p_vout->chroma.p_sys = malloc( sizeof( chroma_sys_t ) );
if( p_vout->chroma.p_sys == NULL )
p_filter->p_sys = malloc( sizeof( filter_sys_t ) );
if( p_filter->p_sys == NULL )
{
return VLC_ENOMEM;
}
p_vout->chroma.p_sys->i_src_vlc_chroma = p_vout->render.i_chroma;
p_vout->chroma.p_sys->i_dst_vlc_chroma = p_vout->output.i_chroma;
p_vout->chroma.p_sys->i_src_ffmpeg_chroma = i_ffmpeg_chroma[0];
p_vout->chroma.p_sys->i_dst_ffmpeg_chroma = i_ffmpeg_chroma[1];
p_filter->p_sys->i_src_vlc_chroma = p_filter->fmt_in.video.i_chroma;
p_filter->p_sys->i_dst_vlc_chroma = p_filter->fmt_out.video.i_chroma;
p_filter->p_sys->i_src_ffmpeg_chroma = i_ffmpeg_chroma[0];
p_filter->p_sys->i_dst_ffmpeg_chroma = i_ffmpeg_chroma[1];
if( ( p_vout->render.i_height != p_vout->output.i_height ||
p_vout->render.i_width != p_vout->output.i_width ) &&
( p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('I','4','2','0') ||
p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ))
if( ( p_filter->fmt_in.video.i_height != p_filter->fmt_out.video.i_height ||
p_filter->fmt_in.video.i_width != p_filter->fmt_out.video.i_width ) &&
( p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('I','4','2','0') ||
p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ))
{
msg_Dbg( p_vout, "preparing to resample picture" );
p_vout->chroma.p_sys->p_rsc =
img_resample_init( p_vout->output.i_width, p_vout->output.i_height,
p_vout->render.i_width, p_vout->render.i_height );
avpicture_alloc( &p_vout->chroma.p_sys->tmp_pic,
p_vout->chroma.p_sys->i_dst_ffmpeg_chroma,
p_vout->render.i_width, p_vout->render.i_height );
msg_Dbg( p_filter, "preparing to resample picture" );
p_filter->p_sys->p_rsc =
img_resample_init( p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height );
avpicture_alloc( &p_filter->p_sys->tmp_pic,
p_filter->p_sys->i_dst_ffmpeg_chroma,
p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height );
}
else
{
msg_Dbg( p_vout, "no resampling" );
p_vout->chroma.p_sys->p_rsc = NULL;
msg_Dbg( p_filter, "no resampling" );
p_filter->p_sys->p_rsc = NULL;
}
/* libavcodec needs to be initialized for some chroma conversions */
......@@ -129,7 +132,7 @@ int OpenChroma( vlc_object_t *p_this )
/*****************************************************************************
* ChromaConversion: actual chroma conversion function
*****************************************************************************/
static void ChromaConversion( vout_thread_t *p_vout,
static void ChromaConversion( filter_t *p_filter,
picture_t *p_src, picture_t *p_dest )
{
AVPicture src_pic;
......@@ -149,38 +152,40 @@ static void ChromaConversion( vout_thread_t *p_vout,
}
/* Special cases */
if( p_vout->chroma.p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
p_vout->chroma.p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
if( p_filter->p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
p_filter->p_sys->i_src_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
{
/* Invert U and V */
src_pic.data[1] = p_src->p[2].p_pixels;
src_pic.data[2] = p_src->p[1].p_pixels;
}
if( p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
p_vout->chroma.p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
if( p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','1','2') ||
p_filter->p_sys->i_dst_vlc_chroma == VLC_FOURCC('Y','V','U','9') )
{
/* Invert U and V */
dest_pic.data[1] = p_dest->p[2].p_pixels;
dest_pic.data[2] = p_dest->p[1].p_pixels;
}
if( p_vout->chroma.p_sys->i_src_ffmpeg_chroma == PIX_FMT_RGB24 )
if( p_vout->render.i_bmask == 0x00ff0000 )
p_vout->chroma.p_sys->i_src_ffmpeg_chroma = PIX_FMT_BGR24;
if( p_filter->p_sys->i_src_ffmpeg_chroma == PIX_FMT_RGB24 )
if( p_filter->fmt_in.video.i_bmask == 0x00ff0000 )
p_filter->p_sys->i_src_ffmpeg_chroma = PIX_FMT_BGR24;
if( p_vout->chroma.p_sys->p_rsc )
if( p_filter->p_sys->p_rsc )
{
img_convert( &p_vout->chroma.p_sys->tmp_pic,
p_vout->chroma.p_sys->i_dst_ffmpeg_chroma,
&src_pic, p_vout->chroma.p_sys->i_src_ffmpeg_chroma,
p_vout->render.i_width, p_vout->render.i_height );
img_resample( p_vout->chroma.p_sys->p_rsc, &dest_pic,
&p_vout->chroma.p_sys->tmp_pic );
img_convert( &p_filter->p_sys->tmp_pic,
p_filter->p_sys->i_dst_ffmpeg_chroma,
&src_pic, p_filter->p_sys->i_src_ffmpeg_chroma,
p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height );
img_resample( p_filter->p_sys->p_rsc, &dest_pic,
&p_filter->p_sys->tmp_pic );
}
else
{
img_convert( &dest_pic, p_vout->chroma.p_sys->i_dst_ffmpeg_chroma,
&src_pic, p_vout->chroma.p_sys->i_src_ffmpeg_chroma,
p_vout->render.i_width, p_vout->render.i_height );
img_convert( &dest_pic, p_filter->p_sys->i_dst_ffmpeg_chroma,
&src_pic, p_filter->p_sys->i_src_ffmpeg_chroma,
p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height );
}
}
......@@ -191,176 +196,12 @@ static void ChromaConversion( vout_thread_t *p_vout,
*****************************************************************************/
void CloseChroma( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
if( p_vout->chroma.p_sys->p_rsc )
filter_t *p_filter = (filter_t *)p_this;
if( p_filter->p_sys->p_rsc )
{
img_resample_close( p_vout->chroma.p_sys->p_rsc );
avpicture_free( &p_vout->chroma.p_sys->tmp_pic );
img_resample_close( p_filter->p_sys->p_rsc );
avpicture_free( &p_filter->p_sys->tmp_pic );
}
free( p_vout->chroma.p_sys );
free( p_filter->p_sys );
}
#else
static void ChromaConversion( vout_thread_t *, picture_t *, picture_t * );
/*****************************************************************************
* chroma_sys_t: chroma method descriptor
*****************************************************************************
* This structure is part of the chroma transformation descriptor, it
* describes the chroma plugin specific properties.
*****************************************************************************/
struct chroma_sys_t
{
filter_t *p_swscaler;
};
/*****************************************************************************
* Video Filter2 functions
*****************************************************************************/
struct filter_owner_sys_t
{
vout_thread_t *p_vout;
};
static void PictureRelease( picture_t *p_pic )
{
free( p_pic->p_data_orig );
}
static picture_t *video_new_buffer_filter( filter_t *p_filter )
{
picture_t *p_picture = malloc( sizeof(picture_t) );
if( !p_picture ) return NULL;
if( vout_AllocatePicture( p_filter, p_picture,
p_filter->fmt_out.video.i_chroma,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
p_filter->fmt_out.video.i_aspect )
!= VLC_SUCCESS )
{
free( p_picture );
return NULL;
}
p_picture->pf_release = PictureRelease;
return p_picture;
}
static void video_del_buffer_filter( filter_t *p_filter, picture_t *p_pic )
{
VLC_UNUSED(p_filter);
if( p_pic )
{
free( p_pic->p_data_orig );
free( p_pic );
}
}
/*****************************************************************************
* OpenChroma: allocate a chroma function
*****************************************************************************
* This function allocates and initializes a chroma function
*****************************************************************************/
int OpenChroma( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
chroma_sys_t *p_sys = p_vout->chroma.p_sys;
p_vout->chroma.p_sys = p_sys = malloc( sizeof( chroma_sys_t ) );
if( p_vout->chroma.p_sys == NULL )
{
return VLC_ENOMEM;
}
p_vout->chroma.pf_convert = ChromaConversion;
p_sys->p_swscaler = vlc_object_create( p_vout, VLC_OBJECT_FILTER );
vlc_object_attach( p_sys->p_swscaler, p_vout );
p_sys->p_swscaler->pf_vout_buffer_new = video_new_buffer_filter;
p_sys->p_swscaler->pf_vout_buffer_del = video_del_buffer_filter;
p_sys->p_swscaler->fmt_out.video.i_x_offset =
p_sys->p_swscaler->fmt_out.video.i_y_offset = 0;
p_sys->p_swscaler->fmt_in.video = p_vout->fmt_in;
p_sys->p_swscaler->fmt_out.video = p_vout->fmt_out;
p_sys->p_swscaler->fmt_out.video.i_aspect = p_vout->render.i_aspect;
p_sys->p_swscaler->fmt_in.video.i_chroma = p_vout->render.i_chroma;
p_sys->p_swscaler->fmt_out.video.i_chroma = p_vout->output.i_chroma;
p_sys->p_swscaler->p_module = module_Need( p_sys->p_swscaler,
"video filter2", 0, 0 );
if( p_sys->p_swscaler->p_module )
{
p_sys->p_swscaler->p_owner =
malloc( sizeof( filter_owner_sys_t ) );
if( p_sys->p_swscaler->p_owner )
p_sys->p_swscaler->p_owner->p_vout = p_vout;
}
if( !p_sys->p_swscaler->p_module || !p_sys->p_swscaler->p_owner )
{
vlc_object_detach( p_sys->p_swscaler );
vlc_object_release( p_sys->p_swscaler );
free( p_vout->chroma.p_sys );
return VLC_EGENERIC;
}
return VLC_SUCCESS;
}
/*****************************************************************************
* ChromaConversion: actual chroma conversion function
*****************************************************************************/
static void ChromaConversion( vout_thread_t *p_vout,
picture_t *p_src, picture_t *p_dest )
{
chroma_sys_t *p_sys = (chroma_sys_t *) p_vout->chroma.p_sys;
if( p_sys && p_src && p_dest &&
p_sys->p_swscaler && p_sys->p_swscaler->p_module )
{
picture_t *p_pic;
p_sys->p_swscaler->fmt_in.video = p_vout->fmt_in;
p_sys->p_swscaler->fmt_out.video = p_vout->fmt_out;
#if 0
msg_Dbg( p_vout, "chroma %4.4s (%d) to %4.4s (%d)",
(char *)&p_vout->fmt_in.i_chroma, p_src->i_planes,
(char *)&p_vout->fmt_out.i_chroma, p_dest->i_planes );
#endif
p_pic = p_sys->p_swscaler->pf_vout_buffer_new( p_sys->p_swscaler );
if( p_pic )
{
picture_t *p_dst_pic;
vout_CopyPicture( p_vout, p_pic, p_src );
p_dst_pic = p_sys->p_swscaler->pf_video_filter( p_sys->p_swscaler, p_pic );
vout_CopyPicture( p_vout, p_dest, p_dst_pic );
p_dst_pic->pf_release( p_dst_pic );
}
}
}
/*****************************************************************************
* CloseChroma: free the chroma function
*****************************************************************************
* This function frees the previously allocated chroma function
*****************************************************************************/
void CloseChroma( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
chroma_sys_t *p_sys = (chroma_sys_t *)p_vout->chroma.p_sys;
if( p_sys->p_swscaler && p_sys->p_swscaler->p_module )
{
free( p_sys->p_swscaler->p_owner );
module_Unneed( p_sys->p_swscaler, p_sys->p_swscaler->p_module );
vlc_object_detach( p_sys->p_swscaler );
vlc_object_release( p_sys->p_swscaler );
p_sys->p_swscaler= NULL;
}
free( p_vout->chroma.p_sys );
}
#endif /* !defined(HAVE_LIBSWSCALE_SWSCALE_H) && !defined(HAVE_FFMPEG_SWSCALE_H) */
......@@ -252,13 +252,13 @@ vlc_module_begin();
set_capability( "crop padd", 10 );
set_callbacks( OpenCropPadd, CloseFilter );
set_description( N_("FFmpeg crop padd filter") );
#endif
/* chroma conversion submodule */
add_submodule();
set_capability( "chroma", 50 );
set_callbacks( OpenChroma, CloseChroma );
set_description( N_("FFmpeg chroma conversion") );
#endif
/* video filter submodule */
add_submodule();
......
/*****************************************************************************
* chain.c : chain multiple chroma modules as a last resort solution
*****************************************************************************
* Copyright (C) 2007 the VideoLAN team
* Copyright (C) 2007-2008 the VideoLAN team
* $Id$
*
* Authors: Antoine Cellerier <dionoea at videolan dot org>
......@@ -31,6 +31,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
/*****************************************************************************
......@@ -38,7 +39,7 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void Destroy ( vlc_object_t * );
static void Chain ( vout_thread_t *, picture_t *, picture_t * );
static void Chain ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
......@@ -51,19 +52,21 @@ vlc_module_end();
#define MAX_CHROMAS 2
struct chroma_sys_t
struct filter_sys_t
{
vlc_fourcc_t i_chroma;
vout_chroma_t chroma1;
vout_chroma_t chroma2;
filter_t *p_chroma1;
filter_t *p_chroma2;
picture_t *p_tmp;
picture_t *p_tmp;
};
static const vlc_fourcc_t pi_allowed_chromas[] = {
VLC_FOURCC('I','4','2','0'),
VLC_FOURCC('I','4','2','2'),
VLC_FOURCC('R','V','3','2'),
VLC_FOURCC('R','V','2','4'),
0
};
......@@ -74,8 +77,9 @@ static const vlc_fourcc_t pi_allowed_chromas[] = {
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
#if 0
static int hack = 1;
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
hack++;
if( hack > MAX_CHROMAS )
......@@ -86,25 +90,25 @@ static int Activate( vlc_object_t *p_this )
return VLC_EGENERIC;
}
chroma_sys_t *p_sys = (chroma_sys_t *)malloc( sizeof( chroma_sys_t ) );
filter_sys_t *p_sys = (filter_sys_t *)malloc( sizeof( filter_sys_t ) );
if( !p_sys )
{
hack--;
return VLC_ENOMEM;
}
memset( p_sys, 0, sizeof( chroma_sys_t ) );
memset( p_sys, 0, sizeof( filter_sys_t ) );
int i;
vlc_fourcc_t i_output_chroma = p_vout->output.i_chroma;
vlc_fourcc_t i_render_chroma = p_vout->render.i_chroma;
vlc_fourcc_t i_output_chroma = p_filter->fmt_in.video.i_chroma;
vlc_fourcc_t i_render_chroma = p_filter->fmt_out.video.i_chroma;
for( i = 0; pi_allowed_chromas[i]; i++ )
{
msg_Warn( p_vout, "Trying %4s as a chroma chain",
msg_Warn( p_filter, "Trying %4s as a chroma chain",
(const char *)&pi_allowed_chromas[i] );
p_vout->output.i_chroma = pi_allowed_chromas[i];
p_vout->chroma.p_module = module_Need( p_vout, "chroma", NULL, 0 );
p_vout->output.i_chroma = i_output_chroma;
p_filter->output.i_chroma = pi_allowed_chromas[i];
p_filter->p_chroma1.p_module = module_Need( p_vout, "chroma", NULL, 0 );
p_filter->output.i_chroma = i_output_chroma;
if( !p_vout->chroma.p_module )
continue;
......@@ -136,15 +140,16 @@ static int Activate( vlc_object_t *p_this )
free( p_sys );
hack--;
#endif
return VLC_EGENERIC;
}
static void Destroy( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
#if 0
filter_t *p_filter = (filter_t *)p_this;
vout_chroma_t chroma = p_vout->chroma;
p_vout->chroma = chroma.p_sys->chroma1;
module_Unneed( p_vout, p_vout->chroma.p_module );
p_vout->chroma = chroma.p_sys->chroma2;
......@@ -158,14 +163,16 @@ static void Destroy( vlc_object_t *p_this )
}
free( chroma.p_sys );
chroma.p_sys = NULL;
#endif
}
/*****************************************************************************
* Chain
*****************************************************************************/
static void Chain( vout_thread_t *p_vout, picture_t *p_source,
static void Chain( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
#if 0
chroma_sys_t *p_sys = p_vout->chroma.p_sys;
if( !p_sys->p_tmp )
......@@ -190,4 +197,5 @@ static void Chain( vout_thread_t *p_vout, picture_t *p_source,
p_vout->chroma = p_sys->chroma2;
p_sys->chroma2.pf_convert( p_vout, p_sys->p_tmp, p_dest );
p_vout->chroma = chroma;
#endif
}
/*****************************************************************************
* grey_yuv.c : grayscale to others conversion module for vlc
*****************************************************************************
* Copyright (C) 2007 the VideoLAN team
* Copyright (C) 2007, 2008 the VideoLAN team
* $Id$
*
* Authors: Sam Hocevar <sam@zoy.org>
......@@ -31,6 +31,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "GREY"
......@@ -41,8 +42,8 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void GREY_I420 ( vout_thread_t *, picture_t *, picture_t * );
static void GREY_YUY2 ( vout_thread_t *, picture_t *, picture_t * );
static void GREY_I420( filter_t *, picture_t *, picture_t * );
static void GREY_YUY2( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor.
......@@ -60,25 +61,26 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_out.video.i_width & 1
|| p_filter->fmt_out.video.i_height & 1 )
{
return -1;
}
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','8','0','0'):
p_vout->render.i_chroma = VLC_FOURCC('G','R','E','Y');
p_filter->fmt_in.video.i_chroma = VLC_FOURCC('G','R','E','Y');
case VLC_FOURCC('G','R','E','Y'):
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','0'):
p_vout->chroma.pf_convert = GREY_I420;
p_filter->pf_video_filter_io = GREY_I420;
break;
case VLC_FOURCC('Y','U','Y','2'):
p_vout->chroma.pf_convert = GREY_YUY2;
p_filter->pf_video_filter_io = GREY_YUY2;
break;
default:
return -1;
......@@ -97,8 +99,8 @@ static int Activate( vlc_object_t *p_this )
/*****************************************************************************
* GREY_I420: 8-bit grayscale to planar YUV 4:2:0
*****************************************************************************/
static void GREY_I420( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void GREY_I420( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
uint8_t *p_y = p_dest->Y_PIXELS;
......@@ -114,7 +116,7 @@ static void GREY_I420( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin_c = p_dest->p[1].i_pitch
- p_dest->p[1].i_visible_pitch;
for( i_y = p_vout->render.i_height / 2; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2; i_y-- ; )
{
memset(p_u, 0x80, p_dest->p[1].i_visible_pitch);
p_u += i_dest_margin_c;
......@@ -123,9 +125,9 @@ static void GREY_I420( vout_thread_t *p_vout, picture_t *p_source,
p_v += i_dest_margin_c;
}
for( i_y = p_vout->render.i_height; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8; i_x-- ; )
{
*p_y++ = *p_line++; *p_y++ = *p_line++;
*p_y++ = *p_line++; *p_y++ = *p_line++;
......@@ -133,7 +135,7 @@ static void GREY_I420( vout_thread_t *p_vout, picture_t *p_source,
*p_y++ = *p_line++; *p_y++ = *p_line++;
}
for( i_x = p_vout->render.i_width % 8; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width % 8; i_x-- ; )
{
*p_y++ = *p_line++;
}
......@@ -146,8 +148,8 @@ static void GREY_I420( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* GREY_YUY2: 8-bit grayscale to packed YUY2
*****************************************************************************/
static void GREY_YUY2( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void GREY_YUY2( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_in = p_source->p->p_pixels;
uint8_t *p_out = p_dest->p->p_pixels;
......@@ -159,9 +161,9 @@ static void GREY_YUY2( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
for( i_y = p_vout->render.i_height; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8; i_x-- ; )
{
*p_out++ = *p_in++; *p_out++ = 0x80;
*p_out++ = *p_in++; *p_out++ = 0x80;
......@@ -173,7 +175,7 @@ static void GREY_YUY2( vout_thread_t *p_vout, picture_t *p_source,
*p_out++ = *p_in++; *p_out++ = 0x80;
}
for( i_x = (p_vout->render.i_width % 8) / 2; i_x-- ; )
for( i_x = (p_filter->fmt_out.video.i_width % 8) / 2; i_x-- ; )
{
*p_out++ = *p_in++; *p_out++ = 0x80;
*p_out++ = *p_in++; *p_out++ = 0x80;
......
/*****************************************************************************
* i420_rgb.c : YUV to bitmap RGB conversion module for vlc
*****************************************************************************
* Copyright (C) 2000, 2001, 2004 the VideoLAN team
* Copyright (C) 2000, 2001, 2004, 2008 the VideoLAN team
* $Id$
*
* Authors: Sam Hocevar <sam@zoy.org>
......@@ -33,6 +33,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i420_rgb.h"
......@@ -43,13 +44,13 @@
/*****************************************************************************
* RGB2PIXEL: assemble RGB components to a pixel value, returns a uint32_t
*****************************************************************************/
#define RGB2PIXEL( p_vout, i_r, i_g, i_b ) \
(((((uint32_t)i_r) >> p_vout->output.i_rrshift) \
<< p_vout->output.i_lrshift) \
| ((((uint32_t)i_g) >> p_vout->output.i_rgshift) \
<< p_vout->output.i_lgshift) \
| ((((uint32_t)i_b) >> p_vout->output.i_rbshift) \
<< p_vout->output.i_lbshift))
#define RGB2PIXEL( p_filter, i_r, i_g, i_b ) \
(((((uint32_t)i_r) >> p_filter->fmt_out.video.i_rrshift) \
<< p_filter->fmt_out.video.i_lrshift) \
| ((((uint32_t)i_g) >> p_filter->fmt_out.video.i_rgshift) \
<< p_filter->fmt_out.video.i_lgshift) \
| ((((uint32_t)i_b) >> p_filter->fmt_out.video.i_rbshift) \
<< p_filter->fmt_out.video.i_lbshift))
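/* For context (not part of the diff): worked example of the rewritten macro,
 * assuming the usual R5G6B5 shifts purely for illustration:
 * i_lrshift=11/i_rrshift=3, i_lgshift=5/i_rgshift=2, i_lbshift=0/i_rbshift=3.
 * RGB2PIXEL( p_filter, 0xff, 0x80, 0x40 )
 *   = ((0xff >> 3) << 11) | ((0x80 >> 2) << 5) | ((0x40 >> 3) << 0)
 *   = 0xf800 | 0x0400 | 0x0008
 *   = 0xfc08
 */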
/*****************************************************************************
* Local and extern prototypes.
......@@ -59,8 +60,8 @@ static void Deactivate ( vlc_object_t * );
#if defined (MODULE_NAME_IS_i420_rgb)
static void SetGammaTable ( int *pi_table, double f_gamma );
static void SetYUV ( vout_thread_t * );
static void Set8bppPalette ( vout_thread_t *, uint8_t * );
static void SetYUV ( filter_t * );
static void Set8bppPalette ( filter_t *, uint8_t * );
#endif
/*****************************************************************************
......@@ -92,53 +93,54 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
#if defined (MODULE_NAME_IS_i420_rgb)
size_t i_tables_size;
#endif
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_out.video.i_width & 1
|| p_filter->fmt_out.video.i_height & 1 )
{
return -1;
return VLC_EGENERIC;
}
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','V','1','2'):
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
#if defined (MODULE_NAME_IS_i420_rgb)
case VLC_FOURCC('R','G','B','2'):
p_vout->chroma.pf_convert = I420_RGB8;
p_filter->pf_video_filter_io = I420_RGB8;
break;
#endif
case VLC_FOURCC('R','V','1','5'):
case VLC_FOURCC('R','V','1','6'):
#if ! defined (MODULE_NAME_IS_i420_rgb)
/* If we don't have support for the bitmasks, bail out */
if( ( p_vout->output.i_rmask == 0x7c00
&& p_vout->output.i_gmask == 0x03e0
&& p_vout->output.i_bmask == 0x001f ) )
if( ( p_filter->fmt_out.video.i_rmask == 0x7c00
&& p_filter->fmt_out.video.i_gmask == 0x03e0
&& p_filter->fmt_out.video.i_bmask == 0x001f ) )
{
/* R5G5B5 pixel format */
msg_Dbg(p_this, "RGB pixel format is R5G5B5");
p_vout->chroma.pf_convert = I420_R5G5B5;
p_filter->pf_video_filter_io = I420_R5G5B5;
}
else if( ( p_vout->output.i_rmask == 0xf800
&& p_vout->output.i_gmask == 0x07e0
&& p_vout->output.i_bmask == 0x001f ) )
else if( ( p_filter->fmt_out.video.i_rmask == 0xf800
&& p_filter->fmt_out.video.i_gmask == 0x07e0
&& p_filter->fmt_out.video.i_bmask == 0x001f ) )
{
/* R5G6B5 pixel format */
msg_Dbg(p_this, "RGB pixel format is R5G6B5");
p_vout->chroma.pf_convert = I420_R5G6B5;
p_filter->pf_video_filter_io = I420_R5G6B5;
}
else
return -1;
return VLC_EGENERIC;
#else
// generic C chroma converter */
p_vout->chroma.pf_convert = I420_RGB16;
p_filter->pf_video_filter_io = I420_RGB16;
#endif
break;
......@@ -150,103 +152,103 @@ static int Activate( vlc_object_t *p_this )
case VLC_FOURCC('R','V','3','2'):
#if ! defined (MODULE_NAME_IS_i420_rgb)
/* If we don't have support for the bitmasks, bail out */
if( p_vout->output.i_rmask == 0x00ff0000
&& p_vout->output.i_gmask == 0x0000ff00
&& p_vout->output.i_bmask == 0x000000ff )
if( p_filter->fmt_out.video.i_rmask == 0x00ff0000
&& p_filter->fmt_out.video.i_gmask == 0x0000ff00
&& p_filter->fmt_out.video.i_bmask == 0x000000ff )
{
/* A8R8G8B8 pixel format */
msg_Dbg(p_this, "RGB pixel format is A8R8G8B8");
p_vout->chroma.pf_convert = I420_A8R8G8B8;
p_filter->pf_video_filter_io = I420_A8R8G8B8;
}
else if( p_vout->output.i_rmask == 0xff000000
&& p_vout->output.i_gmask == 0x00ff0000
&& p_vout->output.i_bmask == 0x0000ff00 )
else if( p_filter->fmt_out.video.i_rmask == 0xff000000
&& p_filter->fmt_out.video.i_gmask == 0x00ff0000
&& p_filter->fmt_out.video.i_bmask == 0x0000ff00 )
{
/* R8G8B8A8 pixel format */
msg_Dbg(p_this, "RGB pixel format is R8G8B8A8");
p_vout->chroma.pf_convert = I420_R8G8B8A8;
p_filter->pf_video_filter_io = I420_R8G8B8A8;
}
else if( p_vout->output.i_rmask == 0x0000ff00
&& p_vout->output.i_gmask == 0x00ff0000
&& p_vout->output.i_bmask == 0xff000000 )
else if( p_filter->fmt_out.video.i_rmask == 0x0000ff00
&& p_filter->fmt_out.video.i_gmask == 0x00ff0000
&& p_filter->fmt_out.video.i_bmask == 0xff000000 )
{
/* B8G8R8A8 pixel format */
msg_Dbg(p_this, "RGB pixel format is B8G8R8A8");
p_vout->chroma.pf_convert = I420_B8G8R8A8;
p_filter->pf_video_filter_io = I420_B8G8R8A8;
}
else if( p_vout->output.i_rmask == 0x000000ff
&& p_vout->output.i_gmask == 0x0000ff00
&& p_vout->output.i_bmask == 0x00ff0000 )
else if( p_filter->fmt_out.video.i_rmask == 0x000000ff
&& p_filter->fmt_out.video.i_gmask == 0x0000ff00
&& p_filter->fmt_out.video.i_bmask == 0x00ff0000 )
{
/* A8B8G8R8 pixel format */
msg_Dbg(p_this, "RGB pixel format is A8B8G8R8");
p_vout->chroma.pf_convert = I420_A8B8G8R8;
p_filter->pf_video_filter_io = I420_A8B8G8R8;
}
else
return -1;
return VLC_EGENERIC;
#else
/* generic C chroma converter */
p_vout->chroma.pf_convert = I420_RGB32;
p_filter->pf_video_filter_io = I420_RGB32;
#endif
break;
default:
return -1;
return VLC_EGENERIC;
}
break;
default:
return -1;
return VLC_EGENERIC;
}
p_vout->chroma.p_sys = malloc( sizeof( chroma_sys_t ) );
if( p_vout->chroma.p_sys == NULL )
p_filter->p_sys = malloc( sizeof( filter_sys_t ) );
if( p_filter->p_sys == NULL )
{
return -1;
return VLC_EGENERIC;
}
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
#if defined (MODULE_NAME_IS_i420_rgb)
case VLC_FOURCC('R','G','B','2'):
p_vout->chroma.p_sys->p_buffer = malloc( VOUT_MAX_WIDTH );
p_filter->p_sys->p_buffer = malloc( VOUT_MAX_WIDTH );
break;
#endif
case VLC_FOURCC('R','V','1','5'):
case VLC_FOURCC('R','V','1','6'):
p_vout->chroma.p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 2 );
p_filter->p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 2 );
break;
case VLC_FOURCC('R','V','2','4'):
case VLC_FOURCC('R','V','3','2'):
p_vout->chroma.p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 4 );
p_filter->p_sys->p_buffer = malloc( VOUT_MAX_WIDTH * 4 );
break;
default:
p_vout->chroma.p_sys->p_buffer = NULL;
p_filter->p_sys->p_buffer = NULL;
break;
}
if( p_vout->chroma.p_sys->p_buffer == NULL )
if( p_filter->p_sys->p_buffer == NULL )
{
free( p_vout->chroma.p_sys );
return -1;
free( p_filter->p_sys );
return VLC_EGENERIC;
}
p_vout->chroma.p_sys->p_offset = malloc( p_vout->output.i_width
* ( ( p_vout->output.i_chroma
p_filter->p_sys->p_offset = malloc( p_filter->fmt_out.video.i_width
* ( ( p_filter->fmt_out.video.i_chroma
== VLC_FOURCC('R','G','B','2') ) ? 2 : 1 )
* sizeof( int ) );
if( p_vout->chroma.p_sys->p_offset == NULL )
if( p_filter->p_sys->p_offset == NULL )
{
free( p_vout->chroma.p_sys->p_buffer );
free( p_vout->chroma.p_sys );
return -1;
free( p_filter->p_sys->p_buffer );
free( p_filter->p_sys );
return VLC_EGENERIC;
}
#if defined (MODULE_NAME_IS_i420_rgb)
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('R','G','B','2'):
i_tables_size = sizeof( uint8_t ) * PALETTE_TABLE_SIZE;
......@@ -260,16 +262,16 @@ static int Activate( vlc_object_t *p_this )
break;
}
p_vout->chroma.p_sys->p_base = malloc( i_tables_size );
if( p_vout->chroma.p_sys->p_base == NULL )
p_filter->p_sys->p_base = malloc( i_tables_size );
if( p_filter->p_sys->p_base == NULL )
{
free( p_vout->chroma.p_sys->p_offset );
free( p_vout->chroma.p_sys->p_buffer );
free( p_vout->chroma.p_sys );
free( p_filter->p_sys->p_offset );
free( p_filter->p_sys->p_buffer );
free( p_filter->p_sys );
return -1;
}
SetYUV( p_vout );
SetYUV( p_filter );
#endif
return 0;
......@@ -282,14 +284,14 @@ static int Activate( vlc_object_t *p_this )
*****************************************************************************/
static void Deactivate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
#if defined (MODULE_NAME_IS_i420_rgb)
free( p_vout->chroma.p_sys->p_base );
free( p_filter->p_sys->p_base );
#endif
free( p_vout->chroma.p_sys->p_offset );
free( p_vout->chroma.p_sys->p_buffer );
free( p_vout->chroma.p_sys );
free( p_filter->p_sys->p_offset );
free( p_filter->p_sys->p_buffer );
free( p_filter->p_sys );
}
#if defined (MODULE_NAME_IS_i420_rgb)
......@@ -315,7 +317,7 @@ static void SetGammaTable( int *pi_table, double f_gamma )
/*****************************************************************************
* SetYUV: compute tables and set function pointers
*****************************************************************************/
static void SetYUV( vout_thread_t *p_vout )
static void SetYUV( filter_t *p_filter )
{
int pi_gamma[256]; /* gamma table */
volatile int i_index; /* index in tables */
......@@ -323,84 +325,84 @@ static void SetYUV( vout_thread_t *p_vout )
* optimization bug */
/* Build gamma table */
SetGammaTable( pi_gamma, p_vout->f_gamma );
SetGammaTable( pi_gamma, 0 ); //p_filter/*FIXME wasn't used anywhere anyway*/->f_gamma );
/*
* Set pointers and build YUV tables
*/
/* Color: build red, green and blue tables */
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('R','G','B','2'):
p_vout->chroma.p_sys->p_rgb8 = (uint8_t *)p_vout->chroma.p_sys->p_base;
Set8bppPalette( p_vout, p_vout->chroma.p_sys->p_rgb8 );
p_filter->p_sys->p_rgb8 = (uint8_t *)p_filter->p_sys->p_base;
Set8bppPalette( p_filter, p_filter->p_sys->p_rgb8 );
break;
case VLC_FOURCC('R','V','1','5'):
case VLC_FOURCC('R','V','1','6'):
p_vout->chroma.p_sys->p_rgb16 = (uint16_t *)p_vout->chroma.p_sys->p_base;
p_filter->p_sys->p_rgb16 = (uint16_t *)p_filter->p_sys->p_base;
for( i_index = 0; i_index < RED_MARGIN; i_index++ )
{
p_vout->chroma.p_sys->p_rgb16[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_vout, pi_gamma[0], 0, 0 );
p_vout->chroma.p_sys->p_rgb16[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, pi_gamma[255], 0, 0 );
p_filter->p_sys->p_rgb16[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_filter, pi_gamma[0], 0, 0 );
p_filter->p_sys->p_rgb16[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, pi_gamma[255], 0, 0 );
}
for( i_index = 0; i_index < GREEN_MARGIN; i_index++ )
{
p_vout->chroma.p_sys->p_rgb16[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[0], 0 );
p_vout->chroma.p_sys->p_rgb16[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[255], 0 );
p_filter->p_sys->p_rgb16[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[0], 0 );
p_filter->p_sys->p_rgb16[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[255], 0 );
}
for( i_index = 0; i_index < BLUE_MARGIN; i_index++ )
{
p_vout->chroma.p_sys->p_rgb16[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[0] );
p_vout->chroma.p_sys->p_rgb16[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[255] );
p_filter->p_sys->p_rgb16[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[0] );
p_filter->p_sys->p_rgb16[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[255] );
}
for( i_index = 0; i_index < 256; i_index++ )
{
p_vout->chroma.p_sys->p_rgb16[RED_OFFSET + i_index] = RGB2PIXEL( p_vout, pi_gamma[ i_index ], 0, 0 );
p_vout->chroma.p_sys->p_rgb16[GREEN_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[ i_index ], 0 );
p_vout->chroma.p_sys->p_rgb16[BLUE_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[ i_index ] );
p_filter->p_sys->p_rgb16[RED_OFFSET + i_index] = RGB2PIXEL( p_filter, pi_gamma[ i_index ], 0, 0 );
p_filter->p_sys->p_rgb16[GREEN_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[ i_index ], 0 );
p_filter->p_sys->p_rgb16[BLUE_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[ i_index ] );
}
break;
case VLC_FOURCC('R','V','2','4'):
case VLC_FOURCC('R','V','3','2'):
p_vout->chroma.p_sys->p_rgb32 = (uint32_t *)p_vout->chroma.p_sys->p_base;
p_filter->p_sys->p_rgb32 = (uint32_t *)p_filter->p_sys->p_base;
for( i_index = 0; i_index < RED_MARGIN; i_index++ )
{
p_vout->chroma.p_sys->p_rgb32[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_vout, pi_gamma[0], 0, 0 );
p_vout->chroma.p_sys->p_rgb32[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, pi_gamma[255], 0, 0 );
p_filter->p_sys->p_rgb32[RED_OFFSET - RED_MARGIN + i_index] = RGB2PIXEL( p_filter, pi_gamma[0], 0, 0 );
p_filter->p_sys->p_rgb32[RED_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, pi_gamma[255], 0, 0 );
}
for( i_index = 0; i_index < GREEN_MARGIN; i_index++ )
{
p_vout->chroma.p_sys->p_rgb32[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[0], 0 );
p_vout->chroma.p_sys->p_rgb32[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[255], 0 );
p_filter->p_sys->p_rgb32[GREEN_OFFSET - GREEN_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[0], 0 );
p_filter->p_sys->p_rgb32[GREEN_OFFSET + 256 + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[255], 0 );
}
for( i_index = 0; i_index < BLUE_MARGIN; i_index++ )
{
p_vout->chroma.p_sys->p_rgb32[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[0] );
p_vout->chroma.p_sys->p_rgb32[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[255] );
p_filter->p_sys->p_rgb32[BLUE_OFFSET - BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[0] );
p_filter->p_sys->p_rgb32[BLUE_OFFSET + BLUE_MARGIN + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[255] );
}
for( i_index = 0; i_index < 256; i_index++ )
{
p_vout->chroma.p_sys->p_rgb32[RED_OFFSET + i_index] = RGB2PIXEL( p_vout, pi_gamma[ i_index ], 0, 0 );
p_vout->chroma.p_sys->p_rgb32[GREEN_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, pi_gamma[ i_index ], 0 );
p_vout->chroma.p_sys->p_rgb32[BLUE_OFFSET + i_index] = RGB2PIXEL( p_vout, 0, 0, pi_gamma[ i_index ] );
p_filter->p_sys->p_rgb32[RED_OFFSET + i_index] = RGB2PIXEL( p_filter, pi_gamma[ i_index ], 0, 0 );
p_filter->p_sys->p_rgb32[GREEN_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, pi_gamma[ i_index ], 0 );
p_filter->p_sys->p_rgb32[BLUE_OFFSET + i_index] = RGB2PIXEL( p_filter, 0, 0, pi_gamma[ i_index ] );
}
break;
}
}
static void Set8bppPalette( vout_thread_t *p_vout, uint8_t *p_rgb8 )
static void Set8bppPalette( filter_t *p_filter, uint8_t *p_rgb8 )
{
#define CLIP( x ) ( ((x < 0) ? 0 : (x > 255) ? 255 : x) << 8 )
int y,u,v;
int r,g,b;
int i = 0, j = 0;
uint16_t *p_cmap_r=p_vout->chroma.p_sys->p_rgb_r;
uint16_t *p_cmap_g=p_vout->chroma.p_sys->p_rgb_g;
uint16_t *p_cmap_b=p_vout->chroma.p_sys->p_rgb_b;
uint16_t *p_cmap_r = p_filter->p_sys->p_rgb_r;
uint16_t *p_cmap_g = p_filter->p_sys->p_rgb_g;
uint16_t *p_cmap_b = p_filter->p_sys->p_rgb_b;
unsigned char p_lookup[PALETTE_TABLE_SIZE];
......@@ -423,7 +425,7 @@ static void Set8bppPalette( vout_thread_t *p_vout, uint8_t *p_rgb8 )
* fscked up my code */
if( j == 256 )
{
msg_Err( p_vout, "no colors left in palette" );
msg_Err( p_filter, "no colors left in palette" );
break;
}
......@@ -453,7 +455,8 @@ static void Set8bppPalette( vout_thread_t *p_vout, uint8_t *p_rgb8 )
}
/* The colors have been allocated, we can set the palette */
p_vout->output.pf_setpalette( p_vout, p_cmap_r, p_cmap_g, p_cmap_b );
/* FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME
p_filter->fmt_out.video.pf_setpalette( p_filter, p_cmap_r, p_cmap_g, p_cmap_b );*/
#if 0
/* There will eventually be a way to know which colors
......
......@@ -25,12 +25,12 @@
#define CMAP_RGB2_SIZE 256
/**
* chroma_sys_t: chroma method descriptor
* filter_sys_t: chroma method descriptor
* This structure is part of the chroma transformation descriptor, it
* describes the yuv2rgb specific properties.
*/
struct chroma_sys_t
struct filter_sys_t
{
uint8_t *p_buffer;
int *p_offset;
......@@ -56,17 +56,17 @@ struct chroma_sys_t
* Prototypes
*****************************************************************************/
#ifdef MODULE_NAME_IS_i420_rgb
void I420_RGB8 ( vout_thread_t *, picture_t *, picture_t * );
void I420_RGB16_dither ( vout_thread_t *, picture_t *, picture_t * );
void I420_RGB16 ( vout_thread_t *, picture_t *, picture_t * );
void I420_RGB32 ( vout_thread_t *, picture_t *, picture_t * );
void I420_RGB8 ( filter_t *, picture_t *, picture_t * );
void I420_RGB16_dither ( filter_t *, picture_t *, picture_t * );
void I420_RGB16 ( filter_t *, picture_t *, picture_t * );
void I420_RGB32 ( filter_t *, picture_t *, picture_t * );
#else // if defined(MODULE_NAME_IS_i420_rgb_mmx)
void I420_R5G5B5 ( vout_thread_t *, picture_t *, picture_t * );
void I420_R5G6B5 ( vout_thread_t *, picture_t *, picture_t * );
void I420_A8R8G8B8 ( vout_thread_t *, picture_t *, picture_t * );
void I420_R8G8B8A8 ( vout_thread_t *, picture_t *, picture_t * );
void I420_B8G8R8A8 ( vout_thread_t *, picture_t *, picture_t * );
void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
void I420_R5G5B5 ( filter_t *, picture_t *, picture_t * );
void I420_R5G6B5 ( filter_t *, picture_t *, picture_t * );
void I420_A8R8G8B8 ( filter_t *, picture_t *, picture_t * );
void I420_R8G8B8A8 ( filter_t *, picture_t *, picture_t * );
void I420_B8G8R8A8 ( filter_t *, picture_t *, picture_t * );
void I420_A8B8G8R8 ( filter_t *, picture_t *, picture_t * );
#endif
/*****************************************************************************
......@@ -170,7 +170,7 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
* Rewind buffer and offset, then copy and scale line */ \
p_buffer = p_buffer_start; \
p_offset = p_offset_start; \
for( i_x = p_vout->output.i_width / 16; i_x--; ) \
for( i_x = p_filter->fmt_out.video.i_width / 16; i_x--; ) \
{ \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
......@@ -189,7 +189,7 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
} \
for( i_x = p_vout->output.i_width & 15; i_x--; ) \
for( i_x = p_filter->fmt_out.video.i_width & 15; i_x--; ) \
{ \
*p_pic++ = *p_buffer; p_buffer += *p_offset++; \
} \
......@@ -212,7 +212,7 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
{ \
/* Horizontal scaling - we can't use a buffer due to dithering */ \
p_offset = p_offset_start; \
for( i_x = p_vout->output.i_width / 16; i_x--; ) \
for( i_x = p_filter->fmt_out.video.i_width / 16; i_x--; ) \
{ \
CONVERT_4YUV_PIXEL_SCALE( CHROMA ) \
CONVERT_4YUV_PIXEL_SCALE( CHROMA ) \
......@@ -222,7 +222,7 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
} \
else \
{ \
for( i_x = p_vout->render.i_width / 16; i_x--; ) \
for( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; ) \
{ \
CONVERT_4YUV_PIXEL( CHROMA ) \
CONVERT_4YUV_PIXEL( CHROMA ) \
......@@ -258,10 +258,10 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
switch( i_vscale ) \
{ \
case -1: /* vertical scaling factor is < 1 */ \
while( (i_scale_count -= p_vout->output.i_height) > 0 ) \
while( (i_scale_count -= p_filter->fmt_out.video.i_height) > 0 ) \
{ \
/* Height reduction: skip next source line */ \
p_y += p_vout->render.i_width; \
p_y += p_filter->fmt_in.video.i_width; \
i_y++; \
if( (CHROMA == 420) || (CHROMA == 422) ) \
{ \
......@@ -273,20 +273,20 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
} \
else if( CHROMA == 444 ) \
{ \
p_u += p_vout->render.i_width; \
p_v += p_vout->render.i_width; \
p_u += p_filter->fmt_in.video.i_width; \
p_v += p_filter->fmt_in.video.i_width; \
} \
} \
i_scale_count += p_vout->render.i_height; \
i_scale_count += p_filter->fmt_in.video.i_height; \
break; \
case 1: /* vertical scaling factor is > 1 */ \
while( (i_scale_count -= p_vout->render.i_height) > 0 ) \
while( (i_scale_count -= p_filter->fmt_in.video.i_height) > 0 ) \
{ \
/* Height increment: copy previous picture line */ \
vlc_memcpy( p_pic, p_pic_start, p_vout->output.i_width * BPP ); \
vlc_memcpy( p_pic, p_pic_start, p_filter->fmt_out.video.i_width * BPP ); \
p_pic = (void*)((uint8_t*)p_pic + p_dest->p->i_pitch ); \
} \
i_scale_count += p_vout->output.i_height; \
i_scale_count += p_filter->fmt_out.video.i_height; \
break; \
} \
......@@ -313,10 +313,10 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
switch( i_vscale ) \
{ \
case -1: /* vertical scaling factor is < 1 */ \
while( (i_scale_count -= p_vout->output.i_height) > 0 ) \
while( (i_scale_count -= p_filter->fmt_out.video.i_height) > 0 ) \
{ \
/* Height reduction: skip next source line */ \
p_y += p_vout->render.i_width; \
p_y += p_filter->fmt_in.video.i_width; \
i_y++; \
if( (CHROMA == 420) || (CHROMA == 422) ) \
{ \
......@@ -328,21 +328,21 @@ void I420_A8B8G8R8 ( vout_thread_t *, picture_t *, picture_t * );
} \
else if( CHROMA == 444 ) \
{ \
p_u += p_vout->render.i_width; \
p_v += p_vout->render.i_width; \
p_u += p_filter->fmt_in.video.i_width; \
p_v += p_filter->fmt_in.video.i_width; \
} \
} \
i_scale_count += p_vout->render.i_height; \
i_scale_count += p_filter->fmt_in.video.i_height; \
break; \
case 1: /* vertical scaling factor is > 1 */ \
while( (i_scale_count -= p_vout->render.i_height) > 0 ) \
while( (i_scale_count -= p_filter->fmt_in.video.i_height) > 0 ) \
{ \
p_y -= p_vout->render.i_width; \
p_y -= p_filter->fmt_in.video.i_width; \
p_u -= i_chroma_width; \
p_v -= i_chroma_width; \
SCALE_WIDTH_DITHER( CHROMA ); \
} \
i_scale_count += p_vout->output.i_height; \
i_scale_count += p_filter->fmt_out.video.i_height; \
break; \
} \
......@@ -30,7 +30,8 @@
# include "config.h"
#endif
#include <vlc_common.h>
#include <vlc/vlc.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i420_rgb.h"
......@@ -56,8 +57,8 @@ static void SetOffset( int, int, int, int, bool *,
* - input: 2 lines (2 Y lines, 1 U/V line)
* - output: 1 line
*****************************************************************************/
void I420_RGB16_dither( vout_thread_t *p_vout, picture_t *p_src,
picture_t *p_dest )
void I420_RGB16_dither( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
......@@ -73,19 +74,19 @@ void I420_RGB16_dither( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
int i_uval, i_vval; /* U and V samples */
int i_red, i_green, i_blue; /* U and V modified samples */
uint16_t * p_yuv = p_vout->chroma.p_sys->p_rgb16;
uint16_t * p_ybase; /* Y dependent conversion table */
uint16_t * p_yuv = p_filter->p_sys->p_rgb16;
uint16_t * p_ybase; /* Y dependent conversion table */
/* Conversion buffer pointer */
uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -101,17 +102,17 @@ void I420_RGB16_dither( vout_thread_t *p_vout, picture_t *p_src,
for(i_x = 0; i_x < 4; i_x++)
{
dither10[i_x] = dither10[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
dither11[i_x] = dither11[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
dither12[i_x] = dither12[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
dither13[i_x] = dither13[i_x] << (SHIFT - 4 + p_vout->output.i_rrshift);
dither10[i_x] = dither10[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
dither11[i_x] = dither11[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
dither12[i_x] = dither12[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
dither13[i_x] = dither13[i_x] << (SHIFT - 4 + p_filter->fmt_out.video.i_rrshift);
}
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
......@@ -121,22 +122,25 @@ void I420_RGB16_dither( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
i_real_y = i_y & 0x3;
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
int *p_dither = dither10;
CONVERT_YUV_PIXEL_DITHER(2);
......@@ -207,8 +211,8 @@ void I420_RGB16_dither( vout_thread_t *p_vout, picture_t *p_src,
#if defined (MODULE_NAME_IS_i420_rgb)
void I420_RGB16( vout_thread_t *p_vout, picture_t *p_src,
picture_t *p_dest )
void I420_RGB16( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
......@@ -223,19 +227,19 @@ void I420_RGB16( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
int i_uval, i_vval; /* U and V samples */
int i_red, i_green, i_blue; /* U and V modified samples */
uint16_t * p_yuv = p_vout->chroma.p_sys->p_rgb16;
uint16_t * p_ybase; /* Y dependent conversion table */
uint16_t * p_yuv = p_filter->p_sys->p_rgb16;
uint16_t * p_ybase; /* Y dependent conversion table */
/* Conversion buffer pointer */
uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -245,9 +249,9 @@ void I420_RGB16( vout_thread_t *p_vout, picture_t *p_src,
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
......@@ -257,21 +261,24 @@ void I420_RGB16( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
CONVERT_YUV_PIXEL(2); CONVERT_Y_PIXEL(2);
CONVERT_YUV_PIXEL(2); CONVERT_Y_PIXEL(2);
......@@ -307,8 +314,8 @@ void I420_RGB16( vout_thread_t *p_vout, picture_t *p_src,
#else // ! defined (MODULE_NAME_IS_i420_rgb)
void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
picture_t *p_dest )
void I420_R5G5B5( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
......@@ -323,15 +330,15 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -344,8 +351,10 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
......@@ -353,13 +362,14 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
if( p_vout->render.i_width & 15 )
if( p_filter->fmt_in.video.i_width & 15 )
{
i_rewind = 16 - ( p_vout->render.i_width & 15 );
i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
......@@ -378,11 +388,11 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
for ( i_x = p_vout->render.i_width/16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_16_ALIGNED
......@@ -429,12 +439,12 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width/16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_16_UNALIGNED
......@@ -484,21 +494,21 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_16
......@@ -548,8 +558,8 @@ void I420_R5G5B5( vout_thread_t *p_vout, picture_t *p_src,
#endif
}
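The SSE2 branches above choose between the *_ALIGNED and *_UNALIGNED variants with a single mask test. A minimal sketch of that test, assuming 16-byte SSE2 alignment and illustrative parameter names (the module folds the pitches and base pointers of the actual planes into one expression):

    #include <stdint.h>

    /* True when both pitches and both base pointers are multiples of 16,
     * so aligned SSE2 loads/stores stay safe on every line of the picture. */
    static int can_use_aligned_sse2( const uint8_t *p_src, int i_src_pitch,
                                     const uint8_t *p_dst, int i_dst_pitch )
    {
        return 0 == ( 15 & ( i_src_pitch | i_dst_pitch |
                             (intptr_t)p_src | (intptr_t)p_dst ) );
    }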
void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
picture_t *p_dest )
void I420_R5G6B5( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
uint16_t *p_pic = (uint16_t*)p_dest->p->p_pixels;
......@@ -564,15 +574,15 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint16_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
uint16_t * p_buffer_start = (uint16_t*)p_vout->chroma.p_sys->p_buffer;
uint16_t * p_buffer_start = (uint16_t*)p_filter->p_sys->p_buffer;
uint16_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -585,8 +595,10 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
......@@ -594,13 +606,14 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
if( p_vout->render.i_width & 15 )
if( p_filter->fmt_in.video.i_width & 15 )
{
i_rewind = 16 - ( p_vout->render.i_width & 15 );
i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
......@@ -619,11 +632,11 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
for ( i_x = p_vout->render.i_width/16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_16_ALIGNED
......@@ -670,12 +683,12 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width/16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width/16; i_x--; )
{
SSE2_CALL(
SSE2_INIT_16_UNALIGNED
......@@ -725,21 +738,21 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_16
......@@ -804,8 +817,8 @@ void I420_R5G6B5( vout_thread_t *p_vout, picture_t *p_src,
#if defined (MODULE_NAME_IS_i420_rgb)
void I420_RGB32( vout_thread_t *p_vout, picture_t *p_src,
picture_t *p_dest )
void I420_RGB32( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
uint32_t *p_pic = (uint32_t*)p_dest->p->p_pixels;
......@@ -820,19 +833,19 @@ void I420_RGB32( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
int i_uval, i_vval; /* U and V samples */
int i_red, i_green, i_blue; /* U and V modified samples */
uint32_t * p_yuv = p_vout->chroma.p_sys->p_rgb32;
uint32_t * p_ybase; /* Y dependent conversion table */
uint32_t * p_yuv = p_filter->p_sys->p_rgb32;
uint32_t * p_ybase; /* Y dependent conversion table */
/* Conversion buffer pointer */
uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -842,9 +855,9 @@ void I420_RGB32( vout_thread_t *p_vout, picture_t *p_src,
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
......@@ -854,21 +867,24 @@ void I420_RGB32( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
CONVERT_YUV_PIXEL(4); CONVERT_Y_PIXEL(4);
CONVERT_YUV_PIXEL(4); CONVERT_Y_PIXEL(4);
......@@ -903,7 +919,7 @@ void I420_RGB32( vout_thread_t *p_vout, picture_t *p_src,
#else // defined (MODULE_NAME_IS_i420_rgb_mmx) || defined (MODULE_NAME_IS_i420_rgb_sse2)
void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
void I420_A8R8G8B8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
......@@ -919,14 +935,14 @@ void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -939,21 +955,24 @@ void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
if( p_vout->render.i_width & 15 )
if( p_filter->fmt_in.video.i_width & 15 )
{
i_rewind = 16 - ( p_vout->render.i_width & 15 );
i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
......@@ -972,11 +991,11 @@ void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
......@@ -1023,12 +1042,12 @@ void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
......@@ -1078,21 +1097,21 @@ void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
......@@ -1142,7 +1161,7 @@ void I420_A8R8G8B8( vout_thread_t *p_vout, picture_t *p_src,
#endif
}
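The i_rewind value computed above handles widths that are not a multiple of the SIMD block: the tail is typically dealt with by shifting the last block left so it ends exactly at the line boundary, re-converting a few pixels instead of running a scalar tail. A rough sketch of the idea, assuming at least one full block per line and a stand-in convert_16_pixels() helper for one SSE2 iteration:

    #include <stdint.h>
    #include <string.h>

    /* stand-in for one 16-pixel SIMD conversion step */
    static void convert_16_pixels( uint8_t *d, const uint8_t *s )
    {
        memcpy( d, s, 16 );
    }

    static void convert_line( uint8_t *p_dst, const uint8_t *p_src, int i_width )
    {
        int i_rewind = ( i_width & 15 ) ? 16 - ( i_width & 15 ) : 0;

        for( int i_x = i_width / 16; i_x--; )
        {
            convert_16_pixels( p_dst, p_src );
            p_dst += 16; p_src += 16;
        }
        if( i_rewind )
        {
            /* redo the last block shifted left: a few pixels are converted
             * twice, which is cheaper than a per-pixel tail loop */
            p_dst -= i_rewind; p_src -= i_rewind;
            convert_16_pixels( p_dst, p_src );
        }
    }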
void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
void I420_R8G8B8A8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
......@@ -1158,14 +1177,14 @@ void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -1178,21 +1197,24 @@ void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
if( p_vout->render.i_width & 15 )
if( p_filter->fmt_in.video.i_width & 15 )
{
i_rewind = 16 - ( p_vout->render.i_width & 15 );
i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
......@@ -1211,11 +1233,11 @@ void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
......@@ -1262,12 +1284,12 @@ void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
......@@ -1317,21 +1339,21 @@ void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
#else // defined (MODULE_NAME_IS_i420_rgb_mmx)
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
......@@ -1381,7 +1403,7 @@ void I420_R8G8B8A8( vout_thread_t *p_vout, picture_t *p_src,
#endif
}
void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
void I420_B8G8R8A8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
......@@ -1397,14 +1419,14 @@ void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -1417,21 +1439,24 @@ void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
if( p_vout->render.i_width & 15 )
if( p_filter->fmt_in.video.i_width & 15 )
{
i_rewind = 16 - ( p_vout->render.i_width & 15 );
i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
......@@ -1450,11 +1475,11 @@ void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
......@@ -1501,12 +1526,12 @@ void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
......@@ -1553,21 +1578,21 @@ void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
#else
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
......@@ -1617,7 +1642,7 @@ void I420_B8G8R8A8( vout_thread_t *p_vout, picture_t *p_src,
#endif
}
void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
void I420_A8B8G8R8( filter_t *p_filter, picture_t *p_src,
picture_t *p_dest )
{
/* We got this one from the old arguments */
......@@ -1633,14 +1658,14 @@ void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
int i_right_margin;
int i_rewind;
int i_scale_count; /* scale modulo counter */
int i_chroma_width = p_vout->render.i_width / 2; /* chroma width */
int i_chroma_width = p_filter->fmt_in.video.i_width / 2; /* chroma width */
uint32_t * p_pic_start; /* beginning of the current line for copy */
/* Conversion buffer pointer */
uint32_t * p_buffer_start = (uint32_t*)p_vout->chroma.p_sys->p_buffer;
uint32_t * p_buffer_start = (uint32_t*)p_filter->p_sys->p_buffer;
uint32_t * p_buffer;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -1653,21 +1678,24 @@ void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
/* Rule: when a picture of size (x1,y1) with aspect ratio r1 is rendered
* on a picture of size (x2,y2) with aspect ratio r2, if x1 grows to x1'
* then y1 grows to y1' = x1' * y2/x2 * r2/r1 */
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
/*
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
#if defined (MODULE_NAME_IS_i420_rgb_sse2)
if( p_vout->render.i_width & 15 )
if( p_filter->fmt_in.video.i_width & 15 )
{
i_rewind = 16 - ( p_vout->render.i_width & 15 );
i_rewind = 16 - ( p_filter->fmt_in.video.i_width & 15 );
}
else
{
......@@ -1686,11 +1714,11 @@ void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
((intptr_t)p_buffer))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_ALIGNED
......@@ -1737,12 +1765,12 @@ void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 16; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 16; i_x--; )
{
SSE2_CALL (
SSE2_INIT_32_UNALIGNED
......@@ -1789,21 +1817,21 @@ void I420_A8B8G8R8( vout_thread_t *p_vout, picture_t *p_src,
#else
if( p_vout->render.i_width & 7 )
if( p_filter->fmt_in.video.i_width & 7 )
{
i_rewind = 8 - ( p_vout->render.i_width & 7 );
i_rewind = 8 - ( p_filter->fmt_in.video.i_width & 7 );
}
else
{
i_rewind = 0;
}
for( i_y = 0; i_y < p_vout->render.i_height; i_y++ )
for( i_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
p_pic_start = p_pic;
p_buffer = b_hscale ? p_buffer_start : p_pic;
for ( i_x = p_vout->render.i_width / 8; i_x--; )
for ( i_x = p_filter->fmt_in.video.i_width / 8; i_x--; )
{
MMX_CALL (
MMX_INIT_32
......
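/* The same mechanical translation recurs in every converter touched by this
 * commit; summarised here for reference (comment only, not actual code):
 *   p_vout->render.i_width        ->  p_filter->fmt_in.video.i_width
 *   p_vout->render.i_height       ->  p_filter->fmt_in.video.i_height
 *   p_vout->output.i_width        ->  p_filter->fmt_out.video.i_width
 *   p_vout->output.i_height       ->  p_filter->fmt_out.video.i_height
 *   p_vout->chroma.p_sys          ->  p_filter->p_sys
 *   p_vout->chroma.pf_convert = F ->  p_filter->pf_video_filter_io = F
 */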
......@@ -29,7 +29,8 @@
# include "config.h"
#endif
#include <vlc_common.h>
#include <vlc/vlc.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i420_rgb.h"
......@@ -40,7 +41,7 @@ static void SetOffset( int, int, int, int, bool *, int *, int * );
/*****************************************************************************
* I420_RGB8: color YUV 4:2:0 to RGB 8 bpp
*****************************************************************************/
void I420_RGB8( vout_thread_t *p_vout, picture_t *p_src, picture_t *p_dest )
void I420_RGB8( filter_t *p_filter, picture_t *p_src, picture_t *p_dest )
{
/* We got this one from the old arguments */
uint8_t *p_pic = (uint8_t*)p_dest->p->p_pixels;
......@@ -54,13 +55,13 @@ void I420_RGB8( vout_thread_t *p_vout, picture_t *p_src, picture_t *p_dest )
unsigned int i_real_y; /* y % 4 */
int i_right_margin;
int i_scale_count; /* scale modulo counter */
unsigned int i_chroma_width = p_vout->render.i_width / 2;/* chroma width */
unsigned int i_chroma_width = p_filter->fmt_in.video.i_width / 2;/* chroma width */
/* Lookup table */
uint8_t * p_lookup = p_vout->chroma.p_sys->p_base;
uint8_t * p_lookup = p_filter->p_sys->p_base;
/* Offset array pointer */
int * p_offset_start = p_vout->chroma.p_sys->p_offset;
int * p_offset_start = p_filter->p_sys->p_offset;
int * p_offset;
const int i_source_margin = p_src->p[0].i_pitch
......@@ -79,8 +80,10 @@ void I420_RGB8( vout_thread_t *p_vout, picture_t *p_src, picture_t *p_dest )
static int dither22[4] = { 0x6, 0x16, 0x2, 0x12 };
static int dither23[4] = { 0x1e, 0xe, 0x1a, 0xa };
SetOffset( p_vout->render.i_width, p_vout->render.i_height,
p_vout->output.i_width, p_vout->output.i_height,
SetOffset( p_filter->fmt_in.video.i_width,
p_filter->fmt_in.video.i_height,
p_filter->fmt_out.video.i_width,
p_filter->fmt_out.video.i_height,
&b_hscale, &i_vscale, p_offset_start );
i_right_margin = p_dest->p->i_pitch - p_dest->p->i_visible_pitch;
......@@ -89,8 +92,9 @@ void I420_RGB8( vout_thread_t *p_vout, picture_t *p_src, picture_t *p_dest )
* Perform conversion
*/
i_scale_count = ( i_vscale == 1 ) ?
p_vout->output.i_height : p_vout->render.i_height;
for( i_y = 0, i_real_y = 0; i_y < p_vout->render.i_height; i_y++ )
p_filter->fmt_out.video.i_height :
p_filter->fmt_in.video.i_height;
for( i_y = 0, i_real_y = 0; i_y < p_filter->fmt_in.video.i_height; i_y++ )
{
/* Do horizontal and vertical scaling */
SCALE_WIDTH_DITHER( 420 );
......
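The RGB8 path quantises through the p_sys->p_base lookup table after applying an ordered dither; the dither2x[] rows above are per-line threshold sets. A generic sketch of ordered dithering before quantisation, with a hypothetical quantise() standing in for the module's table lookup:

    #include <stdint.h>

    /* hypothetical palette lookup standing in for p_sys->p_base indexing */
    static uint8_t quantise( int v ) { return (uint8_t)( v >> 5 ); }

    /* Add a per-position threshold before quantising so banding is broken up. */
    static void dither_line( uint8_t *p_dst, const uint8_t *p_src,
                             const int *p_thresholds /* 4 entries */, int i_width )
    {
        for( int x = 0; x < i_width; x++ )
        {
            int v = p_src[x] + p_thresholds[x & 3];
            if( v > 255 ) v = 255;
            p_dst[x] = quantise( v );
        }
    }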
......@@ -32,6 +32,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "I420,IYUV,YV12"
......@@ -41,7 +42,7 @@
* Local and extern prototypes.
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void I420_YMGA ( vout_thread_t *, picture_t *, picture_t * );
static void I420_YMGA ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
......@@ -65,22 +66,23 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_in.video.i_width & 1
|| p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','V','1','2'):
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('Y','M','G','A'):
p_vout->chroma.pf_convert = I420_YMGA;
p_filter->pf_video_filter_io = I420_YMGA;
break;
default:
......@@ -100,8 +102,8 @@ static int Activate( vlc_object_t *p_this )
/*****************************************************************************
* I420_YMGA: planar YUV 4:2:0 to Matrox's planar/packed YUV 4:2:0
*****************************************************************************/
static void I420_YMGA( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_YMGA( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_uv = p_dest->U_PIXELS;
uint8_t *p_u = p_source->U_PIXELS;
......
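The Activate() hunk above shows the new registration path: the module is probed as a filter object and installs its conversion routine through the pf_video_filter_io hook added to filter_t by this commit. A condensed sketch of that pattern, assuming the usual VLC plugin headers and keeping only the single I420 -> YMGA case (the real function also accepts the YV12/IYUV aliases and returns through its normal error paths):

    static int Activate( vlc_object_t *p_this )
    {
        filter_t *p_filter = (filter_t *)p_this;

        /* the converters assume even dimensions */
        if( p_filter->fmt_in.video.i_width & 1
         || p_filter->fmt_in.video.i_height & 1 )
            return -1;

        if( p_filter->fmt_in.video.i_chroma  != VLC_FOURCC('I','4','2','0')
         || p_filter->fmt_out.video.i_chroma != VLC_FOURCC('Y','M','G','A') )
            return -1;

        /* conversion into a caller-provided output picture */
        p_filter->pf_video_filter_io = I420_YMGA;
        return 0;
    }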
......@@ -32,6 +32,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#if defined (MODULE_NAME_IS_i420_yuy2_altivec) && defined(HAVE_ALTIVEC_H)
......@@ -57,15 +58,15 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void I420_YUY2 ( vout_thread_t *, picture_t *, picture_t * );
static void I420_YVYU ( vout_thread_t *, picture_t *, picture_t * );
static void I420_UYVY ( vout_thread_t *, picture_t *, picture_t * );
static void I420_YUY2 ( filter_t *, picture_t *, picture_t * );
static void I420_YVYU ( filter_t *, picture_t *, picture_t * );
static void I420_UYVY ( filter_t *, picture_t *, picture_t * );
#if !defined (MODULE_NAME_IS_i420_yuy2_altivec)
static void I420_IUYV ( vout_thread_t *, picture_t *, picture_t * );
static void I420_cyuv ( vout_thread_t *, picture_t *, picture_t * );
static void I420_IUYV ( filter_t *, picture_t *, picture_t * );
static void I420_cyuv ( filter_t *, picture_t *, picture_t * );
#endif
#if defined (MODULE_NAME_IS_i420_yuy2)
static void I420_Y211 ( vout_thread_t *, picture_t *, picture_t * );
static void I420_Y211 ( filter_t *, picture_t *, picture_t * );
#endif
#ifdef MODULE_NAME_IS_i420_yuy2_mmx
......@@ -105,47 +106,48 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_in.video.i_width & 1
|| p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','V','1','2'):
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
p_vout->chroma.pf_convert = I420_YUY2;
p_filter->pf_video_filter_io = I420_YUY2;
break;
case VLC_FOURCC('Y','V','Y','U'):
p_vout->chroma.pf_convert = I420_YVYU;
p_filter->pf_video_filter_io = I420_YVYU;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
p_vout->chroma.pf_convert = I420_UYVY;
p_filter->pf_video_filter_io = I420_UYVY;
break;
#if !defined (MODULE_NAME_IS_i420_yuy2_altivec)
case VLC_FOURCC('I','U','Y','V'):
p_vout->chroma.pf_convert = I420_IUYV;
p_filter->pf_video_filter_io = I420_IUYV;
break;
case VLC_FOURCC('c','y','u','v'):
p_vout->chroma.pf_convert = I420_cyuv;
p_filter->pf_video_filter_io = I420_cyuv;
break;
#endif
#if defined (MODULE_NAME_IS_i420_yuy2)
case VLC_FOURCC('Y','2','1','1'):
p_vout->chroma.pf_convert = I420_Y211;
p_filter->pf_video_filter_io = I420_Y211;
break;
#endif
......@@ -175,8 +177,8 @@ static inline unsigned long long read_cycles(void)
/*****************************************************************************
* I420_YUY2: planar YUV 4:2:0 to packed YUYV 4:2:2
*****************************************************************************/
static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_YUY2( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
......@@ -210,14 +212,14 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
vector unsigned char uv_vec;
vector unsigned char y_vec;
if( !( ( p_vout->render.i_width % 32 ) |
( p_vout->render.i_height % 2 ) ) )
if( !( ( p_filter->fmt_in.video.i_width % 32 ) |
( p_filter->fmt_in.video.i_height % 2 ) ) )
{
/* Width is a multiple of 32, we take 2 lines at a time */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
VEC_NEXT_LINES( );
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -225,15 +227,15 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
}
}
}
else if( !( ( p_vout->render.i_width % 16 ) |
( p_vout->render.i_height % 4 ) ) )
else if( !( ( p_filter->fmt_in.video.i_width % 16 ) |
( p_filter->fmt_in.video.i_height % 4 ) ) )
{
/* Width is only a multiple of 16, we take 4 lines at a time */
for( i_y = p_vout->render.i_height / 4 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 4 ; i_y-- ; )
{
/* Line 1 and 2, pixels 0 to ( width - 16 ) */
VEC_NEXT_LINES( );
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -249,7 +251,7 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
VEC_MERGE( vec_mergel );
/* Line 3 and 4, pixels 16 to ( width ) */
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -273,7 +275,7 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -282,7 +284,7 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
p_y2 += p_source->p[Y_PLANE].i_pitch;
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
for( i_x = p_vout->render.i_width / 8; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8; i_x-- ; )
{
C_YUV420_YUYV( );
C_YUV420_YUYV( );
......@@ -290,12 +292,12 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
C_YUV420_YUYV( );
}
#else
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
MMX_CALL( MMX_YUV420_YUYV );
}
#endif
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
......@@ -327,7 +329,7 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -335,11 +337,11 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YUYV_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
......@@ -355,7 +357,7 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -363,11 +365,11 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YUYV_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YUYV( );
}
......@@ -389,8 +391,8 @@ static void I420_YUY2( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I420_YVYU: planar YUV 4:2:0 to packed YVYU 4:2:2
*****************************************************************************/
static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_YVYU( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
......@@ -424,14 +426,14 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
vector unsigned char vu_vec;
vector unsigned char y_vec;
if( !( ( p_vout->render.i_width % 32 ) |
( p_vout->render.i_height % 2 ) ) )
if( !( ( p_filter->fmt_in.video.i_width % 32 ) |
( p_filter->fmt_in.video.i_height % 2 ) ) )
{
/* Width is a multiple of 32, we take 2 lines at a time */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
VEC_NEXT_LINES( );
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -439,15 +441,15 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
}
}
}
else if( !( ( p_vout->render.i_width % 16 ) |
( p_vout->render.i_height % 4 ) ) )
else if( !( ( p_filter->fmt_in.video.i_width % 16 ) |
( p_filter->fmt_in.video.i_height % 4 ) ) )
{
/* Width is only a multiple of 16, we take 4 lines at a time */
for( i_y = p_vout->render.i_height / 4 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 4 ; i_y-- ; )
{
/* Line 1 and 2, pixels 0 to ( width - 16 ) */
VEC_NEXT_LINES( );
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -463,7 +465,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
VEC_MERGE( vec_mergel );
/* Line 3 and 4, pixels 16 to ( width ) */
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -487,7 +489,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -495,7 +497,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_YVYU( );
......@@ -506,7 +508,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
MMX_CALL( MMX_YUV420_YVYU );
#endif
}
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
......@@ -537,7 +539,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -545,11 +547,11 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YVYU_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
......@@ -565,7 +567,7 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -573,11 +575,11 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_YVYU_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_YVYU( );
}
......@@ -598,8 +600,8 @@ static void I420_YVYU( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I420_UYVY: planar YUV 4:2:0 to packed UYVY 4:2:2
*****************************************************************************/
static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_UYVY( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
......@@ -633,14 +635,14 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
vector unsigned char uv_vec;
vector unsigned char y_vec;
if( !( ( p_vout->render.i_width % 32 ) |
( p_vout->render.i_height % 2 ) ) )
if( !( ( p_filter->fmt_in.video.i_width % 32 ) |
( p_filter->fmt_in.video.i_height % 2 ) ) )
{
/* Width is a multiple of 32, we take 2 lines at a time */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
VEC_NEXT_LINES( );
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -648,15 +650,15 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
}
}
}
else if( !( ( p_vout->render.i_width % 16 ) |
( p_vout->render.i_height % 4 ) ) )
else if( !( ( p_filter->fmt_in.video.i_width % 16 ) |
( p_filter->fmt_in.video.i_height % 4 ) ) )
{
/* Width is only a multiple of 16, we take 4 lines at a time */
for( i_y = p_vout->render.i_height / 4 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 4 ; i_y-- ; )
{
/* Line 1 and 2, pixels 0 to ( width - 16 ) */
VEC_NEXT_LINES( );
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -672,7 +674,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
VEC_MERGE( vec_mergel );
/* Line 3 and 4, pixels 16 to ( width ) */
for( i_x = p_vout->render.i_width / 32 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 32 ; i_x-- ; )
{
VEC_LOAD_UV( );
VEC_MERGE( vec_mergeh );
......@@ -696,7 +698,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -704,7 +706,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_UYVY( );
......@@ -715,7 +717,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
MMX_CALL( MMX_YUV420_UYVY );
#endif
}
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x--; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x--; )
{
C_YUV420_UYVY( );
}
......@@ -746,7 +748,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -754,11 +756,11 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
......@@ -774,7 +776,7 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -782,11 +784,11 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
......@@ -808,19 +810,19 @@ static void I420_UYVY( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I420_IUYV: planar YUV 4:2:0 to interleaved packed UYVY 4:2:2
*****************************************************************************/
static void I420_IUYV( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_IUYV( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
VLC_UNUSED(p_source); VLC_UNUSED(p_dest);
/* FIXME: TODO ! */
msg_Err( p_vout, "I420_IUYV unimplemented, please harass <sam@zoy.org>" );
msg_Err( p_filter, "I420_IUYV unimplemented, please harass <sam@zoy.org>" );
}
/*****************************************************************************
* I420_cyuv: planar YUV 4:2:0 to upside-down packed UYVY 4:2:2
*****************************************************************************/
static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_cyuv( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line1 = p_dest->p->p_pixels +
p_dest->p->i_visible_lines * p_dest->p->i_pitch
......@@ -841,7 +843,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
- p_dest->p->i_visible_pitch;
#if !defined(MODULE_NAME_IS_i420_yuy2_sse2)
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 -= 3 * p_dest->p->i_pitch;
p_line2 -= 3 * p_dest->p->i_pitch;
......@@ -849,7 +851,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if !defined (MODULE_NAME_IS_i420_yuy2_mmx)
C_YUV420_UYVY( );
......@@ -860,7 +862,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
MMX_CALL( MMX_YUV420_UYVY );
#endif
}
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
......@@ -887,7 +889,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line2|(intptr_t)p_y2))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -895,11 +897,11 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
......@@ -915,7 +917,7 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
else
{
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -923,11 +925,11 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV420_UYVY_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV420_UYVY( );
}
......@@ -950,8 +952,8 @@ static void I420_cyuv( vout_thread_t *p_vout, picture_t *p_source,
* I420_Y211: planar YUV 4:2:0 to packed YUYV 2:1:1
*****************************************************************************/
#if defined (MODULE_NAME_IS_i420_yuy2)
static void I420_Y211( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I420_Y211( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line1, *p_line2 = p_dest->p->p_pixels;
uint8_t *p_y1, *p_y2 = p_source->Y_PIXELS;
......@@ -967,7 +969,7 @@ static void I420_Y211( vout_thread_t *p_vout, picture_t *p_source,
const int i_dest_margin = p_dest->p->i_pitch
- p_dest->p->i_visible_pitch;
for( i_y = p_vout->render.i_height / 2 ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height / 2 ; i_y-- ; )
{
p_line1 = p_line2;
p_line2 += p_dest->p->i_pitch;
......@@ -975,7 +977,7 @@ static void I420_Y211( vout_thread_t *p_vout, picture_t *p_source,
p_y1 = p_y2;
p_y2 += p_source->p[Y_PLANE].i_pitch;
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
C_YUV420_Y211( );
C_YUV420_Y211( );
......
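Throughout this file the height loops step two lines at a time because one 4:2:0 chroma line is shared by two luma lines. An illustrative scalar sketch of what one pass of the YUYV packing produces (not the module's macros, just the output layout):

    #include <stdint.h>

    /* Pack two I420 luma lines plus one shared U/V line into two YUYV lines. */
    static void yuyv_pack_two_lines( uint8_t *d1, uint8_t *d2,
                                     const uint8_t *y1, const uint8_t *y2,
                                     const uint8_t *u, const uint8_t *v,
                                     int i_width )
    {
        for( int x = 0; x < i_width / 2; x++ )
        {
            d1[0] = y1[0]; d1[1] = u[0]; d1[2] = y1[1]; d1[3] = v[0];
            d2[0] = y2[0]; d2[1] = u[0]; d2[2] = y2[1]; d2[3] = v[0];
            d1 += 4; d2 += 4; y1 += 2; y2 += 2; u++; v++;
        }
    }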
......@@ -32,6 +32,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "I422,J422"
......@@ -42,9 +43,9 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void I422_I420( vout_thread_t *, picture_t *, picture_t * );
static void I422_YV12( vout_thread_t *, picture_t *, picture_t * );
static void I422_YUVA( vout_thread_t *, picture_t *, picture_t * );
static void I422_I420( filter_t *, picture_t *, picture_t * );
static void I422_YV12( filter_t *, picture_t *, picture_t * );
static void I422_YUVA( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
......@@ -62,31 +63,32 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_in.video.i_width & 1
|| p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('I','4','2','2'):
case VLC_FOURCC('J','4','2','2'):
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','0'):
case VLC_FOURCC('I','Y','U','V'):
case VLC_FOURCC('J','4','2','0'):
p_vout->chroma.pf_convert = I422_I420;
p_filter->pf_video_filter_io = I422_I420;
break;
case VLC_FOURCC('Y','V','1','2'):
p_vout->chroma.pf_convert = I422_YV12;
p_filter->pf_video_filter_io = I422_YV12;
break;
case VLC_FOURCC('Y','U','V','A'):
p_vout->chroma.pf_convert = I422_YUVA;
p_filter->pf_video_filter_io = I422_YUVA;
break;
default:
......@@ -105,15 +107,15 @@ static int Activate( vlc_object_t *p_this )
/*****************************************************************************
* I422_I420: planar YUV 4:2:2 to planar I420 4:2:0 Y:U:V
*****************************************************************************/
static void I422_I420( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_I420( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint16_t i_dpy = p_dest->p[Y_PLANE].i_pitch;
uint16_t i_spy = p_source->p[Y_PLANE].i_pitch;
uint16_t i_dpuv = p_dest->p[U_PLANE].i_pitch;
uint16_t i_spuv = p_source->p[U_PLANE].i_pitch;
uint16_t i_width = p_vout->render.i_width;
uint16_t i_y = p_vout->render.i_height;
uint16_t i_width = p_filter->fmt_in.video.i_width;
uint16_t i_y = p_filter->fmt_in.video.i_height;
uint8_t *p_dy = p_dest->Y_PIXELS + (i_y-1)*i_dpy;
uint8_t *p_y = p_source->Y_PIXELS + (i_y-1)*i_spy;
uint8_t *p_du = p_dest->U_PIXELS + (i_y/2-1)*i_dpuv;
......@@ -134,15 +136,15 @@ static void I422_I420( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I422_YV12: planar YUV 4:2:2 to planar YV12 4:2:0 Y:V:U
*****************************************************************************/
static void I422_YV12( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_YV12( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint16_t i_dpy = p_dest->p[Y_PLANE].i_pitch;
uint16_t i_spy = p_source->p[Y_PLANE].i_pitch;
uint16_t i_dpuv = p_dest->p[U_PLANE].i_pitch;
uint16_t i_spuv = p_source->p[U_PLANE].i_pitch;
uint16_t i_width = p_vout->render.i_width;
uint16_t i_y = p_vout->render.i_height;
uint16_t i_width = p_filter->fmt_in.video.i_width;
uint16_t i_y = p_filter->fmt_in.video.i_height;
uint8_t *p_dy = p_dest->Y_PIXELS + (i_y-1)*i_dpy;
uint8_t *p_y = p_source->Y_PIXELS + (i_y-1)*i_spy;
uint8_t *p_du = p_dest->V_PIXELS + (i_y/2-1)*i_dpuv; /* U and V are swapped */
......@@ -163,10 +165,10 @@ static void I422_YV12( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I422_YUVA: planar YUV 4:2:2 to planar YUVA 4:2:0:4 Y:U:V:A
*****************************************************************************/
static void I422_YUVA( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_YUVA( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
I422_I420( p_vout, p_source, p_dest );
I422_I420( p_filter, p_source, p_dest );
vlc_memset( p_dest->p[A_PLANE].p_pixels, 0xff,
p_dest->p[A_PLANE].i_lines * p_dest->p[A_PLANE].i_pitch );
}
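For reference, the step that turns 4:2:2 chroma into 4:2:0 is vertical decimation of the U and V planes; the simplest form keeps one chroma line out of two, sketched below (the module may traverse the planes differently, e.g. bottom-up as the pointer setup above suggests):

    #include <stdint.h>
    #include <string.h>

    /* Drop every other chroma line: one 4:2:2 plane -> one 4:2:0 plane. */
    static void chroma_422_to_420_plane( uint8_t *p_dst, int i_dst_pitch,
                                         const uint8_t *p_src, int i_src_pitch,
                                         int i_width, int i_height )
    {
        for( int y = 0; y < i_height / 2; y++ )
            memcpy( p_dst + y * i_dst_pitch,
                    p_src + 2 * y * i_src_pitch, /* skip odd source lines */
                    i_width / 2 );               /* chroma width is half luma */
    }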
......@@ -32,6 +32,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#include "i422_yuy2.h"
......@@ -48,14 +49,14 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void I422_YUY2 ( vout_thread_t *, picture_t *, picture_t * );
static void I422_YVYU ( vout_thread_t *, picture_t *, picture_t * );
static void I422_UYVY ( vout_thread_t *, picture_t *, picture_t * );
static void I422_IUYV ( vout_thread_t *, picture_t *, picture_t * );
static void I422_cyuv ( vout_thread_t *, picture_t *, picture_t * );
static void I422_YUY2 ( filter_t *, picture_t *, picture_t * );
static void I422_YVYU ( filter_t *, picture_t *, picture_t * );
static void I422_UYVY ( filter_t *, picture_t *, picture_t * );
static void I422_IUYV ( filter_t *, picture_t *, picture_t * );
static void I422_cyuv ( filter_t *, picture_t *, picture_t * );
#if defined (MODULE_NAME_IS_i422_yuy2)
static void I422_Y211 ( vout_thread_t *, picture_t *, picture_t * );
static void I422_Y211 ( filter_t *, picture_t *, picture_t * );
#endif
/*****************************************************************************
......@@ -84,44 +85,45 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_in.video.i_width & 1
|| p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('I','4','2','2'):
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
p_vout->chroma.pf_convert = I422_YUY2;
p_filter->pf_video_filter_io = I422_YUY2;
break;
case VLC_FOURCC('Y','V','Y','U'):
p_vout->chroma.pf_convert = I422_YVYU;
p_filter->pf_video_filter_io = I422_YVYU;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
p_vout->chroma.pf_convert = I422_UYVY;
p_filter->pf_video_filter_io = I422_UYVY;
break;
case VLC_FOURCC('I','U','Y','V'):
p_vout->chroma.pf_convert = I422_IUYV;
p_filter->pf_video_filter_io = I422_IUYV;
break;
case VLC_FOURCC('c','y','u','v'):
p_vout->chroma.pf_convert = I422_cyuv;
p_filter->pf_video_filter_io = I422_cyuv;
break;
#if defined (MODULE_NAME_IS_i422_yuy2)
case VLC_FOURCC('Y','2','1','1'):
p_vout->chroma.pf_convert = I422_Y211;
p_filter->pf_video_filter_io = I422_Y211;
break;
#endif
......@@ -141,8 +143,8 @@ static int Activate( vlc_object_t *p_this )
/*****************************************************************************
* I422_YUY2: planar YUV 4:2:2 to packed YUY2 4:2:2
*****************************************************************************/
static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_YUY2( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels;
uint8_t *p_y = p_source->Y_PIXELS;
......@@ -164,13 +166,13 @@ static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YUYV_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
}
......@@ -182,13 +184,13 @@ static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
}
else {
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YUYV_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
}
......@@ -202,9 +204,9 @@ static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
#else
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
......@@ -215,7 +217,7 @@ static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
MMX_CALL( MMX_YUV422_YUYV );
#endif
}
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV422_YUYV( p_line, p_y, p_u, p_v );
}
......@@ -234,8 +236,8 @@ static void I422_YUY2( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I422_YVYU: planar YUV 4:2:2 to packed YVYU 4:2:2
*****************************************************************************/
static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_YVYU( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels;
uint8_t *p_y = p_source->Y_PIXELS;
......@@ -257,13 +259,13 @@ static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YVYU_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
}
......@@ -275,13 +277,13 @@ static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
}
else {
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_YVYU_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
}
......@@ -295,9 +297,9 @@ static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
#else
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
......@@ -308,7 +310,7 @@ static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
MMX_CALL( MMX_YUV422_YVYU );
#endif
}
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV422_YVYU( p_line, p_y, p_u, p_v );
}
......@@ -327,8 +329,8 @@ static void I422_YVYU( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I422_UYVY: planar YUV 4:2:2 to packed UYVY 4:2:2
*****************************************************************************/
static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_UYVY( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels;
uint8_t *p_y = p_source->Y_PIXELS;
......@@ -350,13 +352,13 @@ static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
......@@ -368,13 +370,13 @@ static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
}
else {
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
......@@ -388,9 +390,9 @@ static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
#else
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
#if defined (MODULE_NAME_IS_i422_yuy2)
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
......@@ -401,7 +403,7 @@ static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
MMX_CALL( MMX_YUV422_UYVY );
#endif
}
for( i_x = ( p_vout->render.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
......@@ -420,19 +422,19 @@ static void I422_UYVY( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* I422_IUYV: planar YUV 4:2:2 to interleaved packed IUYV 4:2:2
*****************************************************************************/
static void I422_IUYV( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_IUYV( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
VLC_UNUSED(p_source); VLC_UNUSED(p_dest);
/* FIXME: TODO ! */
msg_Err( p_vout, "I422_IUYV unimplemented, please harass <sam@zoy.org>" );
msg_Err( p_filter, "I422_IUYV unimplemented, please harass <sam@zoy.org>" );
}
/*****************************************************************************
* I422_cyuv: planar YUV 4:2:2 to upside-down packed UYVY 4:2:2
*****************************************************************************/
static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_cyuv( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels + p_dest->p->i_visible_lines * p_dest->p->i_pitch;
uint8_t *p_y = p_source->Y_PIXELS;
......@@ -454,15 +456,15 @@ static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
((intptr_t)p_line|(intptr_t)p_y))) )
{
/* use faster SSE2 aligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
p_line -= 2 * p_dest->p->i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_ALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
......@@ -474,15 +476,15 @@ static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
}
else {
/* use slower SSE2 unaligned fetch and store */
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
p_line -= 2 * p_dest->p->i_pitch;
for( i_x = p_vout->render.i_width / 16 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 16 ; i_x-- ; )
{
SSE2_CALL( SSE2_YUV422_UYVY_UNALIGNED );
}
for( i_x = ( p_vout->render.i_width % 16 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_in.video.i_width % 16 ) / 2; i_x-- ; )
{
C_YUV422_UYVY( p_line, p_y, p_u, p_v );
}
......@@ -496,9 +498,9 @@ static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
#else
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
p_line -= 2 * p_dest->p->i_pitch;
......@@ -529,8 +531,8 @@ static void I422_cyuv( vout_thread_t *p_vout, picture_t *p_source,
* I422_Y211: planar YUV 4:2:2 to packed YUYV 2:1:1
*****************************************************************************/
#if defined (MODULE_NAME_IS_i422_yuy2)
static void I422_Y211( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void I422_Y211( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_dest->p->p_pixels + p_dest->p->i_visible_lines * p_dest->p->i_pitch;
uint8_t *p_y = p_source->Y_PIXELS;
......@@ -539,9 +541,9 @@ static void I422_Y211( vout_thread_t *p_vout, picture_t *p_source,
int i_x, i_y;
for( i_y = p_vout->render.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_in.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->render.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_in.video.i_width / 8 ; i_x-- ; )
{
C_YUV422_Y211( p_line, p_y, p_u, p_v );
C_YUV422_Y211( p_line, p_y, p_u, p_v );
......
......@@ -31,6 +31,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,cyuv"
......@@ -41,10 +42,10 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void YUY2_I420 ( vout_thread_t *, picture_t *, picture_t * );
static void YVYU_I420 ( vout_thread_t *, picture_t *, picture_t * );
static void UYVY_I420 ( vout_thread_t *, picture_t *, picture_t * );
static void cyuv_I420 ( vout_thread_t *, picture_t *, picture_t * );
static void YUY2_I420 ( filter_t *, picture_t *, picture_t * );
static void YVYU_I420 ( filter_t *, picture_t *, picture_t * );
static void UYVY_I420 ( filter_t *, picture_t *, picture_t * );
static void cyuv_I420 ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
......@@ -62,35 +63,36 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_in.video.i_width & 1
|| p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','0'):
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
p_vout->chroma.pf_convert = YUY2_I420;
p_filter->pf_video_filter_io = YUY2_I420;
break;
case VLC_FOURCC('Y','V','Y','U'):
p_vout->chroma.pf_convert = YVYU_I420;
p_filter->pf_video_filter_io = YVYU_I420;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
p_vout->chroma.pf_convert = UYVY_I420;
p_filter->pf_video_filter_io = UYVY_I420;
break;
case VLC_FOURCC('c','y','u','v'):
p_vout->chroma.pf_convert = cyuv_I420;
p_filter->pf_video_filter_io = cyuv_I420;
break;
default:
......@@ -109,8 +111,8 @@ static int Activate( vlc_object_t *p_this )
/*****************************************************************************
* YUY2_I420: packed YUY2 4:2:2 to planar YUV 4:2:0
*****************************************************************************/
static void YUY2_I420( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void YUY2_I420( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -129,11 +131,11 @@ static void YUY2_I420( vout_thread_t *p_vout, picture_t *p_source,
bool b_skip = false;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; p_line++; \
......@@ -143,14 +145,14 @@ static void YUY2_I420( vout_thread_t *p_vout, picture_t *p_source,
C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v );
C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUYV_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YUYV_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_u++ = *p_line++; \
......@@ -160,7 +162,7 @@ static void YUY2_I420( vout_thread_t *p_vout, picture_t *p_source,
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
......@@ -177,8 +179,8 @@ static void YUY2_I420( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* YVYU_I420: packed YVYU 4:2:2 to planar YUV 4:2:0
*****************************************************************************/
static void YVYU_I420( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void YVYU_I420( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -197,11 +199,11 @@ static void YVYU_I420( vout_thread_t *p_vout, picture_t *p_source,
bool b_skip = false;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; p_line++; \
......@@ -211,14 +213,14 @@ static void YVYU_I420( vout_thread_t *p_vout, picture_t *p_source,
C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v );
C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YVYU_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YVYU_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
......@@ -228,7 +230,7 @@ static void YVYU_I420( vout_thread_t *p_vout, picture_t *p_source,
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
......@@ -245,8 +247,8 @@ static void YVYU_I420( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* UYVY_I420: packed UYVY 4:2:2 to planar YUV 4:2:0
*****************************************************************************/
static void UYVY_I420( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void UYVY_I420( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -265,11 +267,11 @@ static void UYVY_I420( vout_thread_t *p_vout, picture_t *p_source,
bool b_skip = false;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_u++ = *p_line++; p_line++; \
......@@ -279,14 +281,14 @@ static void UYVY_I420( vout_thread_t *p_vout, picture_t *p_source,
C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v );
C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_UYVY_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_UYVY_YUV422( p_line, p_y, p_u, p_v ) \
*p_u++ = *p_line++; *p_y++ = *p_line++; \
......@@ -296,7 +298,7 @@ static void UYVY_I420( vout_thread_t *p_vout, picture_t *p_source,
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
......@@ -314,8 +316,8 @@ static void UYVY_I420( vout_thread_t *p_vout, picture_t *p_source,
* cyuv_I420: upside-down packed UYVY 4:2:2 to planar YUV 4:2:0
* FIXME
*****************************************************************************/
static void cyuv_I420( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void cyuv_I420( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -334,11 +336,11 @@ static void cyuv_I420( vout_thread_t *p_vout, picture_t *p_source,
bool b_skip = false;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
if( b_skip )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; p_line++; \
......@@ -348,14 +350,14 @@ static void cyuv_I420( vout_thread_t *p_vout, picture_t *p_source,
C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v );
C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_cyuv_YUV422_skip( p_line, p_y, p_u, p_v );
}
}
else
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_cyuv_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
......@@ -365,7 +367,7 @@ static void cyuv_I420( vout_thread_t *p_vout, picture_t *p_source,
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
......
......@@ -31,6 +31,7 @@
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include <vlc_vout.h>
#define SRC_FOURCC "YUY2,YUNV,YVYU,UYVY,UYNV,Y422,cyuv"
......@@ -41,10 +42,10 @@
*****************************************************************************/
static int Activate ( vlc_object_t * );
static void YUY2_I422 ( vout_thread_t *, picture_t *, picture_t * );
static void YVYU_I422 ( vout_thread_t *, picture_t *, picture_t * );
static void UYVY_I422 ( vout_thread_t *, picture_t *, picture_t * );
static void cyuv_I422 ( vout_thread_t *, picture_t *, picture_t * );
static void YUY2_I422 ( filter_t *, picture_t *, picture_t * );
static void YVYU_I422 ( filter_t *, picture_t *, picture_t * );
static void UYVY_I422 ( filter_t *, picture_t *, picture_t * );
static void cyuv_I422 ( filter_t *, picture_t *, picture_t * );
/*****************************************************************************
* Module descriptor
......@@ -62,35 +63,36 @@ vlc_module_end();
*****************************************************************************/
static int Activate( vlc_object_t *p_this )
{
vout_thread_t *p_vout = (vout_thread_t *)p_this;
filter_t *p_filter = (filter_t *)p_this;
if( p_vout->render.i_width & 1 || p_vout->render.i_height & 1 )
if( p_filter->fmt_in.video.i_width & 1
|| p_filter->fmt_in.video.i_height & 1 )
{
return -1;
}
switch( p_vout->output.i_chroma )
switch( p_filter->fmt_out.video.i_chroma )
{
case VLC_FOURCC('I','4','2','2'):
switch( p_vout->render.i_chroma )
switch( p_filter->fmt_in.video.i_chroma )
{
case VLC_FOURCC('Y','U','Y','2'):
case VLC_FOURCC('Y','U','N','V'):
p_vout->chroma.pf_convert = YUY2_I422;
p_filter->pf_video_filter_io = YUY2_I422;
break;
case VLC_FOURCC('Y','V','Y','U'):
p_vout->chroma.pf_convert = YVYU_I422;
p_filter->pf_video_filter_io = YVYU_I422;
break;
case VLC_FOURCC('U','Y','V','Y'):
case VLC_FOURCC('U','Y','N','V'):
case VLC_FOURCC('Y','4','2','2'):
p_vout->chroma.pf_convert = UYVY_I422;
p_filter->pf_video_filter_io = UYVY_I422;
break;
case VLC_FOURCC('c','y','u','v'):
p_vout->chroma.pf_convert = cyuv_I422;
p_filter->pf_video_filter_io = cyuv_I422;
break;
default:
......@@ -109,8 +111,8 @@ static int Activate( vlc_object_t *p_this )
/*****************************************************************************
* YUY2_I422: packed YUY2 4:2:2 to planar YUV 4:2:2
*****************************************************************************/
static void YUY2_I422( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void YUY2_I422( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -127,9 +129,9 @@ static void YUY2_I422( vout_thread_t *p_vout, picture_t *p_source,
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YUYV_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_u++ = *p_line++; \
......@@ -139,7 +141,7 @@ static void YUY2_I422( vout_thread_t *p_vout, picture_t *p_source,
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YUYV_YUV422( p_line, p_y, p_u, p_v );
}
......@@ -153,8 +155,8 @@ static void YUY2_I422( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* YVYU_I422: packed YVYU 4:2:2 to planar YUV 4:2:2
*****************************************************************************/
static void YVYU_I422( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void YVYU_I422( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -171,9 +173,9 @@ static void YVYU_I422( vout_thread_t *p_vout, picture_t *p_source,
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_YVYU_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
......@@ -183,7 +185,7 @@ static void YVYU_I422( vout_thread_t *p_vout, picture_t *p_source,
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_YVYU_YUV422( p_line, p_y, p_u, p_v );
}
......@@ -197,8 +199,8 @@ static void YVYU_I422( vout_thread_t *p_vout, picture_t *p_source,
/*****************************************************************************
* UYVY_I422: packed UYVY 4:2:2 to planar YUV 4:2:2
*****************************************************************************/
static void UYVY_I422( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void UYVY_I422( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -215,9 +217,9 @@ static void UYVY_I422( vout_thread_t *p_vout, picture_t *p_source,
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_UYVY_YUV422( p_line, p_y, p_u, p_v ) \
*p_u++ = *p_line++; *p_y++ = *p_line++; \
......@@ -227,7 +229,7 @@ static void UYVY_I422( vout_thread_t *p_vout, picture_t *p_source,
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_UYVY_YUV422( p_line, p_y, p_u, p_v );
}
......@@ -242,8 +244,8 @@ static void UYVY_I422( vout_thread_t *p_vout, picture_t *p_source,
* cyuv_I422: upside-down packed UYVY 4:2:2 to planar YUV 4:2:2
* FIXME
*****************************************************************************/
static void cyuv_I422( vout_thread_t *p_vout, picture_t *p_source,
picture_t *p_dest )
static void cyuv_I422( filter_t *p_filter, picture_t *p_source,
picture_t *p_dest )
{
uint8_t *p_line = p_source->p->p_pixels;
......@@ -260,9 +262,9 @@ static void cyuv_I422( vout_thread_t *p_vout, picture_t *p_source,
const int i_source_margin = p_source->p->i_pitch
- p_source->p->i_visible_pitch;
for( i_y = p_vout->output.i_height ; i_y-- ; )
for( i_y = p_filter->fmt_out.video.i_height ; i_y-- ; )
{
for( i_x = p_vout->output.i_width / 8 ; i_x-- ; )
for( i_x = p_filter->fmt_out.video.i_width / 8 ; i_x-- ; )
{
#define C_cyuv_YUV422( p_line, p_y, p_u, p_v ) \
*p_y++ = *p_line++; *p_v++ = *p_line++; \
......@@ -272,7 +274,7 @@ static void cyuv_I422( vout_thread_t *p_vout, picture_t *p_source,
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
for( i_x = ( p_vout->output.i_width % 8 ) / 2; i_x-- ; )
for( i_x = ( p_filter->fmt_out.video.i_width % 8 ) / 2; i_x-- ; )
{
C_cyuv_YUV422( p_line, p_y, p_u, p_v );
}
......
......@@ -655,13 +655,34 @@ static int InitThread( vout_thread_t *p_vout )
p_vout->b_direct = 0;
/* Choose the best module */
p_vout->chroma.p_module = module_Need( p_vout, "chroma", NULL, 0 );
if( p_vout->chroma.p_module == NULL )
p_vout->p_chroma = vlc_object_create( p_vout, VLC_OBJECT_FILTER );
filter_t *p_chroma = p_vout->p_chroma;
vlc_object_attach( p_chroma, p_vout );
/* TODO: Set the fmt_in and fmt_out stuff here */
p_chroma->fmt_in.video = p_vout->fmt_render;
p_chroma->fmt_out.video = p_vout->fmt_out;
/* TODO: put in a function */
p_chroma->fmt_out.video.i_rmask = p_vout->output.i_rmask;
p_chroma->fmt_out.video.i_gmask = p_vout->output.i_gmask;
p_chroma->fmt_out.video.i_bmask = p_vout->output.i_bmask;
p_chroma->fmt_out.video.i_rrshift = p_vout->output.i_rrshift;
p_chroma->fmt_out.video.i_lrshift = p_vout->output.i_lrshift;
p_chroma->fmt_out.video.i_rgshift = p_vout->output.i_rgshift;
p_chroma->fmt_out.video.i_lgshift = p_vout->output.i_lgshift;
p_chroma->fmt_out.video.i_rbshift = p_vout->output.i_rbshift;
p_chroma->fmt_out.video.i_lbshift = p_vout->output.i_lbshift;
msg_Err( p_vout, "HOLA! %4.4s\n", (char*)&p_chroma->fmt_in.video.i_chroma );
msg_Err( p_vout, "HOLA! %4.4s\n", (char*)&p_chroma->fmt_out.video.i_chroma );
p_chroma->p_module = module_Need( p_chroma, "chroma", NULL, 0 );
if( p_chroma->p_module == NULL )
{
msg_Err( p_vout, "no chroma module for %4.4s to %4.4s",
(char*)&p_vout->render.i_chroma,
(char*)&p_vout->output.i_chroma );
vlc_object_detach( p_vout->p_chroma );
p_vout->p_chroma = NULL;
p_vout->pf_end( p_vout );
vlc_mutex_unlock( &p_vout->change_lock );
return VLC_EGENERIC;
......@@ -1153,11 +1174,11 @@ static void RunThread( vout_thread_t *p_vout)
}
/* Need to reinitialise the chroma plugin */
if( p_vout->chroma.p_module )
if( p_vout->p_chroma->p_module )
{
if( p_vout->chroma.p_module->pf_deactivate )
p_vout->chroma.p_module->pf_deactivate( VLC_OBJECT(p_vout) );
p_vout->chroma.p_module->pf_activate( VLC_OBJECT(p_vout) );
if( p_vout->p_chroma->p_module->pf_deactivate )
p_vout->p_chroma->p_module->pf_deactivate( VLC_OBJECT(p_vout->p_chroma) );
p_vout->p_chroma->p_module->pf_activate( VLC_OBJECT(p_vout->p_chroma) );
}
}
......@@ -1172,7 +1193,8 @@ static void RunThread( vout_thread_t *p_vout)
if( !p_vout->b_direct )
{
module_Unneed( p_vout, p_vout->chroma.p_module );
module_Unneed( p_vout->p_chroma, p_vout->p_chroma->p_module );
p_vout->p_chroma = NULL;
}
vlc_mutex_lock( &p_vout->picture_lock );
......@@ -1245,7 +1267,8 @@ static void EndThread( vout_thread_t *p_vout )
if( !p_vout->b_direct )
{
module_Unneed( p_vout, p_vout->chroma.p_module );
module_Unneed( p_vout->p_chroma, p_vout->p_chroma->p_module );
p_vout->p_chroma->p_module = NULL;
}
/* Destroy all remaining pictures */
......
......@@ -33,6 +33,7 @@
#include <vlc_common.h>
#include <vlc_vout.h>
#include <vlc_osd.h>
#include <vlc_filter.h>
#include "vout_pictures.h"
#include <assert.h>
......@@ -377,7 +378,7 @@ picture_t * vout_RenderPicture( vout_thread_t *p_vout, picture_t *p_pic,
}
/* Convert image to the first direct buffer */
p_vout->chroma.pf_convert( p_vout, p_pic, p_tmp_pic );
p_vout->p_chroma->pf_video_filter_io( p_vout->p_chroma, p_pic, p_tmp_pic );
/* Render subpictures on the first direct buffer */
spu_RenderSubpictures( p_vout->p_spu, &p_vout->fmt_out, p_tmp_pic,
......@@ -397,7 +398,7 @@ picture_t * vout_RenderPicture( vout_thread_t *p_vout, picture_t *p_pic,
return NULL;
/* Convert image to the first direct buffer */
p_vout->chroma.pf_convert( p_vout, p_pic, &p_vout->p_picture[0] );
p_vout->p_chroma->pf_video_filter_io( p_vout->p_chroma, p_pic, &p_vout->p_picture[0] );
/* Render subpictures on the first direct buffer */
spu_RenderSubpictures( p_vout->p_spu, &p_vout->fmt_out,
......
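
To make the new contract easier to follow, here is a minimal sketch of a chroma conversion module written against the filter-based API this commit introduces. It only mirrors the structure visible in the hunks above (the filter_t cast in Activate, the even-dimension check, and the pf_video_filter_io assignment); the helper name MyChroma_Convert and the I422-to-YUY2 pair are illustrative assumptions, not part of the patch.

/* Minimal sketch, not part of this commit: a chroma converter exposed as
 * a video filter.  MyChroma_Convert and the FOURCC pair below are only
 * examples; a real module also needs its module descriptor. */
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>

static void MyChroma_Convert( filter_t *p_filter, picture_t *p_source,
                              picture_t *p_dest )
{
    /* The caller (the vout) hands in a preallocated p_dest; the converter
     * reads p_filter->fmt_in.video / fmt_out.video for the geometry. */
    VLC_UNUSED(p_filter); VLC_UNUSED(p_source); VLC_UNUSED(p_dest);
}

static int Activate( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;

    /* These converters still refuse odd dimensions. */
    if( p_filter->fmt_in.video.i_width & 1
     || p_filter->fmt_in.video.i_height & 1 )
        return -1;

    if( p_filter->fmt_in.video.i_chroma  == VLC_FOURCC('I','4','2','2')
     && p_filter->fmt_out.video.i_chroma == VLC_FOURCC('Y','U','Y','2') )
    {
        /* Register the conversion hook; the vout will call it through
         * p_vout->p_chroma->pf_video_filter_io as shown above. */
        p_filter->pf_video_filter_io = MyChroma_Convert;
        return 0;
    }
    return -1;
}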