Commit 3d1de17e authored by Antoine Cellerier

Untested RGBA blending in I420, RV24 and RV32.

parent f49699d4
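Both new routines, like the existing Blend* functions in this file, scale the per-pixel source alpha by the global i_alpha and then mix each component with the background using an 8-bit shift (MAX_TRANS / TRANS_BITS). A minimal standalone sketch of that per-component arithmetic, in plain C; the blend_component helper is illustrative only and not part of this commit:

#include <stdint.h>
#include <stdio.h>

#define MAX_TRANS 255
#define TRANS_BITS 8

/* Illustrative helper (not part of the commit): one colour component
 * blended the same way the Blend* routines in blend.c do it. */
static uint8_t blend_component( uint8_t fg, uint8_t bg, int i_trans )
{
    return (uint8_t)( ( (uint16_t)fg * i_trans +
                        (uint16_t)bg * (MAX_TRANS - i_trans) ) >> TRANS_BITS );
}

int main( void )
{
    int i_alpha = 255;        /* global opacity passed down to the Blend* functions */
    uint8_t src_alpha = 128;  /* per-pixel alpha taken from the RGBA source */
    int i_trans = ( src_alpha * i_alpha ) / 255;

    /* ~50% white over black gives roughly mid grey (127 with this rounding). */
    printf( "%d\n", (int)blend_component( 255, 0, i_trans ) );
    return 0;
}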
/*****************************************************************************
 * blend.c: alpha blend 2 pictures together
 *****************************************************************************
 * Copyright (C) 2003-2007 the VideoLAN team
 * $Id$
 *
 * Authors: Gildas Bazin <gbazin@videolan.org>
 *          Antoine Cellerier <dionoea @t videolan dot org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -43,6 +44,7 @@ static int OpenFilter ( vlc_object_t * );
static void CloseFilter( vlc_object_t * );

/* TODO i_alpha support for BlendR16 */

/* YUVA */
static void Blend( filter_t *, picture_t *, picture_t *, picture_t *,
                   int, int, int );
static void BlendI420( filter_t *, picture_t *, picture_t *, picture_t *,
@@ -53,6 +55,8 @@ static void BlendR24( filter_t *, picture_t *, picture_t *, picture_t *,
                      int, int, int, int, int );
static void BlendYUVPacked( filter_t *, picture_t *, picture_t *, picture_t *,
                            int, int, int, int, int );

/* YUVP */
static void BlendPalI420( filter_t *, picture_t *, picture_t *, picture_t *,
                          int, int, int, int, int );
static void BlendPalYUVPacked( filter_t *, picture_t *, picture_t *, picture_t *,
@@ -60,6 +64,12 @@ static void BlendPalYUVPacked( filter_t *, picture_t *, picture_t *, picture_t *
static void BlendPalRV( filter_t *, picture_t *, picture_t *, picture_t *,
                        int, int, int, int, int );

/* RGBA */
static void BlendRGBAI420( filter_t *, picture_t *, picture_t *, picture_t *,
                           int, int, int, int, int );
static void BlendRGBAR24( filter_t *, picture_t *, picture_t *, picture_t *,
                          int, int, int, int, int );
/*****************************************************************************
 * Module descriptor
 *****************************************************************************/
@@ -79,16 +89,26 @@ static int OpenFilter( vlc_object_t *p_this )
    /* Check if we can handle that format.
     * We could try to use a chroma filter if we can't. */
    int in_chroma = p_filter->fmt_in.video.i_chroma;
    int out_chroma = p_filter->fmt_out.video.i_chroma;
    if(
        ( ( in_chroma  != VLC_FOURCC('Y','U','V','A') &&
            in_chroma  != VLC_FOURCC('Y','U','V','P') ) ||
          ( out_chroma != VLC_FOURCC('I','4','2','0') &&
            out_chroma != VLC_FOURCC('Y','U','Y','2') &&
            out_chroma != VLC_FOURCC('Y','V','1','2') &&
            out_chroma != VLC_FOURCC('U','Y','V','Y') &&
            out_chroma != VLC_FOURCC('Y','V','Y','U') &&
            out_chroma != VLC_FOURCC('R','V','1','6') &&
            out_chroma != VLC_FOURCC('R','V','2','4') &&
            out_chroma != VLC_FOURCC('R','V','3','2') ) )
        &&
        ( ( in_chroma  != VLC_FOURCC('R','G','B','A') ) ||
          ( out_chroma != VLC_FOURCC('I','4','2','0') &&
            out_chroma != VLC_FOURCC('Y','V','1','2') &&
            out_chroma != VLC_FOURCC('R','V','2','4') &&
            out_chroma != VLC_FOURCC('R','V','3','2') ) )
      )
    {
        return VLC_EGENERIC;
    }
@@ -112,6 +132,17 @@ static int OpenFilter( vlc_object_t *p_this )
    return VLC_SUCCESS;
}
/*****************************************************************************
 * CloseFilter: clean up the filter
 *****************************************************************************/
static void CloseFilter( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t*)p_this;
    filter_sys_t *p_sys = p_filter->p_sys;

    free( p_sys );
}
/****************************************************************************
 * Blend: the whole thing
 ****************************************************************************
@@ -131,68 +162,118 @@ static void Blend( filter_t *p_filter, picture_t *p_dst,
    if( i_width <= 0 || i_height <= 0 ) return;

    switch( p_filter->fmt_in.video.i_chroma )
    {
        case VLC_FOURCC('Y','U','V','A'):
            switch( p_filter->fmt_out.video.i_chroma )
            {
                case VLC_FOURCC('I','4','2','0'):
                case VLC_FOURCC('Y','V','1','2'):
                    BlendI420( p_filter, p_dst, p_dst_orig, p_src,
                               i_x_offset, i_y_offset,
                               i_width, i_height, i_alpha );
                    return;
                case VLC_FOURCC('Y','U','Y','2'):
                case VLC_FOURCC('U','Y','V','Y'):
                case VLC_FOURCC('Y','V','Y','U'):
                    BlendYUVPacked( p_filter, p_dst, p_dst_orig, p_src,
                                    i_x_offset, i_y_offset,
                                    i_width, i_height, i_alpha );
                    return;
                case VLC_FOURCC('R','V','1','6'):
                    BlendR16( p_filter, p_dst, p_dst_orig, p_src,
                              i_x_offset, i_y_offset,
                              i_width, i_height, i_alpha );
                    return;
                case VLC_FOURCC('R','V','2','4'):
                case VLC_FOURCC('R','V','3','2'):
                    BlendR24( p_filter, p_dst, p_dst_orig, p_src,
                              i_x_offset, i_y_offset,
                              i_width, i_height, i_alpha );
                    return;
            }
        case VLC_FOURCC('Y','U','V','P'):
            switch( p_filter->fmt_out.video.i_chroma )
            {
                case VLC_FOURCC('I','4','2','0'):
                case VLC_FOURCC('Y','V','1','2'):
                    BlendPalI420( p_filter, p_dst, p_dst_orig, p_src,
                                  i_x_offset, i_y_offset,
                                  i_width, i_height, i_alpha );
                    return;
                case VLC_FOURCC('Y','U','Y','2'):
                case VLC_FOURCC('U','Y','V','Y'):
                case VLC_FOURCC('Y','V','Y','U'):
                    BlendPalYUVPacked( p_filter, p_dst, p_dst_orig, p_src,
                                       i_x_offset, i_y_offset,
                                       i_width, i_height, i_alpha );
                    return;
                case VLC_FOURCC('R','V','1','6'):
                case VLC_FOURCC('R','V','2','4'):
                case VLC_FOURCC('R','V','3','2'):
                    BlendPalRV( p_filter, p_dst, p_dst_orig, p_src,
                                i_x_offset, i_y_offset,
                                i_width, i_height, i_alpha );
                    return;
            }
        case VLC_FOURCC('R','G','B','A'):
            switch( p_filter->fmt_out.video.i_chroma )
            {
                case VLC_FOURCC('I','4','2','0'):
                case VLC_FOURCC('Y','V','1','2'):
                    BlendRGBAI420( p_filter, p_dst, p_dst_orig, p_src,
                                   i_x_offset, i_y_offset,
                                   i_width, i_height, i_alpha );
                    return;
                case VLC_FOURCC('R','V','2','4'):
                case VLC_FOURCC('R','V','3','2'):
                    BlendRGBAR24( p_filter, p_dst, p_dst_orig, p_src,
                                  i_x_offset, i_y_offset,
                                  i_width, i_height, i_alpha );
                    return;
            }
    }
    msg_Dbg( p_filter, "no matching alpha blending routine" );
}
/***********************************************************************
* Utils
***********************************************************************/
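/* yuv_to_rgb: fixed point conversion (SCALEBITS fractional bits) from
 * ITU-R BT.601 Y'CbCr in studio range (16-235 / 16-240) to full range
 * 8-bit R'G'B', clamped to [0,255]. */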
static inline void yuv_to_rgb( int *r, int *g, int *b,
                               uint8_t y1, uint8_t u1, uint8_t v1 )
{
    /* macros used for YUV pixel conversions */
#   define SCALEBITS 10
#   define ONE_HALF  (1 << (SCALEBITS - 1))
#   define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
#   define CLAMP( x ) (((x) > 255) ? 255 : ((x) < 0) ? 0 : (x));

    int y, cb, cr, r_add, g_add, b_add;

    cb = u1 - 128;
    cr = v1 - 128;
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;
    g_add = - FIX(0.34414*255.0/224.0) * cb
            - FIX(0.71414*255.0/224.0) * cr + ONE_HALF;
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;
    y = (y1 - 16) * FIX(255.0/219.0);
    *r = CLAMP((y + r_add) >> SCALEBITS);
    *g = CLAMP((y + g_add) >> SCALEBITS);
    *b = CLAMP((y + b_add) >> SCALEBITS);
}
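
/* rgb_to_yuv: integer approximation of the inverse conversion, from full
 * range 8-bit R'G'B' to BT.601 studio range Y'CbCr (the usual /256
 * coefficients, e.g. 66/129/25 for luma). */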
static inline void rgb_to_yuv( uint8_t *y, uint8_t *u, uint8_t *v,
                               int r, int g, int b )
{
    *y = ( ( (  66 * r + 129 * g +  25 * b + 128 ) >> 8 ) + 16 );
    *u =   ( ( -38 * r -  74 * g + 112 * b + 128 ) >> 8 ) + 128 ;
    *v =   ( ( 112 * r -  94 * g -  18 * b + 128 ) >> 8 ) + 128 ;
}
/***********************************************************************
* YUVA
***********************************************************************/
static void BlendI420( filter_t *p_filter, picture_t *p_dst,
                       picture_t *p_dst_orig, picture_t *p_src,
                       int i_x_offset, int i_y_offset,
@@ -310,29 +391,6 @@ static void BlendI420( filter_t *p_filter, picture_t *p_dst,
    return;
}
static void BlendR16( filter_t *p_filter, picture_t *p_dst_pic,
                      picture_t *p_dst_orig, picture_t *p_src,
                      int i_x_offset, int i_y_offset,
@@ -642,6 +700,9 @@ static void BlendYUVPacked( filter_t *p_filter, picture_t *p_dst_pic,
    return;
}
/***********************************************************************
* YUVP
***********************************************************************/
static void BlendPalI420( filter_t *p_filter, picture_t *p_dst,
                          picture_t *p_dst_orig, picture_t *p_src,
                          int i_x_offset, int i_y_offset,
@@ -970,13 +1031,200 @@ static void BlendPalRV( filter_t *p_filter, picture_t *p_dst_pic,
    return;
}
/***********************************************************************
 * RGBA
 ***********************************************************************/
static void BlendRGBAI420( filter_t *p_filter, picture_t *p_dst,
picture_t *p_dst_orig, picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src1_pitch, i_src2_pitch, i_dst_pitch, i_src_pix_pitch;
uint8_t *p_src1_y, *p_dst_y;
uint8_t *p_src1_u, *p_dst_u;
uint8_t *p_src1_v, *p_dst_v;
uint8_t *p_src2;
int i_x, i_y, i_trans;
uint8_t y, u, v;
vlc_bool_t b_even_scanline = i_y_offset % 2;
i_dst_pitch = p_dst->p[Y_PLANE].i_pitch;
p_dst_y = p_dst->p[Y_PLANE].p_pixels + i_x_offset +
p_filter->fmt_out.video.i_x_offset +
p_dst->p[Y_PLANE].i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_dst_u = p_dst->p[U_PLANE].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[U_PLANE].i_pitch;
p_dst_v = p_dst->p[V_PLANE].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[V_PLANE].i_pitch;
i_src1_pitch = p_dst_orig->p[Y_PLANE].i_pitch;
p_src1_y = p_dst_orig->p[Y_PLANE].p_pixels + i_x_offset +
p_filter->fmt_out.video.i_x_offset +
p_dst_orig->p[Y_PLANE].i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src1_u = p_dst_orig->p[U_PLANE].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst_orig->p[U_PLANE].i_pitch;
p_src1_v = p_dst_orig->p[V_PLANE].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst_orig->p[V_PLANE].i_pitch;
i_src_pix_pitch = p_src->p->i_pixel_pitch;
i_src2_pitch = p_src->p->i_pitch;
/* The source x offset is in pixels, so it is scaled by the RGBA pixel
   pitch rather than by the line pitch. */
p_src2 = p_src->p->p_pixels +
p_filter->fmt_in.video.i_x_offset * i_src_pix_pitch +
p_src->p->i_pitch * p_filter->fmt_in.video.i_y_offset;
#define MAX_TRANS 255
#define TRANS_BITS 8
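/* As in the other Blend* routines, the mix below divides by 256
   (>> TRANS_BITS) rather than by 255; fully transparent and fully opaque
   pixels are special-cased in the loop, so the error is at most one step. */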
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst_y += i_dst_pitch, p_src1_y += i_src1_pitch,
p_dst_u += b_even_scanline ? i_dst_pitch/2 : 0,
p_src1_u += b_even_scanline ? i_src1_pitch/2 : 0,
p_dst_v += b_even_scanline ? i_dst_pitch/2 : 0,
p_src1_v += b_even_scanline ? i_src1_pitch/2 : 0,
p_src2 += i_src2_pitch )
{
b_even_scanline = !b_even_scanline;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
#define R ( p_src2[i_x * i_src_pix_pitch + 0] )
#define G ( p_src2[i_x * i_src_pix_pitch + 1] )
#define B ( p_src2[i_x * i_src_pix_pitch + 2] )
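/* The source is packed RGBA: R, G, B at byte offsets 0..2 and the
   per-pixel alpha at offset 3, combined with the global i_alpha below. */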
i_trans = ( p_src2[i_x * i_src_pix_pitch + 3] * i_alpha ) / 255;
if( !i_trans )
{
/* Completely transparent. Don't change pixel */
continue;
}
else if( i_trans == MAX_TRANS )
{
/* Completely opaque. Completely overwrite underlying pixel */
rgb_to_yuv( &y, &u, &v, R, G, B );
p_dst_y[i_x] = y;
if( b_even_scanline && i_x % 2 == 0 )
{
p_dst_u[i_x/2] = u;
p_dst_v[i_x/2] = v;
}
continue;
}
/* Blending */
rgb_to_yuv( &y, &u, &v, R, G, B );
p_dst_y[i_x] = ( (uint16_t)y * i_trans +
(uint16_t)p_src1_y[i_x] * (MAX_TRANS - i_trans) )
>> TRANS_BITS;
if( b_even_scanline && i_x % 2 == 0 )
{
p_dst_u[i_x/2] = ( (uint16_t)u * i_trans +
(uint16_t)p_src1_u[i_x/2] * (MAX_TRANS - i_trans) )
>> TRANS_BITS;
p_dst_v[i_x/2] = ( (uint16_t)v * i_trans +
(uint16_t)p_src1_v[i_x/2] * (MAX_TRANS - i_trans) )
>> TRANS_BITS;
}
#undef R
#undef G
#undef B
}
}
#undef MAX_TRANS
#undef TRANS_BITS
return;
}
static void BlendRGBAR24( filter_t *p_filter, picture_t *p_dst_pic,
picture_t *p_dst_orig, picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src1_pitch, i_src2_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src1, *p_src2;
int i_x, i_y, i_pix_pitch, i_trans, i_src_pix_pitch;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src1_pitch = p_dst_orig->p->i_pitch;
p_src1 = p_dst_orig->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_orig->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src_pix_pitch = p_src->p->i_pixel_pitch;
i_src2_pitch = p_src->p->i_pitch;
/* The source x offset is scaled by the source (RGBA) pixel pitch, not by
   the destination pixel pitch. */
p_src2 = p_src->p->p_pixels +
p_filter->fmt_in.video.i_x_offset * i_src_pix_pitch +
p_src->p->i_pitch * p_filter->fmt_in.video.i_y_offset;
#define MAX_TRANS 255
#define TRANS_BITS 8
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch, p_src1 += i_src1_pitch, p_src2 += i_src2_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
#define R ( p_src2[i_x * i_src_pix_pitch + 0] )
#define G ( p_src2[i_x * i_src_pix_pitch + 1] )
#define B ( p_src2[i_x * i_src_pix_pitch + 2] )
i_trans = ( p_src2[i_x * i_src_pix_pitch + 3] * i_alpha ) / 255;
if( !i_trans )
{
/* Completely transparent. Don't change pixel */
continue;
}
else if( i_trans == MAX_TRANS )
{
/* Completely opaque. Completely overwrite underlying pixel */
p_dst[i_x * i_pix_pitch + 0] = R;
p_dst[i_x * i_pix_pitch + 1] = G;
p_dst[i_x * i_pix_pitch + 2] = B;
continue;
}
/* Blending */
p_dst[i_x * i_pix_pitch + 0] = ( R * i_trans +
(uint16_t)p_src1[i_x * i_pix_pitch + 0] *
(MAX_TRANS - i_trans) ) >> TRANS_BITS;
p_dst[i_x * i_pix_pitch + 1] = ( G * i_trans +
(uint16_t)p_src1[i_x * i_pix_pitch + 1] *
(MAX_TRANS - i_trans) ) >> TRANS_BITS;
p_dst[i_x * i_pix_pitch + 2] = ( B * i_trans +
(uint16_t)p_src1[i_x * i_pix_pitch + 2] *
(MAX_TRANS - i_trans) ) >> TRANS_BITS;
#undef R
#undef G
#undef B
}
}
#undef MAX_TRANS
#undef TRANS_BITS
return;
}