Commit 8ac537a3 authored by Laurent Aimar, committed by Jean-Baptiste Kempf

Reimplemented the blend filter in C++ and completed the set of supported colorspaces.

It now supports all combinations of:
Destinations:
 - YV9, YV12, I410, I411 (8 bits)
 - I420, I422, I444 (8, 9 and 10 bits, native endianness only)
 - YUYV and its variants
 - NV12/NV21
 - RV15, RV16, RV24, RV32
Sources:
 - YUVA
 - RGBA
 - YUVP

The quality of the blending is the same, and the speed for I420 is the same
(other combinations were not benchmarked).

Support for blending I420 over another picture has been removed (I don't
see the use case, and removing it simplifies the code).

It closes #5477.
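
For reference, both the old C code and the new C++ code perform the same
per-pixel "src over dst" operation; a minimal illustrative sketch (not part of
the patch, helper names made up), mirroring div255()/merge() from blend.cpp:

    /* Illustrative only */
    static inline unsigned div255(unsigned v) { return ((v >> 8) + v + 1) >> 8; }
    static inline uint8_t blend_px(uint8_t dst, uint8_t src, unsigned alpha) /* 0..255 */
    {
        return (uint8_t)div255((255 - alpha) * dst + alpha * src);
    }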

(cherry picked from commit fec8f97c)
(cherry picked from commit 4729c93f)

This commit is backported because it is the only way to get proper
Hi10p playback on Linux using Xv with subtitles. I do not think we want
to deal with support requests from people asking why they don't see their
subtitles, and switching to the OpenGL output with great drivers is...
Signed-off-by: Jean-Baptiste Kempf <jb@videolan.org>
parent f459d157
@@ -21,7 +21,7 @@ SOURCES_deinterlace = deinterlace/deinterlace.c deinterlace/deinterlace.h \
deinterlace/yadif.h deinterlace/yadif_template.h \
deinterlace/algo_phosphor.c deinterlace/algo_phosphor.h \
deinterlace/algo_ivtc.c deinterlace/algo_ivtc.h
-SOURCES_blend = blend.c
+SOURCES_blend = blend.cpp
SOURCES_scale = scale.c
SOURCES_marq = marq.c
SOURCES_rss = rss.c
/*****************************************************************************
* blend.c: alpha blend 2 pictures together
*****************************************************************************
* Copyright (C) 2003-2009 the VideoLAN team
* $Id$
*
* Authors: Gildas Bazin <gbazin@videolan.org>
* Antoine Cellerier <dionoea @t videolan dot org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <assert.h>
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include "filter_picture.h"
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int OpenFilter ( vlc_object_t * );
static void CloseFilter( vlc_object_t * );
vlc_module_begin ()
set_description( N_("Video pictures blending") )
set_capability( "video blending", 100 )
set_callbacks( OpenFilter, CloseFilter )
vlc_module_end ()
/****************************************************************************
* Local prototypes
****************************************************************************/
static void Blend( filter_t *, picture_t *, const picture_t *,
int, int, int );
/* YUVA */
static void BlendYUVAI420( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendYUVARV16( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendYUVARV24( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendYUVAYUVPacked( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
/* I420, YV12 */
static void BlendI420I420( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendI420I420_no_alpha(
filter_t *, picture_t *, const picture_t *,
int, int, int, int );
static void BlendI420R16( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendI420R24( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendI420YUVPacked( filter_t *, picture_t *,
const picture_t *, int, int, int, int, int );
/* YUVP */
static void BlendPalI420( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendPalYUVPacked( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendPalRV( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
/* RGBA */
static void BlendRGBAI420( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendRGBAYUVPacked( filter_t *, picture_t *,
const picture_t *, int, int, int, int, int );
static void BlendRGBAR16( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
static void BlendRGBAR24( filter_t *, picture_t *, const picture_t *,
int, int, int, int, int );
struct filter_sys_t
{
int i_blendcfg;
};
typedef void (*BlendFunction)( filter_t *,
picture_t *, const picture_t *,
int , int , int , int , int );
#define VLC_CODEC_PLANAR_420 { VLC_CODEC_I420, VLC_CODEC_J420, VLC_CODEC_YV12, 0 }
#define VLC_CODEC_PACKED_422 { VLC_CODEC_YUYV, VLC_CODEC_UYVY, VLC_CODEC_YVYU, VLC_CODEC_VYUY, 0 }
#define VLC_CODEC_RGB_16 { VLC_CODEC_RGB15, VLC_CODEC_RGB16, 0 }
#define VLC_CODEC_RGB_24 { VLC_CODEC_RGB24, VLC_CODEC_RGB32, 0 }
#define BLEND_CFG( fccSrc, fctPlanar, fctPacked, fctRgb16, fctRgb24 ) \
{ .src = fccSrc, .p_dst = VLC_CODEC_PLANAR_420, .pf_blend = fctPlanar }, \
{ .src = fccSrc, .p_dst = VLC_CODEC_PACKED_422, .pf_blend = fctPacked }, \
{ .src = fccSrc, .p_dst = VLC_CODEC_RGB_16, .pf_blend = fctRgb16 }, \
{ .src = fccSrc, .p_dst = VLC_CODEC_RGB_24, .pf_blend = fctRgb24 }
static const struct
{
vlc_fourcc_t src;
vlc_fourcc_t p_dst[16];
BlendFunction pf_blend;
} p_blend_cfg[] = {
BLEND_CFG( VLC_CODEC_YUVA, BlendYUVAI420, BlendYUVAYUVPacked, BlendYUVARV16, BlendYUVARV24 ),
BLEND_CFG( VLC_CODEC_YUVP, BlendPalI420, BlendPalYUVPacked, BlendPalRV, BlendPalRV ),
BLEND_CFG( VLC_CODEC_RGBA, BlendRGBAI420, BlendRGBAYUVPacked, BlendRGBAR16, BlendRGBAR24 ),
BLEND_CFG( VLC_CODEC_I420, BlendI420I420, BlendI420YUVPacked, BlendI420R16, BlendI420R24 ),
BLEND_CFG( VLC_CODEC_YV12, BlendI420I420, BlendI420YUVPacked, BlendI420R16, BlendI420R24 ),
{ 0, {0,}, NULL }
};
/*****************************************************************************
* OpenFilter: probe the filter and return score
*****************************************************************************/
static int OpenFilter( vlc_object_t *p_this )
{
filter_t *p_filter = (filter_t*)p_this;
filter_sys_t *p_sys = (filter_sys_t *)malloc( sizeof( filter_sys_t ) );
if( !p_sys )
return VLC_ENOMEM;
p_filter->p_sys = p_sys;
p_filter->p_sys->i_blendcfg = -1;
/* Check if we can handle that format.
* We could try to use a chroma filter if we can't. */
int in_chroma = p_filter->fmt_in.video.i_chroma;
int out_chroma = p_filter->fmt_out.video.i_chroma;
if( ( in_chroma != VLC_CODEC_YUVA && in_chroma != VLC_CODEC_I420 &&
in_chroma != VLC_CODEC_YV12 && in_chroma != VLC_CODEC_YUVP &&
in_chroma != VLC_CODEC_RGBA ) ||
( out_chroma != VLC_CODEC_I420 && out_chroma != VLC_CODEC_J420 &&
out_chroma != VLC_CODEC_YV12 &&
out_chroma != VLC_CODEC_YUYV && out_chroma != VLC_CODEC_YVYU &&
out_chroma != VLC_CODEC_UYVY && out_chroma != VLC_CODEC_VYUY &&
out_chroma != VLC_CODEC_RGB15 &&
out_chroma != VLC_CODEC_RGB16 &&
out_chroma != VLC_CODEC_RGB24 &&
out_chroma != VLC_CODEC_RGB32 ) )
{
return VLC_EGENERIC;
}
for( int i = 0; p_blend_cfg[i].src != 0; i++ )
{
if( p_blend_cfg[i].src != p_filter->fmt_in.video.i_chroma )
continue;
for( int j = 0; p_blend_cfg[i].p_dst[j] != 0; j++ )
{
if( p_blend_cfg[i].p_dst[j] != p_filter->fmt_out.video.i_chroma )
continue;
p_sys->i_blendcfg = i;
}
}
if( p_sys->i_blendcfg == -1 )
{
msg_Dbg( p_filter, "no matching alpha blending routine "
"(chroma: %4.4s -> %4.4s)",
(char *)&p_filter->fmt_in.video.i_chroma,
(char *)&p_filter->fmt_out.video.i_chroma );
free( p_sys );
return VLC_EGENERIC;
}
/* Misc init */
p_filter->pf_video_blend = Blend;
msg_Dbg( p_filter, "chroma: %4.4s -> %4.4s",
(char *)&p_filter->fmt_in.video.i_chroma,
(char *)&p_filter->fmt_out.video.i_chroma );
return VLC_SUCCESS;
}
/*****************************************************************************
* CloseFilter: clean up the filter
*****************************************************************************/
static void CloseFilter( vlc_object_t *p_this )
{
filter_t *p_filter = (filter_t*)p_this;
free( p_filter->p_sys );
}
/****************************************************************************
* Blend: the whole thing
****************************************************************************
* This function blends the source picture onto the destination picture,
* using the routine selected in OpenFilter().
****************************************************************************/
static void Blend( filter_t *p_filter,
picture_t *p_dst, const picture_t *p_src,
int i_x_offset, int i_y_offset, int i_alpha )
{
int i_width, i_height;
if( i_alpha == 0 )
return;
i_width = __MIN((int)p_filter->fmt_out.video.i_visible_width - i_x_offset,
(int)p_filter->fmt_in.video.i_visible_width);
i_height = __MIN((int)p_filter->fmt_out.video.i_visible_height -i_y_offset,
(int)p_filter->fmt_in.video.i_visible_height);
if( i_width <= 0 || i_height <= 0 )
return;
video_format_FixRgb( &p_filter->fmt_out.video );
video_format_FixRgb( &p_filter->fmt_in.video );
#if 0
msg_Dbg( p_filter, "chroma: %4.4s -> %4.4s",
(char *)&p_filter->fmt_in.video.i_chroma,
(char *)&p_filter->fmt_out.video.i_chroma );
#endif
p_blend_cfg[p_filter->p_sys->i_blendcfg].pf_blend( p_filter, p_dst, p_src,
i_x_offset, i_y_offset,
i_width, i_height, i_alpha );
}
/***********************************************************************
* Utils
***********************************************************************/
#define MAX_TRANS 255
#define TRANS_BITS 8
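/* Blend v1 over v2 with weight a in [0, MAX_TRANS].
 * Note: the weights sum to 255 but the shift divides by 256, so intermediate
 * alpha values come out up to one LSB low; the a == 0 and a == MAX_TRANS
 * shortcuts keep the two extremes exact. */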
static inline int vlc_blend( int v1, int v2, int a )
{
/* TODO bench if the tests really increase speed */
if( a == 0 )
return v2;
else if( a == MAX_TRANS )
return v1;
return ( v1 * a + v2 * (MAX_TRANS - a ) ) >> TRANS_BITS;
}
static inline int vlc_alpha( int t, int a )
{
if( a == 255 )
return t;
return (t * a) / 255;
}
static uint8_t *vlc_plane_start( int *pi_pitch,
const picture_t *p_picture,
int i_plane,
int i_x_offset, int i_y_offset,
const video_format_t *p_fmt,
int r )
{
const int i_pitch = p_picture->p[i_plane].i_pitch;
uint8_t *p_pixels = p_picture->p[i_plane].p_pixels;
const int i_dx = ( i_x_offset + p_fmt->i_x_offset ) / r;
const int i_dy = ( i_y_offset + p_fmt->i_y_offset ) / r;
if( pi_pitch )
*pi_pitch = i_pitch;
return &p_pixels[ i_dy * i_pitch + i_dx ];
}
static void vlc_yuv_packed_index( int *pi_y, int *pi_u, int *pi_v, vlc_fourcc_t i_chroma )
{
static const struct {
vlc_fourcc_t chroma;
int y, u ,v;
} p_index[] = {
{ VLC_CODEC_YUYV, 0, 1, 3 },
{ VLC_CODEC_UYVY, 1, 0, 2 },
{ VLC_CODEC_YVYU, 0, 3, 1 },
{ VLC_CODEC_VYUY, 1, 2, 0 },
{ 0, 0, 0, 0 }
};
int i;
for( i = 0; p_index[i].chroma != 0; i++ )
{
if( p_index[i].chroma == i_chroma )
break;
}
*pi_y = p_index[i].y;
*pi_u = p_index[i].u;
*pi_v = p_index[i].v;
}
static void vlc_blend_packed( uint8_t *p_dst,
int i_offset0, int i_offset1, int i_offset2,
int c0, int c1, int c2, int i_alpha,
bool b_do12 )
{
p_dst[i_offset0] = vlc_blend( c0, p_dst[i_offset0], i_alpha );
if( b_do12 )
{
p_dst[i_offset1] = vlc_blend( c1, p_dst[i_offset1], i_alpha );
p_dst[i_offset2] = vlc_blend( c2, p_dst[i_offset2], i_alpha );
}
}
static void vlc_blend_rgb16( uint16_t *p_dst,
int R, int G, int B, int i_alpha,
const video_format_t *p_fmt )
{
const int i_pix = *p_dst;
const int r = ( i_pix & p_fmt->i_rmask ) >> p_fmt->i_lrshift;
const int g = ( i_pix & p_fmt->i_gmask ) >> p_fmt->i_lgshift;
const int b = ( i_pix & p_fmt->i_bmask ) >> p_fmt->i_lbshift;
*p_dst = ( vlc_blend( R >> p_fmt->i_rrshift, r, i_alpha ) << p_fmt->i_lrshift ) |
( vlc_blend( G >> p_fmt->i_rgshift, g, i_alpha ) << p_fmt->i_lgshift ) |
( vlc_blend( B >> p_fmt->i_rbshift, b, i_alpha ) << p_fmt->i_lbshift );
}
static void vlc_rgb_index( int *pi_rindex, int *pi_gindex, int *pi_bindex,
const video_format_t *p_fmt )
{
if( p_fmt->i_chroma != VLC_CODEC_RGB24 && p_fmt->i_chroma != VLC_CODEC_RGB32 )
return;
/* XXX this only works if the masks are byte-aligned */
#ifdef WORDS_BIGENDIAN
const int i_mask_bits = p_fmt->i_chroma == VLC_CODEC_RGB24 ? 24 : 32;
*pi_rindex = ( i_mask_bits - p_fmt->i_lrshift ) / 8;
*pi_gindex = ( i_mask_bits - p_fmt->i_lgshift ) / 8;
*pi_bindex = ( i_mask_bits - p_fmt->i_lbshift ) / 8;
#else
*pi_rindex = p_fmt->i_lrshift / 8;
*pi_gindex = p_fmt->i_lgshift / 8;
*pi_bindex = p_fmt->i_lbshift / 8;
#endif
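/* Example (little endian): RV32 with masks 0x00ff0000/0x0000ff00/0x000000ff
 * has i_lrshift/i_lgshift/i_lbshift = 16/8/0, giving byte indices 2/1/0. */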
}
/***********************************************************************
* YUVA
***********************************************************************/
static void BlendYUVAI420( filter_t *p_filter,
picture_t *p_dst, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_src_y, *p_dst_y;
uint8_t *p_src_u, *p_dst_u;
uint8_t *p_src_v, *p_dst_v;
uint8_t *p_trans;
int i_x, i_y, i_trans = 0;
bool b_even_scanline = i_y_offset % 2;
bool b_swap_up = vlc_fourcc_AreUVPlanesSwapped( p_filter->fmt_out.video.i_chroma,
VLC_CODEC_I420 );
p_dst_y = vlc_plane_start( &i_dst_pitch, p_dst, Y_PLANE,
i_x_offset, i_y_offset, &p_filter->fmt_out.video, 1 );
p_dst_u = vlc_plane_start( NULL, p_dst, b_swap_up ? V_PLANE : U_PLANE,
i_x_offset, i_y_offset, &p_filter->fmt_out.video, 2 );
p_dst_v = vlc_plane_start( NULL, p_dst, b_swap_up ? U_PLANE : V_PLANE,
i_x_offset, i_y_offset, &p_filter->fmt_out.video, 2 );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_trans = vlc_plane_start( NULL, p_src, A_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++, p_trans += i_src_pitch,
p_dst_y += i_dst_pitch, p_src_y += i_src_pitch,
p_dst_u += b_even_scanline ? i_dst_pitch/2 : 0,
p_src_u += i_src_pitch,
p_dst_v += b_even_scanline ? i_dst_pitch/2 : 0,
p_src_v += i_src_pitch )
{
b_even_scanline = !b_even_scanline;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
if( p_trans )
i_trans = vlc_alpha( p_trans[i_x], i_alpha );
if( !i_trans )
continue;
/* Blending */
p_dst_y[i_x] = vlc_blend( p_src_y[i_x], p_dst_y[i_x], i_trans );
if( b_even_scanline && i_x % 2 == 0 )
{
p_dst_u[i_x/2] = vlc_blend( p_src_u[i_x], p_dst_u[i_x/2], i_trans );
p_dst_v[i_x/2] = vlc_blend( p_src_v[i_x], p_dst_v[i_x/2], i_trans );
}
}
}
}
static void BlendYUVARV16( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src_y;
uint8_t *p_src_u, *p_src_v;
uint8_t *p_trans;
int i_x, i_y, i_pix_pitch, i_trans = 0;
int r, g, b;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_trans = vlc_plane_start( NULL, p_src, A_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++, p_trans += i_src_pitch,
p_dst += i_dst_pitch,
p_src_y += i_src_pitch, p_src_u += i_src_pitch,
p_src_v += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
if( p_trans )
i_trans = vlc_alpha( p_trans[i_x], i_alpha );
if( !i_trans )
continue;
/* Blending */
yuv_to_rgb( &r, &g, &b,
p_src_y[i_x], p_src_u[i_x], p_src_v[i_x] );
vlc_blend_rgb16( (uint16_t*)&p_dst[i_x * i_pix_pitch],
r, g, b, i_trans, &p_filter->fmt_out.video );
}
}
}
static void BlendYUVARV24( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src_y;
uint8_t *p_src_u, *p_src_v;
uint8_t *p_trans;
int i_x, i_y, i_pix_pitch, i_trans = 0;
int r, g, b;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_trans = vlc_plane_start( NULL, p_src, A_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
if( (i_pix_pitch == 4)
&& (((((intptr_t)p_dst)|i_dst_pitch) /* FIXME? */
& 3) == 0) )
{
/*
** if picture pixels are 32 bits long and line addresses are 32-bit
** aligned, optimize rendering
*/
uint32_t *p32_dst = (uint32_t *)p_dst;
uint32_t i32_dst_pitch = (uint32_t)(i_dst_pitch>>2);
int i_rshift, i_gshift, i_bshift;
uint32_t i_rmask, i_gmask, i_bmask;
i_rmask = p_filter->fmt_out.video.i_rmask;
i_gmask = p_filter->fmt_out.video.i_gmask;
i_bmask = p_filter->fmt_out.video.i_bmask;
i_rshift = p_filter->fmt_out.video.i_lrshift;
i_gshift = p_filter->fmt_out.video.i_lgshift;
i_bshift = p_filter->fmt_out.video.i_lbshift;
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++, p_trans += i_src_pitch,
p32_dst += i32_dst_pitch,
p_src_y += i_src_pitch, p_src_u += i_src_pitch,
p_src_v += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
if( p_trans )
i_trans = vlc_alpha( p_trans[i_x], i_alpha );
if( !i_trans )
continue;
if( i_trans == MAX_TRANS )
{
/* Completely opaque. Completely overwrite underlying pixel */
yuv_to_rgb( &r, &g, &b,
p_src_y[i_x], p_src_u[i_x], p_src_v[i_x] );
p32_dst[i_x] = (r<<i_rshift) |
(g<<i_gshift) |
(b<<i_bshift);
}
else
{
/* Blending */
uint32_t i_pix_dst = p32_dst[i_x];
yuv_to_rgb( &r, &g, &b,
p_src_y[i_x], p_src_u[i_x], p_src_v[i_x] );
p32_dst[i_x] = ( vlc_blend( r, (i_pix_dst & i_rmask)>>i_rshift, i_trans ) << i_rshift ) |
( vlc_blend( g, (i_pix_dst & i_gmask)>>i_gshift, i_trans ) << i_gshift ) |
( vlc_blend( b, (i_pix_dst & i_bmask)>>i_bshift, i_trans ) << i_bshift );
}
}
}
}
else
{
int i_rindex, i_gindex, i_bindex;
vlc_rgb_index( &i_rindex, &i_gindex, &i_bindex, &p_filter->fmt_out.video );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++, p_trans += i_src_pitch,
p_dst += i_dst_pitch,
p_src_y += i_src_pitch, p_src_u += i_src_pitch,
p_src_v += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
if( p_trans )
i_trans = vlc_alpha( p_trans[i_x], i_alpha );
if( !i_trans )
continue;
/* Blending */
yuv_to_rgb( &r, &g, &b,
p_src_y[i_x], p_src_u[i_x], p_src_v[i_x] );
vlc_blend_packed( &p_dst[ i_x * i_pix_pitch],
i_rindex, i_gindex, i_bindex,
r, g, b, i_trans, true );
}
}
}
}
static void BlendYUVAYUVPacked( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src_y;
uint8_t *p_src_u, *p_src_v;
uint8_t *p_trans;
int i_x, i_y, i_pix_pitch, i_trans = 0;
bool b_even = !((i_x_offset + p_filter->fmt_out.video.i_x_offset)%2);
int i_l_offset, i_u_offset, i_v_offset;
vlc_yuv_packed_index( &i_l_offset, &i_u_offset, &i_v_offset,
p_filter->fmt_out.video.i_chroma );
i_pix_pitch = 2;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_trans = vlc_plane_start( NULL, p_src, A_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
i_width &= ~1; /* Needs to be a multiple of 2 */
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++, p_trans += i_src_pitch,
p_dst += i_dst_pitch,
p_src_y += i_src_pitch, p_src_u += i_src_pitch,
p_src_v += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++, b_even = !b_even )
{
i_trans = vlc_alpha( p_trans[i_x], i_alpha );
if( !i_trans )
continue;
/* Blending */
if( b_even )
{
int i_u;
int i_v;
/* FIXME what's with 0xaa ? */
if( p_trans[i_x+1] > 0xaa )
{
i_u = (p_src_u[i_x]+p_src_u[i_x+1])>>1;
i_v = (p_src_v[i_x]+p_src_v[i_x+1])>>1;
}
else
{
i_u = p_src_u[i_x];
i_v = p_src_v[i_x];
}
vlc_blend_packed( &p_dst[i_x * 2],
i_l_offset, i_u_offset, i_v_offset,
p_src_y[i_x], i_u, i_v, i_trans, true );
}
else
{
p_dst[i_x * 2 + i_l_offset] = vlc_blend( p_src_y[i_x], p_dst[i_x * 2 + i_l_offset], i_trans );
}
}
}
}
/***********************************************************************
* I420, YV12
***********************************************************************/
static void BlendI420I420( filter_t *p_filter,
picture_t *p_dst, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_src_y, *p_dst_y;
uint8_t *p_src_u, *p_dst_u;
uint8_t *p_src_v, *p_dst_v;
int i_x, i_y;
bool b_even_scanline = i_y_offset % 2;
if( i_alpha == 0xff )
{
BlendI420I420_no_alpha( p_filter, p_dst, p_src,
i_x_offset, i_y_offset, i_width, i_height );
return;
}
bool b_swap_up = vlc_fourcc_AreUVPlanesSwapped( p_filter->fmt_out.video.i_chroma,
VLC_CODEC_I420 );
i_dst_pitch = p_dst->p[Y_PLANE].i_pitch;
p_dst_y = p_dst->p[Y_PLANE].p_pixels + i_x_offset +
p_filter->fmt_out.video.i_x_offset +
p_dst->p[Y_PLANE].i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
const int i_u_plane = b_swap_up ? V_PLANE : U_PLANE;
p_dst_u = p_dst->p[i_u_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_u_plane].i_pitch;
const int i_v_plane = b_swap_up ? U_PLANE : V_PLANE;
p_dst_v = p_dst->p[i_v_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_v_plane].i_pitch;
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
i_width &= ~1;
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst_y += i_dst_pitch,
p_src_y += i_src_pitch )
{
if( b_even_scanline )
{
p_dst_u += i_dst_pitch/2;
p_dst_v += i_dst_pitch/2;
}
b_even_scanline = !b_even_scanline;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
if( !i_alpha )
continue;
/* Blending */
p_dst_y[i_x] = vlc_blend( p_src_y[i_x], p_dst_y[i_x], i_alpha );
if( b_even_scanline && i_x % 2 == 0 )
{
p_dst_u[i_x/2] = vlc_blend( p_src_u[i_x/2], p_dst_u[i_x/2], i_alpha );
p_dst_v[i_x/2] = vlc_blend( p_src_v[i_x/2], p_dst_v[i_x/2], i_alpha );
}
}
if( i_y%2 == 1 )
{
p_src_u += i_src_pitch/2;
p_src_v += i_src_pitch/2;
}
}
}
static void BlendI420I420_no_alpha( filter_t *p_filter,
picture_t *p_dst, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_src_y, *p_dst_y;
uint8_t *p_src_u, *p_dst_u;
uint8_t *p_src_v, *p_dst_v;
int i_y;
bool b_even_scanline = i_y_offset % 2;
bool b_swap_up = vlc_fourcc_AreUVPlanesSwapped( p_filter->fmt_out.video.i_chroma,
VLC_CODEC_I420 );
i_dst_pitch = p_dst->p[Y_PLANE].i_pitch;
p_dst_y = p_dst->p[Y_PLANE].p_pixels + i_x_offset +
p_filter->fmt_out.video.i_x_offset +
p_dst->p[Y_PLANE].i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
const int i_u_plane = b_swap_up ? V_PLANE : U_PLANE;
p_dst_u = p_dst->p[i_u_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_u_plane].i_pitch;
const int i_v_plane = b_swap_up ? U_PLANE : V_PLANE;
p_dst_v = p_dst->p[i_v_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_v_plane].i_pitch;
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
i_width &= ~1;
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height;
i_y++, p_dst_y += i_dst_pitch, p_src_y += i_src_pitch )
{
/* Completely opaque. Completely overwrite underlying pixel */
vlc_memcpy( p_dst_y, p_src_y, i_width );
if( b_even_scanline )
{
p_dst_u += i_dst_pitch/2;
p_dst_v += i_dst_pitch/2;
}
else
{
vlc_memcpy( p_dst_u, p_src_u, i_width/2 );
vlc_memcpy( p_dst_v, p_src_v, i_width/2 );
}
b_even_scanline = !b_even_scanline;
if( i_y%2 == 1 )
{
p_src_u += i_src_pitch/2;
p_src_v += i_src_pitch/2;
}
}
}
static void BlendI420R16( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src_y;
uint8_t *p_src_u, *p_src_v;
int i_x, i_y, i_pix_pitch;
int r, g, b;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch,
p_src_y += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
/* Blending */
yuv_to_rgb( &r, &g, &b,
p_src_y[i_x], p_src_u[i_x/2], p_src_v[i_x/2] );
vlc_blend_rgb16( (uint16_t*)&p_dst[i_x * i_pix_pitch],
r, g, b, i_alpha, &p_filter->fmt_out.video );
}
if( i_y%2 == 1 )
{
p_src_u += i_src_pitch/2;
p_src_v += i_src_pitch/2;
}
}
}
static void BlendI420R24( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src_y;
uint8_t *p_src_u, *p_src_v;
int i_x, i_y, i_pix_pitch;
int i_rindex, i_gindex, i_bindex;
int r, g, b;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
vlc_rgb_index( &i_rindex, &i_gindex, &i_bindex, &p_filter->fmt_out.video );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch,
p_src_y += i_src_pitch, p_src_u += i_src_pitch,
p_src_v += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
if( !i_alpha )
continue;
/* Blending */
yuv_to_rgb( &r, &g, &b,
p_src_y[i_x], p_src_u[i_x/2], p_src_v[i_x/2] );
vlc_blend_packed( &p_dst[i_x * i_pix_pitch],
i_rindex, i_gindex, i_bindex, r, g, b, i_alpha, true );
}
if( i_y%2 == 1 )
{
p_src_u += i_src_pitch/2;
p_src_v += i_src_pitch/2;
}
}
}
static void BlendI420YUVPacked( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src_y;
uint8_t *p_src_u, *p_src_v;
int i_x, i_y, i_pix_pitch;
bool b_even = !((i_x_offset + p_filter->fmt_out.video.i_x_offset)%2);
int i_l_offset, i_u_offset, i_v_offset;
vlc_yuv_packed_index( &i_l_offset, &i_u_offset, &i_v_offset,
p_filter->fmt_out.video.i_chroma );
i_pix_pitch = 2;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
p_src_y = vlc_plane_start( &i_src_pitch, p_src, Y_PLANE,
0, 0, &p_filter->fmt_in.video, 1 );
p_src_u = vlc_plane_start( NULL, p_src, U_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
p_src_v = vlc_plane_start( NULL, p_src, V_PLANE,
0, 0, &p_filter->fmt_in.video, 2 );
i_width &= ~1; /* Needs to be a multiple of 2 */
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch,
p_src_y += i_src_pitch, p_src_u += i_src_pitch,
p_src_v += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++, b_even = !b_even )
{
if( !i_alpha )
continue;
/* Blending */
vlc_blend_packed( &p_dst[i_x * 2],
i_l_offset, i_u_offset, i_v_offset,
p_src_y[i_x], p_src_u[i_x/2], p_src_v[i_x/2], i_alpha, b_even );
}
if( i_y%2 == 1 )
{
p_src_u += i_src_pitch/2;
p_src_v += i_src_pitch/2;
}
}
}
/***********************************************************************
* YUVP
***********************************************************************/
static void BlendPalI420( filter_t *p_filter,
picture_t *p_dst, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_src, *p_dst_y;
uint8_t *p_dst_u;
uint8_t *p_dst_v;
int i_x, i_y, i_trans;
bool b_even_scanline = i_y_offset % 2;
bool b_swap_up = vlc_fourcc_AreUVPlanesSwapped( p_filter->fmt_out.video.i_chroma,
VLC_CODEC_I420 );
i_dst_pitch = p_dst->p[Y_PLANE].i_pitch;
p_dst_y = p_dst->p[Y_PLANE].p_pixels + i_x_offset +
p_filter->fmt_out.video.i_x_offset +
p_dst->p[Y_PLANE].i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
const int i_u_plane = b_swap_up ? V_PLANE : U_PLANE;
p_dst_u = p_dst->p[i_u_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_u_plane].i_pitch;
const int i_v_plane = b_swap_up ? U_PLANE : V_PLANE;
p_dst_v = p_dst->p[i_v_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_v_plane].i_pitch;
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels + p_filter->fmt_in.video.i_x_offset +
i_src_pitch * p_filter->fmt_in.video.i_y_offset;
#define p_pal p_filter->fmt_in.video.p_palette->palette
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst_y += i_dst_pitch,
p_src += i_src_pitch,
p_dst_u += b_even_scanline ? i_dst_pitch/2 : 0,
p_dst_v += b_even_scanline ? i_dst_pitch/2 : 0 )
{
const uint8_t *p_trans = p_src;
b_even_scanline = !b_even_scanline;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
i_trans = vlc_alpha( p_pal[p_trans[i_x]][3], i_alpha );
if( !i_trans )
continue;
/* Blending */
p_dst_y[i_x] = vlc_blend( p_pal[p_src[i_x]][0], p_dst_y[i_x], i_trans );
if( b_even_scanline && ((i_x % 2) == 0) )
{
p_dst_u[i_x/2] = vlc_blend( p_pal[p_src[i_x]][1], p_dst_u[i_x/2], i_trans );
p_dst_v[i_x/2] = vlc_blend( p_pal[p_src[i_x]][2], p_dst_v[i_x/2], i_trans );
}
}
}
#undef p_pal
}
static void BlendPalYUVPacked( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_src, *p_dst;
int i_x, i_y, i_pix_pitch, i_trans;
bool b_even = !((i_x_offset + p_filter->fmt_out.video.i_x_offset)%2);
int i_l_offset, i_u_offset, i_v_offset;
vlc_yuv_packed_index( &i_l_offset, &i_u_offset, &i_v_offset,
p_filter->fmt_out.video.i_chroma );
i_pix_pitch = 2;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_pix_pitch * (i_x_offset +
p_filter->fmt_out.video.i_x_offset) + p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels + p_filter->fmt_in.video.i_x_offset +
i_src_pitch * p_filter->fmt_in.video.i_y_offset;
i_width &= ~1; /* Needs to be a multiple of 2 */
#define p_pal p_filter->fmt_in.video.p_palette->palette
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch, p_src += i_src_pitch )
{
const uint8_t *p_trans = p_src;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++, b_even = !b_even )
{
i_trans = vlc_alpha( p_pal[p_trans[i_x]][3], i_alpha );
if( !i_trans )
continue;
/* Blending */
if( b_even )
{
uint16_t i_u;
uint16_t i_v;
if( p_trans[i_x+1] > 0xaa )
{
i_u = (p_pal[p_src[i_x]][1] + p_pal[p_src[i_x+1]][1]) >> 1;
i_v = (p_pal[p_src[i_x]][2] + p_pal[p_src[i_x+1]][2]) >> 1;
}
else
{
i_u = p_pal[p_src[i_x]][1];
i_v = p_pal[p_src[i_x]][2];
}
vlc_blend_packed( &p_dst[i_x * 2],
i_l_offset, i_u_offset, i_v_offset,
p_pal[p_src[i_x]][0], i_u, i_v, i_trans, true );
}
else
{
p_dst[i_x * 2 + i_l_offset] = vlc_blend( p_pal[p_src[i_x]][0], p_dst[i_x * 2 + i_l_offset], i_trans );
}
}
}
#undef p_pal
}
static void BlendPalRV( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_src, *p_dst;
int i_x, i_y, i_pix_pitch, i_trans;
video_palette_t rgbpalette;
int i_rindex, i_gindex, i_bindex;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_pix_pitch * (i_x_offset +
p_filter->fmt_out.video.i_x_offset) + p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels + p_filter->fmt_in.video.i_x_offset +
i_src_pitch * p_filter->fmt_in.video.i_y_offset;
#define p_pal p_filter->fmt_in.video.p_palette->palette
#define rgbpal rgbpalette.palette
/* Convert palette first */
for( i_y = 0; i_y < p_filter->fmt_in.video.p_palette->i_entries && i_y < 256; i_y++ )
{
int r, g, b;
yuv_to_rgb( &r, &g, &b, p_pal[i_y][0], p_pal[i_y][1], p_pal[i_y][2] );
rgbpal[i_y][0] = r;
rgbpal[i_y][1] = g;
rgbpal[i_y][2] = b;
}
/* */
vlc_rgb_index( &i_rindex, &i_gindex, &i_bindex, &p_filter->fmt_out.video );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch, p_src += i_src_pitch )
{
const uint8_t *p_trans = p_src;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
i_trans = vlc_alpha( p_pal[p_trans[i_x]][3], i_alpha );
if( !i_trans )
continue;
/* Blending */
if( p_filter->fmt_out.video.i_chroma == VLC_CODEC_RGB15 || p_filter->fmt_out.video.i_chroma == VLC_CODEC_RGB16 )
vlc_blend_rgb16( (uint16_t*)&p_dst[i_x * i_pix_pitch],
rgbpal[p_src[i_x]][0], rgbpal[p_src[i_x]][1], rgbpal[p_src[i_x]][2],
i_trans,
&p_filter->fmt_out.video );
else
vlc_blend_packed( &p_dst[i_x * i_pix_pitch],
i_rindex, i_gindex, i_bindex,
rgbpal[p_src[i_x]][0], rgbpal[p_src[i_x]][1], rgbpal[p_src[i_x]][2],
i_trans, true );
}
}
#undef p_pal
#undef rgbpal
}
/***********************************************************************
* RGBA
***********************************************************************/
static void BlendRGBAI420( filter_t *p_filter,
picture_t *p_dst, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch, i_src_pix_pitch;
uint8_t *p_dst_y;
uint8_t *p_dst_u;
uint8_t *p_dst_v;
uint8_t *p_src;
int i_x, i_y, i_trans;
uint8_t y, u, v;
bool b_even_scanline = i_y_offset % 2;
bool b_swap_up = vlc_fourcc_AreUVPlanesSwapped( p_filter->fmt_out.video.i_chroma,
VLC_CODEC_I420 );
i_dst_pitch = p_dst->p[Y_PLANE].i_pitch;
p_dst_y = p_dst->p[Y_PLANE].p_pixels + i_x_offset +
p_filter->fmt_out.video.i_x_offset +
p_dst->p[Y_PLANE].i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
const int i_u_plane = b_swap_up ? V_PLANE : U_PLANE;
p_dst_u = p_dst->p[i_u_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_u_plane].i_pitch;
const int i_v_plane = b_swap_up ? U_PLANE : V_PLANE;
p_dst_v = p_dst->p[i_v_plane].p_pixels + i_x_offset/2 +
p_filter->fmt_out.video.i_x_offset/2 +
( i_y_offset + p_filter->fmt_out.video.i_y_offset ) / 2 *
p_dst->p[i_v_plane].i_pitch;
i_src_pix_pitch = p_src_pic->p->i_pixel_pitch;
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels +
p_filter->fmt_in.video.i_x_offset * i_src_pix_pitch +
p_src_pic->p->i_pitch * p_filter->fmt_in.video.i_y_offset;
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst_y += i_dst_pitch,
p_dst_u += b_even_scanline ? i_dst_pitch/2 : 0,
p_dst_v += b_even_scanline ? i_dst_pitch/2 : 0,
p_src += i_src_pitch )
{
b_even_scanline = !b_even_scanline;
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
const int R = p_src[i_x * i_src_pix_pitch + 0];
const int G = p_src[i_x * i_src_pix_pitch + 1];
const int B = p_src[i_x * i_src_pix_pitch + 2];
i_trans = vlc_alpha( p_src[i_x * i_src_pix_pitch + 3], i_alpha );
if( !i_trans )
continue;
/* Blending */
rgb_to_yuv( &y, &u, &v, R, G, B );
p_dst_y[i_x] = vlc_blend( y, p_dst_y[i_x], i_trans );
if( b_even_scanline && i_x % 2 == 0 )
{
p_dst_u[i_x/2] = vlc_blend( u, p_dst_u[i_x/2], i_trans );
p_dst_v[i_x/2] = vlc_blend( v, p_dst_v[i_x/2], i_trans );
}
}
}
}
static void BlendRGBAR24( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src;
int i_x, i_y, i_pix_pitch, i_trans, i_src_pix_pitch;
int i_rindex, i_gindex, i_bindex;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src_pix_pitch = p_src_pic->p->i_pixel_pitch;
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels +
p_filter->fmt_in.video.i_x_offset * i_src_pix_pitch +
p_src_pic->p->i_pitch * p_filter->fmt_in.video.i_y_offset;
vlc_rgb_index( &i_rindex, &i_gindex, &i_bindex, &p_filter->fmt_out.video );
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch, p_src += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
const int R = p_src[i_x * i_src_pix_pitch + 0];
const int G = p_src[i_x * i_src_pix_pitch + 1];
const int B = p_src[i_x * i_src_pix_pitch + 2];
i_trans = vlc_alpha( p_src[i_x * i_src_pix_pitch + 3], i_alpha );
if( !i_trans )
continue;
/* Blending */
vlc_blend_packed( &p_dst[i_x * i_pix_pitch],
i_rindex, i_gindex, i_bindex,
R, G, B, i_trans, true );
}
}
}
static void BlendRGBAR16( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch;
uint8_t *p_dst, *p_src;
int i_x, i_y, i_pix_pitch, i_trans, i_src_pix_pitch;
i_pix_pitch = p_dst_pic->p->i_pixel_pitch;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src_pix_pitch = p_src_pic->p->i_pixel_pitch;
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels +
p_filter->fmt_in.video.i_x_offset * i_src_pix_pitch +
p_src_pic->p->i_pitch * p_filter->fmt_in.video.i_y_offset;
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch, p_src += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++ )
{
const int R = p_src[i_x * i_src_pix_pitch + 0];
const int G = p_src[i_x * i_src_pix_pitch + 1];
const int B = p_src[i_x * i_src_pix_pitch + 2];
i_trans = vlc_alpha( p_src[i_x * i_src_pix_pitch + 3], i_alpha );
if( !i_trans )
continue;
/* Blending */
vlc_blend_rgb16( (uint16_t*)&p_dst[i_x * i_pix_pitch],
R, G, B, i_trans, &p_filter->fmt_out.video );
}
}
}
static void BlendRGBAYUVPacked( filter_t *p_filter,
picture_t *p_dst_pic, const picture_t *p_src_pic,
int i_x_offset, int i_y_offset,
int i_width, int i_height, int i_alpha )
{
int i_src_pitch, i_dst_pitch, i_src_pix_pitch;
uint8_t *p_dst, *p_src;
int i_x, i_y, i_pix_pitch, i_trans;
bool b_even = !((i_x_offset + p_filter->fmt_out.video.i_x_offset)%2);
int i_l_offset, i_u_offset, i_v_offset;
uint8_t y, u, v;
vlc_yuv_packed_index( &i_l_offset, &i_u_offset, &i_v_offset,
p_filter->fmt_out.video.i_chroma );
i_pix_pitch = 2;
i_dst_pitch = p_dst_pic->p->i_pitch;
p_dst = p_dst_pic->p->p_pixels + i_x_offset * i_pix_pitch +
p_filter->fmt_out.video.i_x_offset * i_pix_pitch +
p_dst_pic->p->i_pitch *
( i_y_offset + p_filter->fmt_out.video.i_y_offset );
i_src_pix_pitch = p_src_pic->p->i_pixel_pitch;
i_src_pitch = p_src_pic->p->i_pitch;
p_src = p_src_pic->p->p_pixels +
p_filter->fmt_in.video.i_x_offset * i_src_pitch +
p_src_pic->p->i_pitch * p_filter->fmt_in.video.i_y_offset;
i_width &= ~1; /* Needs to be a multiple of 2 */
/* Draw until we reach the bottom of the subtitle */
for( i_y = 0; i_y < i_height; i_y++,
p_dst += i_dst_pitch,
p_src += i_src_pitch )
{
/* Draw until we reach the end of the line */
for( i_x = 0; i_x < i_width; i_x++, b_even = !b_even )
{
const int R = p_src[i_x * i_src_pix_pitch + 0];
const int G = p_src[i_x * i_src_pix_pitch + 1];
const int B = p_src[i_x * i_src_pix_pitch + 2];
i_trans = vlc_alpha( p_src[i_x * i_src_pix_pitch + 3], i_alpha );
if( !i_trans )
continue;
/* Blending */
rgb_to_yuv( &y, &u, &v, R, G, B );
vlc_blend_packed( &p_dst[i_x * 2],
i_l_offset, i_u_offset, i_v_offset,
y, u, v, i_trans, b_even );
}
}
}
/*****************************************************************************
* blend.cpp: blend one picture with alpha onto another picture
*****************************************************************************
* Copyright (C) 2012 Laurent Aimar
* $Id$
*
* Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
* Preamble
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <assert.h>
#include <vlc_common.h>
#include <vlc_plugin.h>
#include <vlc_filter.h>
#include "filter_picture.h"
/*****************************************************************************
* Module descriptor
*****************************************************************************/
static int Open (vlc_object_t *);
static void Close(vlc_object_t *);
vlc_module_begin()
set_description(N_("Video pictures blending"))
set_capability("video blending", 100)
set_callbacks(Open, Close)
vlc_module_end()
static inline unsigned div255(unsigned v)
{
/* It is exact for 8 bits, and has a max error of 1 for 9 and 10 bits
* while respecting full opacity/transparency */
return ((v >> 8) + v + 1) >> 8;
//return v / 255;
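// e.g. v = 255 * 255 = 65025: ((65025 >> 8) + 65025 + 1) >> 8 = 65280 >> 8 = 255,
// which matches the exact v / 255.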
}
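/* Linear interpolation of *dst towards src by f/255:
 * f == 0 keeps *dst unchanged, f == 255 overwrites it with src. */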
template <typename T>
void merge(T *dst, unsigned src, unsigned f)
{
*dst = div255((255 - f) * (*dst) + src * f);
}
struct CPixel {
unsigned i, j, k;
unsigned a;
};
class CPicture {
public:
CPicture(const picture_t *picture,
const video_format_t *fmt,
unsigned x, unsigned y) : picture(picture), fmt(fmt), x(x), y(y)
{
}
CPicture(const CPicture &src) : picture(src.picture), fmt(src.fmt), x(src.x), y(src.y)
{
}
const video_format_t *getFormat() const
{
return fmt;
}
bool isFull(unsigned) const
{
return true;
}
protected:
template <unsigned ry>
uint8_t *getLine(unsigned plane = 0) const
{
return &picture->p[plane].p_pixels[(y / ry) * picture->p[plane].i_pitch];
}
const picture_t *picture;
const video_format_t *fmt;
unsigned x;
unsigned y;
};
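/* Planar YUV(A) view: pixel is the sample type (uint8_t or uint16_t), rx/ry are
 * the horizontal/vertical chroma subsampling factors (2,2 for I420, 2,1 for I422,
 * 1,1 for I444 and YUVA, 4,4 for YV9/I410), has_alpha adds a 4th plane and
 * swap_uv exchanges the U and V planes (YV12, YV9). See the typedefs below. */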
template <typename pixel, unsigned rx, unsigned ry, bool has_alpha, bool swap_uv>
class CPictureYUVPlanar : public CPicture {
public:
CPictureYUVPlanar(const CPicture &cfg) : CPicture(cfg)
{
data[0] = CPicture::getLine< 1>(0);
data[1] = CPicture::getLine<ry>(swap_uv ? 2 : 1);
data[2] = CPicture::getLine<ry>(swap_uv ? 1 : 2);
if (has_alpha)
data[3] = CPicture::getLine<1>(3);
}
void get(CPixel *px, unsigned dx, bool full = true) const
{
px->i = *getPointer(0, dx);
if (full) {
px->j = *getPointer(1, dx);
px->k = *getPointer(2, dx);
}
if (has_alpha)
px->a = *getPointer(3, dx);
}
void merge(unsigned dx, const CPixel &spx, unsigned a, bool full)
{
::merge(getPointer(0, dx), spx.i, a);
if (full) {
::merge(getPointer(1, dx), spx.j, a);
::merge(getPointer(2, dx), spx.k, a);
}
}
bool isFull(unsigned dx) const
{
return (y % ry) == 0 && ((x + dx) % rx) == 0;
}
void nextLine()
{
y++;
data[0] += picture->p[0].i_pitch;
if ((y % ry) == 0) {
data[1] += picture->p[swap_uv ? 2 : 1].i_pitch;
data[2] += picture->p[swap_uv ? 1 : 2].i_pitch;
}
if (has_alpha)
data[3] += picture->p[3].i_pitch;
}
private:
pixel *getPointer(unsigned plane, unsigned dx) const
{
if (plane == 1 || plane == 2)
return (pixel*)&data[plane][(x + dx) / rx * sizeof(pixel)];
else
return (pixel*)&data[plane][(x + dx) / 1 * sizeof(pixel)];
}
uint8_t *data[4];
};
template <bool swap_uv>
class CPictureYUVSemiPlanar : public CPicture {
public:
CPictureYUVSemiPlanar(const CPicture &cfg) : CPicture(cfg)
{
data[0] = CPicture::getLine<1>(0);
data[1] = CPicture::getLine<2>(1);
}
void get(CPixel *px, unsigned dx, bool full = true) const
{
px->i = *getPointer(0, dx);
if (full) {
px->j = getPointer(1, dx)[swap_uv];
px->k = getPointer(1, dx)[!swap_uv];
}
}
void merge(unsigned dx, const CPixel &spx, unsigned a, bool full)
{
::merge(getPointer(0, dx), spx.i, a);
if (full) {
::merge(&getPointer(1, dx)[ swap_uv], spx.j, a);
::merge(&getPointer(1, dx)[!swap_uv], spx.k, a);
}
}
bool isFull(unsigned dx) const
{
return (y % 2) == 0 && ((x + dx) % 2) == 0;
}
void nextLine()
{
y++;
data[0] += picture->p[0].i_pitch;
if ((y % 2) == 0)
data[1] += picture->p[1].i_pitch;
}
private:
uint8_t *getPointer(unsigned plane, unsigned dx) const
{
if (plane == 0)
return &data[plane][x + dx];
else
return &data[plane][(x + dx) / 2 * 2];
}
uint8_t *data[2];
};
template <unsigned offset_y, unsigned offset_u, unsigned offset_v>
class CPictureYUVPacked : public CPicture {
public:
CPictureYUVPacked(const CPicture &cfg) : CPicture(cfg)
{
data = CPicture::getLine<1>(0);
}
void get(CPixel *px, unsigned dx, bool full = true) const
{
uint8_t *data = getPointer(dx);
px->i = data[offset_y];
if (full) {
px->j = data[offset_u];
px->k = data[offset_v];
}
}
void merge(unsigned dx, const CPixel &spx, unsigned a, bool full)
{
uint8_t *data = getPointer(dx);
::merge(&data[offset_y], spx.i, a);
if (full) {
::merge(&data[offset_u], spx.j, a);
::merge(&data[offset_v], spx.k, a);
}
}
bool isFull(unsigned dx) const
{
return ((x + dx) % 2) == 0;
}
void nextLine()
{
y++;
data += picture->p[0].i_pitch;
}
private:
uint8_t *getPointer(unsigned dx) const
{
return &data[(x + dx) * 2];
}
uint8_t *data;
};
class CPictureYUVP : public CPicture {
public:
CPictureYUVP(const CPicture &cfg) : CPicture(cfg)
{
data = CPicture::getLine<1>(0);
}
void get(CPixel *px, unsigned dx, bool = true) const
{
px->i = *getPointer(dx);
}
void nextLine()
{
y++;
data += picture->p[0].i_pitch;
}
private:
uint8_t *getPointer(unsigned dx) const
{
return &data[x + dx];
}
uint8_t *data;
};
template <unsigned bytes, bool has_alpha>
class CPictureRGBX : public CPicture {
public:
CPictureRGBX(const CPicture &cfg) : CPicture(cfg)
{
if (has_alpha) {
offset_r = 0;
offset_g = 1;
offset_b = 2;
offset_a = 3;
} else {
#ifdef WORDS_BIGENDIAN
offset_r = (8 * bytes - fmt->i_lrshift) / 8;
offset_g = (8 * bytes - fmt->i_lgshift) / 8;
offset_b = (8 * bytes - fmt->i_lbshift) / 8;
#else
offset_r = fmt->i_lrshift / 8;
offset_g = fmt->i_lgshift / 8;
offset_b = fmt->i_lbshift / 8;
#endif
}
data = CPicture::getLine<1>(0);
}
void get(CPixel *px, unsigned dx, bool = true) const
{
const uint8_t *src = getPointer(dx);
px->i = src[offset_r];
px->j = src[offset_g];
px->k = src[offset_b];
if (has_alpha)
px->a = src[offset_a];
}
void merge(unsigned dx, const CPixel &spx, unsigned a, bool)
{
uint8_t *dst = getPointer(dx);
::merge(&dst[offset_r], spx.i, a);
::merge(&dst[offset_g], spx.j, a);
::merge(&dst[offset_b], spx.k, a);
}
void nextLine()
{
y++;
data += picture->p[0].i_pitch;
}
private:
uint8_t *getPointer(unsigned dx) const
{
return &data[(x + dx) * bytes];
}
unsigned offset_r;
unsigned offset_g;
unsigned offset_b;
unsigned offset_a;
uint8_t *data;
};
class CPictureRGB16 : public CPicture {
public:
CPictureRGB16(const CPicture &cfg) : CPicture(cfg)
{
data = CPicture::getLine<1>(0);
}
void get(CPixel *px, unsigned dx, bool = true) const
{
const uint16_t data = *getPointer(dx);
px->i = (data & fmt->i_rmask) >> fmt->i_lrshift;
px->j = (data & fmt->i_gmask) >> fmt->i_lgshift;
px->k = (data & fmt->i_bmask) >> fmt->i_lbshift;
}
void merge(unsigned dx, const CPixel &spx, unsigned a, bool full)
{
CPixel dpx;
get(&dpx, dx, full);
::merge(&dpx.i, spx.i, a);
::merge(&dpx.j, spx.j, a);
::merge(&dpx.k, spx.k, a);
*getPointer(dx) = (dpx.i << fmt->i_lrshift) |
(dpx.j << fmt->i_lgshift) |
(dpx.k << fmt->i_lbshift);
}
void nextLine()
{
y++;
data += picture->p[0].i_pitch;
}
private:
uint16_t *getPointer(unsigned dx) const
{
return (uint16_t*)&data[(x + dx) * 2];
}
uint8_t *data;
};
typedef CPictureYUVPlanar<uint8_t, 1,1, true, false> CPictureYUVA;
typedef CPictureYUVPlanar<uint8_t, 4,4, false, true> CPictureYV9;
typedef CPictureYUVPlanar<uint8_t, 4,4, false, false> CPictureI410_8;
typedef CPictureYUVPlanar<uint8_t, 4,1, false, false> CPictureI411_8;
typedef CPictureYUVSemiPlanar<false> CPictureNV12;
typedef CPictureYUVSemiPlanar<true> CPictureNV21;
typedef CPictureYUVPlanar<uint8_t, 2,2, false, true> CPictureYV12;
typedef CPictureYUVPlanar<uint8_t, 2,2, false, false> CPictureI420_8;
typedef CPictureYUVPlanar<uint16_t, 2,2, false, false> CPictureI420_16;
typedef CPictureYUVPlanar<uint8_t, 2,1, false, false> CPictureI422_8;
typedef CPictureYUVPlanar<uint16_t, 2,1, false, false> CPictureI422_16;
typedef CPictureYUVPlanar<uint8_t, 1,1, false, false> CPictureI444_8;
typedef CPictureYUVPlanar<uint16_t, 1,1, false, false> CPictureI444_16;
typedef CPictureYUVPacked<0, 1, 3> CPictureYUYV;
typedef CPictureYUVPacked<1, 0, 2> CPictureUYVY;
typedef CPictureYUVPacked<0, 3, 1> CPictureYVYU;
typedef CPictureYUVPacked<1, 2, 0> CPictureVYUY;
typedef CPictureRGBX<4, true> CPictureRGBA;
typedef CPictureRGBX<4, false> CPictureRGB32;
typedef CPictureRGBX<3, false> CPictureRGB24;
struct convertNone {
convertNone(const video_format_t *, const video_format_t *) {}
void operator()(CPixel &)
{
}
};
template <unsigned dst, unsigned src>
struct convertBits {
convertBits(const video_format_t *, const video_format_t *) {}
void operator()(CPixel &p)
{
p.i = p.i * ((1 << dst) - 1) / ((1 << src) - 1);
p.j = p.j * ((1 << dst) - 1) / ((1 << src) - 1);
p.k = p.k * ((1 << dst) - 1) / ((1 << src) - 1);
}
};
typedef convertBits< 9, 8> convert8To9Bits;
typedef convertBits<10, 8> convert8To10Bits;
struct convertRgbToYuv8 {
convertRgbToYuv8(const video_format_t *, const video_format_t *) {}
void operator()(CPixel &p)
{
uint8_t y, u, v;
rgb_to_yuv(&y, &u, &v, p.i, p.j, p.k);
p.i = y;
p.j = u;
p.k = v;
}
};
struct convertYuv8ToRgb {
convertYuv8ToRgb(const video_format_t *, const video_format_t *) {}
void operator()(CPixel &p)
{
int r, g, b;
yuv_to_rgb(&r, &g, &b, p.i, p.j, p.k);
p.i = r;
p.j = g;
p.k = b;
}
};
struct convertRgbToRgbSmall {
convertRgbToRgbSmall(const video_format_t *dst, const video_format_t *) : fmt(*dst) {}
void operator()(CPixel &p)
{
p.i >>= fmt.i_rrshift;
p.j >>= fmt.i_rgshift;
p.k >>= fmt.i_rbshift;
}
private:
const video_format_t &fmt;
};
struct convertYuvpToAny {
void operator()(CPixel &p)
{
unsigned index = p.i;
p.i = palette.palette[index][0];
p.j = palette.palette[index][1];
p.k = palette.palette[index][2];
p.a = palette.palette[index][3];
}
protected:
video_palette_t palette;
};
struct convertYuvpToYuva8 : public convertYuvpToAny {
convertYuvpToYuva8(const video_format_t *, const video_format_t *src)
{
palette = *src->p_palette;
}
};
struct convertYuvpToRgba : public convertYuvpToAny {
convertYuvpToRgba(const video_format_t *, const video_format_t *src)
{
const video_palette_t *p = src->p_palette;
for (int i = 0; i < p->i_entries; i++) {
int r, g, b;
yuv_to_rgb(&r, &g, &b,
p->palette[i][0],
p->palette[i][1],
p->palette[i][2]);
palette.palette[i][0] = r;
palette.palette[i][1] = g;
palette.palette[i][2] = b;
palette.palette[i][3] = p->palette[i][3];
}
}
};
template <class G, class F>
struct compose {
compose(const video_format_t *dst, const video_format_t *src) : f(dst, src), g(dst, src) {}
void operator()(CPixel &p)
{
f(p);
g(p);
}
private:
F f;
G g;
};
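/* Generic blend loop: each source pixel is converted to the destination
 * colorspace, its alpha is scaled by the global alpha, and it is merged into
 * the destination; isFull() reports whether the current position carries a
 * chroma sample on subsampled destinations. */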
template <class TDst, class TSrc, class TConvert>
void Blend(const CPicture &dst_data, const CPicture &src_data,
unsigned width, unsigned height, int alpha)
{
TSrc src(src_data);
TDst dst(dst_data);
TConvert convert(dst_data.getFormat(), src_data.getFormat());
for (unsigned y = 0; y < height; y++) {
for (unsigned x = 0; x < width; x++) {
CPixel spx;
src.get(&spx, x);
convert(spx);
unsigned a = div255(alpha * spx.a);
if (a <= 0)
continue;
if (dst.isFull(x))
dst.merge(x, spx, a, true);
else
dst.merge(x, spx, a, false);
}
src.nextLine();
dst.nextLine();
}
}
typedef void (*blend_function_t)(const CPicture &dst_data, const CPicture &src_data,
unsigned width, unsigned height, int alpha);
static const struct {
vlc_fourcc_t dst;
vlc_fourcc_t src;
blend_function_t blend;
} blends[] = {
#define RGB(csp, picture, cvt) \
{ csp, VLC_CODEC_YUVA, Blend<picture, CPictureYUVA, compose<cvt, convertYuv8ToRgb> > }, \
{ csp, VLC_CODEC_RGBA, Blend<picture, CPictureRGBA, compose<cvt, convertNone> > }, \
{ csp, VLC_CODEC_YUVP, Blend<picture, CPictureYUVP, compose<cvt, convertYuvpToRgba> > }
#define YUV(csp, picture, cvt) \
{ csp, VLC_CODEC_YUVA, Blend<picture, CPictureYUVA, compose<cvt, convertNone> > }, \
{ csp, VLC_CODEC_RGBA, Blend<picture, CPictureRGBA, compose<cvt, convertRgbToYuv8> > }, \
{ csp, VLC_CODEC_YUVP, Blend<picture, CPictureYUVP, compose<cvt, convertYuvpToYuva8> > }
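/* e.g. YUV(VLC_CODEC_I420, CPictureI420_8, convertNone) expands to three entries
 * blending YUVA, RGBA and YUVP sources onto an I420 destination, converting the
 * source pixel to 8-bit YUV first when needed. */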
RGB(VLC_CODEC_RGB15, CPictureRGB16, convertRgbToRgbSmall),
RGB(VLC_CODEC_RGB16, CPictureRGB16, convertRgbToRgbSmall),
RGB(VLC_CODEC_RGB24, CPictureRGB24, convertNone),
RGB(VLC_CODEC_RGB32, CPictureRGB32, convertNone),
YUV(VLC_CODEC_YV9, CPictureYV9, convertNone),
YUV(VLC_CODEC_I410, CPictureI410_8, convertNone),
YUV(VLC_CODEC_I411, CPictureI411_8, convertNone),
YUV(VLC_CODEC_YV12, CPictureYV12, convertNone),
YUV(VLC_CODEC_NV12, CPictureNV12, convertNone),
YUV(VLC_CODEC_NV21, CPictureNV21, convertNone),
YUV(VLC_CODEC_J420, CPictureI420_8, convertNone),
YUV(VLC_CODEC_I420, CPictureI420_8, convertNone),
#ifdef WORDS_BIGENDIAN
YUV(VLC_CODEC_I420_9B, CPictureI420_16, convert8To9Bits),
YUV(VLC_CODEC_I420_10B, CPictureI420_16, convert8To10Bits),
#else
YUV(VLC_CODEC_I420_9L, CPictureI420_16, convert8To9Bits),
YUV(VLC_CODEC_I420_10L, CPictureI420_16, convert8To10Bits),
#endif
YUV(VLC_CODEC_J422, CPictureI422_8, convertNone),
YUV(VLC_CODEC_I422, CPictureI422_8, convertNone),
#ifdef WORDS_BIGENDIAN
YUV(VLC_CODEC_I422_9B, CPictureI422_16, convert8To9Bits),
YUV(VLC_CODEC_I422_10B, CPictureI422_16, convert8To10Bits),
#else
YUV(VLC_CODEC_I422_9L, CPictureI422_16, convert8To9Bits),
YUV(VLC_CODEC_I422_10L, CPictureI422_16, convert8To10Bits),
#endif
YUV(VLC_CODEC_J444, CPictureI444_8, convertNone),
YUV(VLC_CODEC_I444, CPictureI444_8, convertNone),
#ifdef WORDS_BIGENDIAN
YUV(VLC_CODEC_I444_9B, CPictureI444_16, convert8To9Bits),
YUV(VLC_CODEC_I444_10B, CPictureI444_16, convert8To10Bits),
#else
YUV(VLC_CODEC_I444_9L, CPictureI444_16, convert8To9Bits),
YUV(VLC_CODEC_I444_10L, CPictureI444_16, convert8To10Bits),
#endif
YUV(VLC_CODEC_YUYV, CPictureYUYV, convertNone),
YUV(VLC_CODEC_UYVY, CPictureUYVY, convertNone),
YUV(VLC_CODEC_YVYU, CPictureYVYU, convertNone),
YUV(VLC_CODEC_VYUY, CPictureVYUY, convertNone),
#undef RGB
#undef YUV
};
struct filter_sys_t {
filter_sys_t() : blend(NULL)
{
}
blend_function_t blend;
};
/**
* It blends two pictures together.
*/
static void Blend(filter_t *filter,
picture_t *dst, const picture_t *src,
int x_offset, int y_offset, int alpha)
{
filter_sys_t *sys = filter->p_sys;
int width = __MIN((int)filter->fmt_out.video.i_visible_width - x_offset,
(int)filter->fmt_in.video.i_visible_width);
int height = __MIN((int)filter->fmt_out.video.i_visible_height - y_offset,
(int)filter->fmt_in.video.i_visible_height);
if (width <= 0 || height <= 0 || alpha <= 0)
return;
video_format_FixRgb(&filter->fmt_out.video);
video_format_FixRgb(&filter->fmt_in.video);
sys->blend(CPicture(dst, &filter->fmt_out.video,
filter->fmt_out.video.i_x_offset + x_offset,
filter->fmt_out.video.i_y_offset + y_offset),
CPicture(src, &filter->fmt_in.video,
filter->fmt_in.video.i_x_offset,
filter->fmt_in.video.i_y_offset),
width, height, alpha);
}
static int Open(vlc_object_t *object)
{
filter_t *filter = (filter_t *)object;
const vlc_fourcc_t src = filter->fmt_in.video.i_chroma;
const vlc_fourcc_t dst = filter->fmt_out.video.i_chroma;
filter_sys_t *sys = new filter_sys_t();
for (size_t i = 0; i < sizeof(blends) / sizeof(*blends); i++) {
if (blends[i].src == src && blends[i].dst == dst)
sys->blend = blends[i].blend;
}
if (!sys->blend) {
msg_Err(filter, "no matching alpha blending routine (chroma: %4.4s -> %4.4s)",
(char *)&src, (char *)&dst);
delete sys;
return VLC_EGENERIC;
}
filter->pf_video_blend = Blend;
filter->p_sys = sys;
return VLC_SUCCESS;
}
static void Close(vlc_object_t *object)
{
filter_t *filter = (filter_t *)object;
delete filter->p_sys;
}
@@ -1070,7 +1070,7 @@ modules/video_filter/atmo/AtmoZoneDefinition.h
modules/video_filter/audiobargraph_v.c
modules/video_filter/ball.c
modules/video_filter/blendbench.c
-modules/video_filter/blend.c
+modules/video_filter/blend.cpp
modules/video_filter/bluescreen.c
modules/video_filter/canvas.c
modules/video_filter/chain.c