Commit 93ff7f1c authored by Christophe Massiot

* Parser optimization;

* Re-enabled DMV motion compensation;

For the record, we now comfortably outperform the reference decoder (by 50%).
parent e05c74bf
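
For context on the DMV item: dual-prime motion compensation predicts a P macroblock twice, once from the reference field of the same parity using the transmitted vector, and once from the opposite-parity field using a vector derived from it, then averages the two predictions (this is what the FIELDDMV/FRAMEDMV macros introduced below implement). A minimal, hedged sketch of the derived-vector arithmetic for the field-structure case, mirroring ISO/IEC 13818-2 section 7.6.3.6; the function name and signature here are illustrative, not the decoder's actual API:

/* Illustrative sketch: compute the dual-prime derived vector for a field
 * picture. (i_mv_x, i_mv_y) is the transmitted motion vector,
 * (i_dmv_x, i_dmv_y) the small differential dual-prime vector, and
 * b_top_field is non-zero when the field being predicted is the top one. */
static void DeriveDualPrimeVector( int i_mv_x, int i_mv_y,
                                   int i_dmv_x, int i_dmv_y,
                                   int b_top_field, int pi_derived[2] )
{
    /* Scale the transmitted vector for the opposite-parity reference
     * (divide by 2 with round-to-nearest, the spec's "//" operator),
     * then add the differential vector. */
    pi_derived[0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + i_dmv_x;
    pi_derived[1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + i_dmv_y;

    /* Correct for the one-line vertical offset between the two fields. */
    if( b_top_field )
        pi_derived[1]--;
    else
        pi_derived[1]++;
}

The frame-structure case additionally scales the vertical component and produces one derived vector per field, as in the MotionVector() code further down in this diff.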
......@@ -23,26 +23,21 @@ typedef void (*f_motion_t)( struct macroblock_s* );
/*****************************************************************************
* Prototypes
*****************************************************************************/
/* Empty function for intra macroblocks motion compensation */
void vdec_MotionDummy( struct macroblock_s * p_mb );
/* Motion compensation */
void vdec_MotionFieldField420( struct macroblock_s * p_mb );
void vdec_MotionField16x8420( struct macroblock_s * p_mb );
void vdec_MotionFieldDMV( struct macroblock_s * p_mb );
void vdec_MotionFieldDMV420( struct macroblock_s * p_mb );
void vdec_MotionFrameFrame420( struct macroblock_s * p_mb );
void vdec_MotionFrameField420( struct macroblock_s * p_mb );
void vdec_MotionFrameDMV( struct macroblock_s * p_mb );
void vdec_MotionFrameDMV420( struct macroblock_s * p_mb );
void vdec_MotionFieldField422( struct macroblock_s * p_mb );
void vdec_MotionField16x8422( struct macroblock_s * p_mb );
void vdec_MotionFieldDMV( struct macroblock_s * p_mb );
void vdec_MotionFieldDMV422( struct macroblock_s * p_mb );
void vdec_MotionFrameFrame422( struct macroblock_s * p_mb );
void vdec_MotionFrameField422( struct macroblock_s * p_mb );
void vdec_MotionFrameDMV( struct macroblock_s * p_mb );
void vdec_MotionFrameDMV422( struct macroblock_s * p_mb );
void vdec_MotionFieldField444( struct macroblock_s * p_mb );
void vdec_MotionField16x8444( struct macroblock_s * p_mb );
void vdec_MotionFieldDMV( struct macroblock_s * p_mb );
void vdec_MotionFieldDMV444( struct macroblock_s * p_mb );
void vdec_MotionFrameFrame444( struct macroblock_s * p_mb );
void vdec_MotionFrameField444( struct macroblock_s * p_mb );
void vdec_MotionFrameDMV( struct macroblock_s * p_mb );
\ No newline at end of file
void vdec_MotionFrameDMV444( struct macroblock_s * p_mb );
\ No newline at end of file
......@@ -74,6 +74,10 @@ static __inline__ macroblock_t * vpar_NewMacroblock( video_fifo_t * p_fifo )
/* No more structures available. This should not happen ! */
intf_DbgMsg("vpar debug: macroblock list is empty, delaying\n");
vlc_mutex_unlock( &P_buffer.lock );
if( p_fifo->p_vpar->b_die )
{
return( NULL );
}
msleep(VPAR_OUTMEM_SLEEP);
vlc_mutex_lock( &P_buffer.lock );
}
......
......@@ -15,6 +15,12 @@
* "video_fifo.h"
*****************************************************************************/
/*****************************************************************************
* Function pointers
*****************************************************************************/
typedef void (*f_parse_mb_t)( struct vpar_thread_s*, int *, int, int,
boolean_t, int, int, int, boolean_t);
/*****************************************************************************
* macroblock_t : information on a macroblock
*****************************************************************************/
......@@ -22,16 +28,8 @@ typedef struct macroblock_s
{
int i_mb_type; /* macroblock type */
int i_coded_block_pattern;
int i_structure;
int i_current_structure;
boolean_t b_P_coding_type; /* Is it P_CODING_TYPE ? */
picture_t * p_picture;
int i_l_x, i_l_y; /* position of macroblock (lum) */
int i_c_x, i_c_y; /* position of macroblock (chroma) */
int i_chroma_nb_blocks; /* nb of bks for a chr comp */
int i_l_stride; /* number of yuv_data_t to ignore
* when changing lines */
int i_c_stride; /* idem, for chroma */
picture_t * p_picture;
/* IDCT information */
dctelem_t ppi_blocks[12][64]; /* blocks */
......@@ -44,16 +42,24 @@ typedef struct macroblock_s
picture_t * p_forward;
int ppi_field_select[2][2];
int pppi_motion_vectors[2][2][2];
int pi_dm_vector[2];
boolean_t b_top_field_first;
int ppi_dmv[2][2];
int i_l_x, i_c_x;
int i_motion_l_y;
int i_motion_c_y;
boolean_t b_motion_field;
int i_l_stride; /* number of yuv_data_t to
* ignore when changing line */
int i_c_stride; /* idem, for chroma */
boolean_t b_P_second; /* Second field of a P picture ?
* (used to determine the predicting
* frame) */
boolean_t b_motion_field; /* Field we are predicting
* (top field or bottom field) */
/* AddBlock information */
yuv_data_t * p_data[12]; /* pointer to the position
* in the final picture */
int i_addb_l_stride, i_addb_c_stride;
/* nb of coeffs to jump when changing lines */
} macroblock_t;
/*****************************************************************************
......@@ -61,12 +67,8 @@ typedef struct macroblock_s
*****************************************************************************/
typedef struct
{
int i_mb_type, i_motion_type, i_mv_count, i_mv_format;
boolean_t b_dmv;
/* Macroblock Type */
int i_coded_block_pattern;
boolean_t b_dct_type;
int i_motion_type, i_mv_count, i_mv_format;
boolean_t b_dmv, b_dct_type;
int i_l_x, i_l_y, i_c_x, i_c_y;
} macroblock_parsing_t;
......@@ -139,12 +141,53 @@ void vpar_InitPMBType( struct vpar_thread_s * p_vpar );
void vpar_InitBMBType( struct vpar_thread_s * p_vpar );
void vpar_InitCodedPattern( struct vpar_thread_s * p_vpar );
void vpar_InitDCTTables( struct vpar_thread_s * p_vpar );
void vpar_ParseMacroblock( struct vpar_thread_s * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base );
int vpar_CodedPattern420( struct vpar_thread_s* p_vpar );
int vpar_CodedPattern422( struct vpar_thread_s* p_vpar );
int vpar_CodedPattern444( struct vpar_thread_s* p_vpar );
int vpar_IMBType( struct vpar_thread_s* p_vpar );
int vpar_PMBType( struct vpar_thread_s* p_vpar );
int vpar_BMBType( struct vpar_thread_s* p_vpar );
int vpar_DMBType( struct vpar_thread_s* p_vpar );
void vpar_ParseMacroblockGENERIC( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2I420F0( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2P420F0( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2B420F0( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2I420T0( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2P420T0( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2B420T0( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2I420B1( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2P420B1( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
void vpar_ParseMacroblock2B420B1( struct vpar_thread_s* p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field );
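
The ten specialized entry points above presumably bake the picture coding type (I/P/B), 4:2:0 chroma, and the picture structure/second-field combination into each function (F0 = frame picture, T0 = first/top field, B1 = second/bottom field), so the per-macroblock hot path no longer re-tests parameters that are constant for a whole picture; anything else falls back to vpar_ParseMacroblockGENERIC. A hedged sketch of how the f_parse_mb_t pointer could be chosen once per picture from these prototypes; the selection logic is an assumption for illustration, only the names and the typedef come from this change:

/* Illustrative only: select a specialized macroblock parser once per
 * picture. The dispatch criteria below are inferred from the naming
 * convention, not taken from the decoder. */
static f_parse_mb_t SelectParseMB( boolean_t b_mpeg2, int i_coding_type,
                                   int i_chroma_format, int i_structure,
                                   boolean_t b_second_field )
{
    if( !b_mpeg2 || i_chroma_format != CHROMA_420 )
    {
        /* Uncommon combinations keep the generic, fully parameterized parser. */
        return vpar_ParseMacroblockGENERIC;
    }
    if( i_structure == FRAME_STRUCTURE )                     /* ...420F0 */
    {
        return (i_coding_type == I_CODING_TYPE) ? vpar_ParseMacroblock2I420F0
             : (i_coding_type == P_CODING_TYPE) ? vpar_ParseMacroblock2P420F0
             :                                    vpar_ParseMacroblock2B420F0;
    }
    if( !b_second_field )                                    /* ...420T0 */
    {
        return (i_coding_type == I_CODING_TYPE) ? vpar_ParseMacroblock2I420T0
             : (i_coding_type == P_CODING_TYPE) ? vpar_ParseMacroblock2P420T0
             :                                    vpar_ParseMacroblock2B420T0;
    }
    /* Second field of a field picture                          ...420B1 */
    return (i_coding_type == I_CODING_TYPE) ? vpar_ParseMacroblock2I420B1
         : (i_coding_type == P_CODING_TYPE) ? vpar_ParseMacroblock2P420B1
         :                                    vpar_ParseMacroblock2B420B1;
}

The result would then be stored in the picture-level pf_parse_mb pointer introduced in this change and invoked for every macroblock of the picture.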
......@@ -19,11 +19,8 @@
* Function pointers
*****************************************************************************/
struct vpar_thread_s;
struct macroblock_s;
typedef void (*f_slice_header_t)( struct vpar_thread_s*, int*, int, u32);
typedef int (*f_chroma_pattern_t)( struct vpar_thread_s* );
typedef int (*f_macroblock_type_t)( struct vpar_thread_s* );
/*****************************************************************************
* quant_matrix_t : Quantization Matrix
......@@ -50,8 +47,6 @@ typedef struct sequence_s
f_slice_header_t pf_slice_header;
quant_matrix_t intra_quant, nonintra_quant;
quant_matrix_t chroma_intra_quant, chroma_nonintra_quant;
void (*pf_decode_mv)( struct vpar_thread_s *, struct macroblock_s *, int );
f_chroma_pattern_t pf_decode_pattern;
/* Chromatic information */
unsigned int i_chroma_format;
......@@ -64,12 +59,12 @@ typedef struct sequence_s
picture_t * p_backward;
/* Copyright extension */
boolean_t b_copyright_flag; /* Whether the following
information is significant
or not. */
u8 i_copyright_id;
boolean_t b_original;
u64 i_copyright_nb;
boolean_t b_copyright_flag; /* Whether the following
information is significant
or not. */
u8 i_copyright_id;
boolean_t b_original;
u64 i_copyright_nb;
} sequence_t;
/*****************************************************************************
......@@ -89,6 +84,8 @@ typedef struct picture_parsing_s
boolean_t b_repeat_first_field;
int i_l_stride, i_c_stride;
f_parse_mb_t pf_parse_mb;
/* Used for second field management */
int i_current_structure;
......@@ -100,8 +97,6 @@ typedef struct picture_parsing_s
/* Relative to the current field */
int i_coding_type, i_structure;
boolean_t b_frame_structure;
boolean_t b_motion_field;
f_macroblock_type_t pf_macroblock_type;
boolean_t b_error;
} picture_parsing_t;
......
......@@ -455,59 +455,6 @@ static __inline__ void Motion444(
i_select, b_average );
}
/*****************************************************************************
* DualPrimeArithmetic : Dual Prime Additional arithmetic (7.6.3.6)
*****************************************************************************/
static __inline__ void DualPrimeArithmetic( macroblock_t * p_mb,
int ppi_dmv[2][2],
int i_mv_x, int i_mv_y )
{
if( p_mb->i_structure == FRAME_STRUCTURE )
{
if( p_mb->b_top_field_first )
{
/* vector for prediction of top field from bottom field */
ppi_dmv[0][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + p_mb->pi_dm_vector[0];
ppi_dmv[0][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + p_mb->pi_dm_vector[1] - 1;
/* vector for prediction of bottom field from top field */
ppi_dmv[1][0] = ((3*i_mv_x + (i_mv_x > 0)) >> 1) + p_mb->pi_dm_vector[0];
ppi_dmv[1][1] = ((3*i_mv_y + (i_mv_y > 0)) >> 1) + p_mb->pi_dm_vector[1] + 1;
}
else
{
/* vector for prediction of top field from bottom field */
ppi_dmv[0][0] = ((3*i_mv_x + (i_mv_x > 0)) >> 1) + p_mb->pi_dm_vector[0];
ppi_dmv[0][1] = ((3*i_mv_y + (i_mv_y > 0)) >> 1) + p_mb->pi_dm_vector[1] - 1;
/* vector for prediction of bottom field from top field */
ppi_dmv[1][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + p_mb->pi_dm_vector[0];
ppi_dmv[1][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + p_mb->pi_dm_vector[1] + 1;
}
}
else
{
/* vector for prediction from field of opposite 'parity' */
ppi_dmv[0][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + p_mb->pi_dm_vector[0];
ppi_dmv[0][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + p_mb->pi_dm_vector[1];
/* correct for vertical field shift */
if( p_mb->i_structure == TOP_FIELD )
ppi_dmv[0][1]--;
else
ppi_dmv[0][1]++;
}
}
/*****************************************************************************
* vdec_MotionDummy : motion compensation for an intra macroblock
*****************************************************************************/
void vdec_MotionDummy( macroblock_t * p_mb )
{
/* Nothing to do :) */
}
/*****************************************************************************
* vdec_MotionFieldField : motion compensation for field motion type (field)
*****************************************************************************/
......@@ -516,8 +463,7 @@ void vdec_MotionDummy( macroblock_t * p_mb )
\
if( p_mb->i_mb_type & MB_MOTION_FORWARD ) \
{ \
if( p_mb->b_P_coding_type \
&& (p_mb->i_current_structure == FRAME_STRUCTURE) \
if( p_mb->b_P_second \
&& (p_mb->b_motion_field != p_mb->ppi_field_select[0][0]) )\
p_pred = p_mb->p_picture; \
else \
......@@ -573,8 +519,7 @@ void vdec_MotionFieldField444( macroblock_t * p_mb )
\
if( p_mb->i_mb_type & MB_MOTION_FORWARD ) \
{ \
if( p_mb->b_P_coding_type \
&& (p_mb->i_current_structure == FRAME_STRUCTURE) \
if( p_mb->b_P_second \
&& (p_mb->b_motion_field != p_mb->ppi_field_select[0][0]) )\
p_pred = p_mb->p_picture; \
else \
......@@ -586,8 +531,7 @@ void vdec_MotionFieldField444( macroblock_t * p_mb )
p_mb->pppi_motion_vectors[0][0][1], \
p_mb->i_l_stride, p_mb->i_c_stride, 8, 0, 0 ); \
\
if( p_mb->b_P_coding_type \
&& (p_mb->i_current_structure == FRAME_STRUCTURE) \
if( p_mb->b_P_second \
&& (p_mb->b_motion_field != p_mb->ppi_field_select[1][0]) )\
p_pred = p_mb->p_picture; \
else \
......@@ -649,44 +593,45 @@ void vdec_MotionField16x8444( macroblock_t * p_mb )
}
/*****************************************************************************
* vdec_MotionFieldDMV : motion compensation for dmv motion type (field)
* vdec_MotionFieldDMVXXX : motion compensation for dmv motion type (field)
*****************************************************************************/
void vdec_MotionFieldDMV( macroblock_t * p_mb )
#define FIELDDMV( MOTION ) \
{ \
/* This is necessarily a MOTION_FORWARD only macroblock, in a P \
* picture. */ \
picture_t * p_pred; \
\
/* predict from field of same parity */ \
MOTION( p_mb, p_mb->p_forward, \
p_mb->b_motion_field, p_mb->b_motion_field, \
p_mb->pppi_motion_vectors[0][0][0], \
p_mb->pppi_motion_vectors[0][0][1], \
p_mb->i_l_stride, p_mb->i_c_stride, 16, 0, 0 ); \
\
if( p_mb->b_P_second ) \
p_pred = p_mb->p_picture; \
else \
p_pred = p_mb->p_forward; \
\
/* predict from field of opposite parity */ \
MOTION( p_mb, p_pred, !p_mb->b_motion_field, p_mb->b_motion_field, \
p_mb->ppi_dmv[0][0], p_mb->ppi_dmv[0][1], \
p_mb->i_l_stride, p_mb->i_c_stride, 16, 0, 1 ); \
} /* FIELDDMV */
void vdec_MotionFieldDMV420( macroblock_t * p_mb )
{
#if 0
/* This is necessarily a MOTION_FORWARD only macroblock */
motion_arg_t args;
picture_t * p_pred;
int ppi_dmv[2][2];
args.i_height = 16;
args.b_average = 0;
args.b_dest_field = p_mb->b_motion_field;
args.i_offset = 0;
if( p_mb->i_current_structure == FRAME_STRUCTURE )
p_pred = p_mb->p_picture;
else
p_pred = p_mb->p_forward;
DualPrimeArithmetic( p_mb, ppi_dmv, p_mb->pppi_motion_vectors[0][0][0],
p_mb->pppi_motion_vectors[0][0][1] );
/* predict from field of same parity */
args.p_source = p_mb->p_forward;
args.b_source_field = p_mb->b_motion_field;
args.i_mv_x = p_mb->pppi_motion_vectors[0][0][0];
args.i_mv_y = p_mb->pppi_motion_vectors[0][0][1];
p_mb->pf_chroma_motion( p_mb, &args );
/* predict from field of opposite parity */
args.b_average = 1;
args.p_source = p_pred;
args.b_source_field = !p_mb->b_motion_field;
args.i_mv_x = ppi_dmv[0][0];
args.i_mv_y = ppi_dmv[0][1];
p_mb->pf_chroma_motion( p_mb, &args );
#endif
FIELDDMV( Motion420 )
}
void vdec_MotionFieldDMV422( macroblock_t * p_mb )
{
FIELDDMV( Motion422 )
}
void vdec_MotionFieldDMV444( macroblock_t * p_mb )
{
FIELDDMV( Motion444 )
}
/*****************************************************************************
......@@ -800,53 +745,49 @@ void vdec_MotionFrameField444( macroblock_t * p_mb )
}
/*****************************************************************************
* vdec_MotionFrameDMV : motion compensation for dmv motion type (frame)
* vdec_MotionFrameDMVXXX : motion compensation for dmv motion type (frame)
*****************************************************************************/
void vdec_MotionFrameDMV( macroblock_t * p_mb )
#define FRAMEDMV( MOTION ) \
{ \
/* This is necessarily a MOTION_FORWARD only macroblock, in a P \
* picture. */ \
\
/* predict top field from top field */ \
MOTION( p_mb, p_mb->p_forward, 0, 0, \
p_mb->pppi_motion_vectors[0][0][0], \
p_mb->pppi_motion_vectors[0][0][1], \
/* ????? >> 1 ? */ \
p_mb->i_l_stride << 1, p_mb->i_c_stride << 1, 8, 0, 0 ); \
\
/* predict and add to top field from bottom field */ \
MOTION( p_mb, p_mb->p_forward, 1, 0, \
p_mb->ppi_dmv[0][0], p_mb->ppi_dmv[0][1], \
p_mb->i_l_stride << 1, p_mb->i_c_stride << 1, 8, 0, 1 ); \
\
/* predict bottom field from bottom field */ \
MOTION( p_mb, p_mb->p_forward, 1, 1, \
p_mb->pppi_motion_vectors[0][0][0], \
p_mb->pppi_motion_vectors[0][0][1], \
/* ????? >> 1 ? */ \
p_mb->i_l_stride << 1, p_mb->i_c_stride << 1, 8, 0, 0 ); \
\
/* predict and add to bottom field from top field */ \
MOTION( p_mb, p_mb->p_forward, 1, 0, \
p_mb->ppi_dmv[1][0], p_mb->ppi_dmv[1][1], \
p_mb->i_l_stride << 1, p_mb->i_c_stride << 1, 8, 0, 1 ); \
} /* FRAMEDMV */
void vdec_MotionFrameDMV420( macroblock_t * p_mb )
{
#if 0
/* This is necessarily a MOTION_FORWARD only macroblock */
motion_arg_t args;
int ppi_dmv[2][2];
args.i_l_x_step = p_mb->i_l_stride << 1;
args.i_c_x_step = p_mb->i_c_stride << 1;
args.i_height = 8;
args.b_average = 0;
args.b_dest_field = 0;
args.i_offset = 0;
args.p_source = p_mb->p_forward;
DualPrimeArithmetic( p_mb, ppi_dmv, p_mb->pppi_motion_vectors[0][0][0],
p_mb->pppi_motion_vectors[0][0][1] );
/* predict top field from top field */
args.b_source_field = 0;
args.i_mv_x = p_mb->pppi_motion_vectors[0][0][0];
args.i_mv_y = p_mb->pppi_motion_vectors[0][0][1] >> 1;
p_mb->pf_chroma_motion( p_mb, &args );
/* predict and add to top field from bottom field */
args.b_average = 1;
args.b_source_field = 1;
args.i_mv_x = ppi_dmv[0][0];
args.i_mv_y = ppi_dmv[0][1];
p_mb->pf_chroma_motion( p_mb, &args );
/* predict bottom field from bottom field */
args.b_average = 0;
args.b_dest_field = 1;
args.b_source_field = 0;
args.i_mv_x = p_mb->pppi_motion_vectors[0][0][0];
args.i_mv_y = p_mb->pppi_motion_vectors[0][0][1] >> 1;
p_mb->pf_chroma_motion( p_mb, &args );
/* predict and add to bottom field from top field */
args.b_average = 1;
args.b_source_field = 1;
args.i_mv_x = ppi_dmv[1][0];
args.i_mv_y = ppi_dmv[1][1];
p_mb->pf_chroma_motion( p_mb, &args );
#endif
FRAMEDMV( Motion420 )
}
void vdec_MotionFrameDMV422( macroblock_t * p_mb )
{
FRAMEDMV( Motion422 )
}
void vdec_MotionFrameDMV444( macroblock_t * p_mb )
{
FRAMEDMV( Motion444 )
}
......@@ -47,8 +47,20 @@ static void vpar_DecodeMPEG2Non( vpar_thread_t * p_vpar, macroblock_t * p_mb, in
static void vpar_DecodeMPEG2Intra( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b );
/*
* Initialisation tables
* Welcome to vpar_blocks.c! Here's where the heavy, processor-critical parsing
* task is done. This file is divided into several parts:
* - Initialization of the lookup tables
* - Decoding of coded blocks
* - Decoding of motion vectors
* - Decoding of the other macroblock structures
* It's a pretty long file. Good luck!
*/
/*
* Initialization tables
*/
/* Table for coded_block_pattern resolution */
static lookup_t pl_coded_pattern_init_table[512] =
{ {MB_ERROR, 0}, {0, 9}, {39, 9}, {27, 9}, {59, 9}, {55, 9}, {47, 9}, {31, 9},
......@@ -347,6 +359,8 @@ static dct_lookup_t pl_DCT_tab6[16] =
{13,2,16}, {12,2,16}, {11,2,16}, {31,1,16},
{30,1,16}, {29,1,16}, {28,1,16}, {27,1,16}
};
/*
* Initialization of lookup tables
*/
......@@ -378,7 +392,7 @@ void vpar_InitCrop( vpar_thread_t * p_vpar )
#endif
/*****************************************************************************
* InitMbAddrInc : Initialize the lookup table for mb_addr_inc
* vpar_InitMbAddrInc : Initialize the lookup table for mb_addr_inc
*****************************************************************************/
/* Function for filling up the lookup table for mb_addr_inc */
......@@ -441,7 +455,7 @@ void vpar_InitMbAddrInc( vpar_thread_t * p_vpar )
}
/*****************************************************************************
* Init*MBType : Initialize lookup table for the Macroblock type
* vpar_Init*MBType : Initialize lookup table for the Macroblock type
*****************************************************************************/
/* Function for filling up the tables */
......@@ -500,8 +514,8 @@ void vpar_InitBMBType( vpar_thread_t * p_vpar )
}
/*****************************************************************************
* InitCodedPattern : Initialize the lookup table for decoding
* coded block pattern
* vpar_InitCodedPattern : Initialize the lookup table for decoding
* coded block pattern
*****************************************************************************/
void vpar_InitCodedPattern( vpar_thread_t * p_vpar )
{
......@@ -509,8 +523,8 @@ void vpar_InitCodedPattern( vpar_thread_t * p_vpar )
}
/*****************************************************************************
* InitDCT : Initialize tables giving the length of the dct coefficient
* from the vlc code
* vpar_InitDCTTables : Initialize tables giving the length of the dct
* coefficient from the vlc code
*****************************************************************************/
/* First function for filling the table */
......@@ -560,441 +574,650 @@ void vpar_InitDCTTables( vpar_thread_t * p_vpar )
FillDCTTable( ppl_dct_coef[1], pl_DCT_tab6, 1, 16, 16 );
}
/*
* Macroblock parsing functions
* Block parsing
*/
/*****************************************************************************
* InitMacroblock : Initialize macroblock values
* vpar_DecodeMPEG1Non : decode MPEG-1 non-intra blocks
*****************************************************************************/
static __inline__ void InitMacroblock( vpar_thread_t * p_vpar,
macroblock_t * p_mb )
static void vpar_DecodeMPEG1Non( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
{
p_mb->p_picture = p_vpar->picture.p_picture;
p_mb->i_structure = p_vpar->picture.i_structure;
p_mb->i_current_structure = p_vpar->picture.i_current_structure;
p_mb->b_top_field_first = p_vpar->picture.b_top_field_first;
p_mb->i_l_x = p_vpar->mb.i_l_x;
p_mb->i_motion_l_y = p_mb->i_l_y = p_vpar->mb.i_l_y;
p_mb->i_c_x = p_vpar->mb.i_c_x;
p_mb->i_motion_c_y = p_mb->i_c_y = p_vpar->mb.i_c_y;
p_mb->i_chroma_nb_blocks = p_vpar->sequence.i_chroma_nb_blocks;
p_mb->b_P_coding_type = ( p_vpar->picture.i_coding_type == P_CODING_TYPE );
if( (p_vpar->picture.i_coding_type == P_CODING_TYPE) ||
(p_vpar->picture.i_coding_type == B_CODING_TYPE) )
p_mb->p_forward = p_vpar->sequence.p_forward;
else
p_mb->p_forward = NULL;
if( p_vpar->picture.i_coding_type == B_CODING_TYPE )
p_mb->p_backward = p_vpar->sequence.p_backward;
else
p_mb->p_backward = NULL;
p_mb->i_addb_l_stride = (p_mb->i_l_stride = p_vpar->picture.i_l_stride) - 8;
p_mb->i_addb_c_stride = (p_mb->i_c_stride = p_vpar->picture.i_c_stride) - 8;
/* Update macroblock real position. */
p_vpar->mb.i_l_x += 16;
p_vpar->mb.i_l_y += (p_vpar->mb.i_l_x / p_vpar->sequence.i_width)
* (2 - p_vpar->picture.b_frame_structure) * 16;
p_vpar->mb.i_l_x %= p_vpar->sequence.i_width;
p_vpar->mb.i_c_x += p_vpar->sequence.i_chroma_mb_width;
p_vpar->mb.i_c_y += (p_vpar->mb.i_c_x / p_vpar->sequence.i_chroma_width)
* (2 - p_vpar->picture.b_frame_structure)
* p_vpar->sequence.i_chroma_mb_height;
p_vpar->mb.i_c_x %= p_vpar->sequence.i_chroma_width;
if( (p_mb->b_motion_field = p_vpar->picture.b_motion_field) )
if( p_vpar->picture.i_coding_type == D_CODING_TYPE )
{
p_mb->i_motion_l_y--;
p_mb->i_motion_c_y--;
/* Remove end_of_macroblock (always 1, prevents startcode emulation)
* ISO/IEC 11172-2 section 2.4.2.7 and 2.4.3.6 */
RemoveBits( &p_vpar->bit_stream, 1 );
}
}
/*****************************************************************************
* MacroblockAddressIncrement : Get the macroblock_address_increment field
* vpar_DecodeMPEG1Intra : decode MPEG-1 intra blocks
*****************************************************************************/
static __inline__ int MacroblockAddressIncrement( vpar_thread_t * p_vpar )
static void vpar_DecodeMPEG1Intra( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
{
int i_addr_inc = 0;
/* Index in the lookup table mb_addr_inc */
int i_index = ShowBits( &p_vpar->bit_stream, 11 );
/* Test the presence of the escape character */
while( i_index == 8 )
if( p_vpar->picture.i_coding_type == D_CODING_TYPE )
{
RemoveBits( &p_vpar->bit_stream, 11 );
i_addr_inc += 33;
i_index = ShowBits( &p_vpar->bit_stream, 11 );
/* Remove end_of_macroblock (always 1, prevents startcode emulation)
* ISO/IEC 11172-2 section 2.4.2.7 and 2.4.3.6 */
RemoveBits( &p_vpar->bit_stream, 1 );
}
/* Assign the value from the lookup table */
i_addr_inc += p_vpar->pl_mb_addr_inc[i_index].i_value;
/* Remove the right number of bits from the stream */
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_mb_addr_inc[i_index].i_length );
return i_addr_inc;
}
/*****************************************************************************
* MacroblockModes : Get the macroblock_modes structure
* vpar_DecodeMPEG2Non : decode MPEG-2 non-intra blocks
*****************************************************************************/
static __inline__ void MacroblockModes( vpar_thread_t * p_vpar,
macroblock_t * p_mb )
static void vpar_DecodeMPEG2Non( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
{
static f_motion_t pppf_motion[4][2][4] =
{
{ {NULL, NULL, NULL, NULL},
{NULL, NULL, NULL, NULL}
},
{ {NULL, vdec_MotionFieldField420, vdec_MotionField16x8420, vdec_MotionFieldDMV},
{NULL, vdec_MotionFrameField420, vdec_MotionFrameFrame420, vdec_MotionFrameDMV}
},
{ {NULL, vdec_MotionFieldField422, vdec_MotionField16x8422, vdec_MotionFieldDMV},
{NULL, vdec_MotionFrameField422, vdec_MotionFrameFrame422, vdec_MotionFrameDMV}
},
{ {NULL, vdec_MotionFieldField444, vdec_MotionField16x8444, vdec_MotionFieldDMV},
{NULL, vdec_MotionFrameField444, vdec_MotionFrameFrame444, vdec_MotionFrameDMV}
}
};
static int ppi_mv_count[2][4] = { {0, 1, 2, 1}, {0, 2, 1, 1} };
static int ppi_mv_format[2][4] = { {0, 1, 1, 1}, {0, 1, 2, 1} };
int i_parse;
int i_nc;
int i_cc;
int i_coef;
int i_type;
int i_code;
int i_length;
int i_pos;
int i_run;
int i_level;
int i_quant_type;
boolean_t b_sign;
int * ppi_quant[2];
/* Lookup Table for the chromatic component */
static int pi_cc_index[12] = { 0, 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2 };
/* Get macroblock_type. */
p_vpar->mb.i_mb_type = (p_vpar->picture.pf_macroblock_type)( p_vpar );
p_mb->i_mb_type = p_vpar->mb.i_mb_type;
i_cc = pi_cc_index[i_b];
/* Determine whether it is luminance or not (chrominance) */
i_type = ( i_cc + 1 ) >> 1;
i_quant_type = (!i_type) || (p_vpar->sequence.i_chroma_format == CHROMA_420);
/* SCALABILITY : warning, we don't know if spatial_temporal_weight_code
* has to be dropped, take care if you use scalable streams. */
/* RemoveBits( &p_vpar->bit_stream, 2 ); */
/* Give a pointer to the quantization matrices for intra blocks */
ppi_quant[1] = p_vpar->sequence.nonintra_quant.pi_matrix;
ppi_quant[0] = p_vpar->sequence.chroma_nonintra_quant.pi_matrix;
/* Decoding of the AC coefficients */
if( !(p_vpar->mb.i_mb_type & (MB_MOTION_FORWARD | MB_MOTION_BACKWARD)) )
{
/* If mb_type has neither MOTION_FORWARD nor MOTION_BACKWARD, this
* is useless, but also harmless. */
p_vpar->mb.i_motion_type = MOTION_FRAME;
}
else
i_nc = 0;
i_coef = 0;
for( i_parse = 0; ; i_parse++ )
{
if( p_vpar->picture.i_structure == FRAME_STRUCTURE
&& p_vpar->picture.b_frame_pred_frame_dct )
i_code = ShowBits( &p_vpar->bit_stream, 16 );
if( i_code >= 16384 )
{
p_vpar->mb.i_motion_type = MOTION_FRAME;
if( i_parse == 0 )
{
i_run = pl_DCT_tab_dc[(i_code>>12)-4].i_run;
i_level = pl_DCT_tab_dc[(i_code>>12)-4].i_level;
i_length = pl_DCT_tab_dc[(i_code>>12)-4].i_length;
}
else
{
i_run = pl_DCT_tab_ac[(i_code>>12)-4].i_run;
i_level = pl_DCT_tab_ac[(i_code>>12)-4].i_level;
i_length = pl_DCT_tab_ac[(i_code>>12)-4].i_length;
}
}
else if( i_code >= 1024 )
{
i_run = pl_DCT_tab0[(i_code>>8)-4].i_run;
i_length = pl_DCT_tab0[(i_code>>8)-4].i_length;
i_level = pl_DCT_tab0[(i_code>>8)-4].i_level;
}
else
{
p_vpar->mb.i_motion_type = GetBits( &p_vpar->bit_stream, 2 );
i_run = ppl_dct_coef[0][i_code].i_run;
i_length = ppl_dct_coef[0][i_code].i_length;
i_level = ppl_dct_coef[0][i_code].i_level;
}
}
if( p_mb->b_P_coding_type && !(p_vpar->mb.i_mb_type & (MB_MOTION_FORWARD|MB_INTRA)) )
{
/* Special No-MC macroblock in P pictures (7.6.3.5). */
memset( p_vpar->slice.pppi_pmv, 0, 8*sizeof(int) );
memset( p_mb->pppi_motion_vectors, 0, 8*sizeof(int) );
p_vpar->mb.i_motion_type = MOTION_FRAME;
p_mb->ppi_field_select[0][0] = ( p_vpar->picture.i_current_structure == BOTTOM_FIELD );
}
if( p_vpar->mb.i_mb_type & MB_INTRA )
{
/* For the intra macroblocks, we use an empty motion
* compensation function */
p_mb->pf_motion = vdec_MotionDummy;
}
else
{
p_mb->pf_motion = pppf_motion[p_vpar->sequence.i_chroma_format]
[p_vpar->picture.b_frame_structure]
[p_vpar->mb.i_motion_type];
}
RemoveBits( &p_vpar->bit_stream, i_length );
p_vpar->mb.i_mv_count = ppi_mv_count[p_vpar->picture.b_frame_structure]
[p_vpar->mb.i_motion_type];
p_vpar->mb.i_mv_format = ppi_mv_format[p_vpar->picture.b_frame_structure]
[p_vpar->mb.i_motion_type];
switch( i_run )
{
case DCT_ESCAPE:
i_run = GetBits( &p_vpar->bit_stream, 6 );
i_level = GetBits( &p_vpar->bit_stream, 12 );
i_level = (b_sign = ( i_level > 2047 )) ? 4096 - i_level
: i_level;
break;
case DCT_EOB:
if( i_nc <= 1 )
{
p_mb->pf_idct[i_b] = vdec_SparseIDCT;
p_mb->pi_sparse_pos[i_b] = i_coef;
}
else
{
p_mb->pf_idct[i_b] = vdec_IDCT;
}
return;
p_vpar->mb.b_dct_type = 0;
if( (p_vpar->picture.i_structure == FRAME_STRUCTURE) &&
(!p_vpar->picture.b_frame_pred_frame_dct) &&
(p_vpar->mb.i_mb_type & (MB_PATTERN|MB_INTRA)) )
{
if( (p_vpar->mb.b_dct_type = GetBits( &p_vpar->bit_stream, 1 )) )
break;
default:
b_sign = GetBits( &p_vpar->bit_stream, 1 );
}
i_coef = i_parse;
i_parse += i_run;
i_nc ++;
if( i_parse >= 64 )
{
/* The DCT is coded on fields. Jump one line between each
* sample. */
p_mb->i_addb_l_stride <<= 1;
p_mb->i_addb_l_stride += 8;
/* With CHROMA_420, the DCT is necessarily frame-coded. */
if( p_vpar->sequence.i_chroma_format != CHROMA_420 )
{
p_mb->i_addb_c_stride <<= 1;
p_mb->i_addb_c_stride += 8;
}
break;
}
i_pos = pi_scan[p_vpar->picture.b_alternate_scan][i_parse];
i_level = ( ((i_level << 1) + 1) * p_vpar->slice.i_quantizer_scale
* ppi_quant[i_quant_type][i_pos] ) >> 5;
p_mb->ppi_blocks[i_b][i_pos] = b_sign ? -i_level : i_level;
}
p_vpar->mb.b_dmv = p_vpar->mb.i_motion_type == MOTION_DMV;
intf_ErrMsg("vpar error: DCT coeff (non-intra) is out of bounds\n");
p_vpar->picture.b_error = 1;
}
/*****************************************************************************
* vpar_ParseMacroblock : Parse the next macroblock
* vpar_DecodeMPEG2Intra : decode MPEG-2 intra blocks
*****************************************************************************/
void vpar_ParseMacroblock( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base )
static void vpar_DecodeMPEG2Intra( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
{
static f_decode_block_t pppf_decode_block[2][2] =
{ {vpar_DecodeMPEG1Non, vpar_DecodeMPEG1Intra},
{vpar_DecodeMPEG2Non, vpar_DecodeMPEG2Intra} };
static int pi_x[12] = {0,8,0,8,0,0,0,0,8,8,8,8};
static int pi_y[2][12] = { {0,0,8,8,0,0,8,8,0,0,8,8},
{0,0,1,1,0,0,1,1,0,0,1,1} };
int i_mb, i_b, i_mask;
macroblock_t * p_mb;
yuv_data_t * p_data1;
yuv_data_t * p_data2;
/************* DEBUG *************/
int i_inc;
static int i_count;
i_count++;
i_inc = MacroblockAddressIncrement( p_vpar );
*pi_mb_address += i_inc;
//*pi_mb_address += MacroblockAddressIncrement( p_vpar );
int i_parse;
int i_nc;
int i_cc;
int i_coef;
int i_type, i_quant_type;
int i_code;
int i_length;
int i_pos;
int i_dct_dc_size;
int i_dct_dc_diff;
int i_run;
int i_level;
boolean_t b_vlc_intra;
boolean_t b_sign;
int * ppi_quant[2];
/* Lookup Table for the chromatic component */
static int pi_cc_index[12] = { 0, 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2 };
i_cc = pi_cc_index[i_b];
for( i_mb = i_mb_previous + 1; i_mb < *pi_mb_address; i_mb++ )
{
/* Skipped macroblock (ISO/IEC 13818-2 7.6.6). */
static int pi_dc_dct_reinit[4] = {128,256,512,1024};
static f_motion_t pf_motion_skipped[4][4] =
{
{NULL, NULL, NULL, NULL},
{NULL, vdec_MotionFieldField420, vdec_MotionFieldField420, vdec_MotionFrameFrame420},
{NULL, vdec_MotionFieldField422, vdec_MotionFieldField422, vdec_MotionFrameFrame422},
{NULL, vdec_MotionFieldField444, vdec_MotionFieldField444, vdec_MotionFrameFrame444},
};
/* Determine whether it is luminance or not (chrominance) */
i_type = ( i_cc + 1 ) >> 1;
i_quant_type = (!i_type) | (p_vpar->sequence.i_chroma_format == CHROMA_420);
/* Reset DC predictors (7.2.1). */
p_vpar->slice.pi_dc_dct_pred[0] = p_vpar->slice.pi_dc_dct_pred[1]
= p_vpar->slice.pi_dc_dct_pred[2]
= pi_dc_dct_reinit[p_vpar->picture.i_intra_dc_precision];
/* Give a pointer to the quantization matrices for intra blocks */
ppi_quant[1] = p_vpar->sequence.intra_quant.pi_matrix;
ppi_quant[0] = p_vpar->sequence.chroma_intra_quant.pi_matrix;
if( p_vpar->picture.i_coding_type == P_CODING_TYPE )
{
/* Reset motion vector predictors (ISO/IEC 13818-2 7.6.3.4). */
memset( p_vpar->slice.pppi_pmv, 0, 8*sizeof(int) );
#if 0
/* Decoding of the DC intra coefficient */
/* The nb of bits to parse depends on i_type */
i_code = ShowBits( &p_vpar->bit_stream, 9 + i_type );
/* To reduce memory occupation, there are two lookup tables
* See InitDCT above */
i_code5 = i_code >> (4+i_type);
/* Shall we lookup in the first or in the second table ? */
i_select = ( i_code5 == 31 );
/* Offset value for looking in the second table */
i_offset = 0x1f0 + ( i_type * 0x1f0 );
i_pos = ( i_code5 * ( ! i_select ) ) + ( ( i_code - i_offset ) * i_select );
i_dct_dc_size = p_vpar->pppl_dct_dc_size[i_type][i_select][i_pos].i_value;
#endif
if( !i_type/*i_b < 4*/ )
{
/* decode length */
i_code = ShowBits(&p_vpar->bit_stream, 5);
if (i_code<31)
{
i_dct_dc_size = pl_dct_dc_lum_init_table_1[i_code].i_value;
i_length = pl_dct_dc_lum_init_table_1[i_code].i_length;
RemoveBits( &p_vpar->bit_stream, i_length);
}
else
{
i_code = ShowBits(&p_vpar->bit_stream, 9) - 0x1f0;
i_dct_dc_size = pl_dct_dc_lum_init_table_2[i_code].i_value;
i_length = pl_dct_dc_lum_init_table_2[i_code].i_length;
RemoveBits( &p_vpar->bit_stream, i_length);
}
}
else
{
/* decode length */
i_code = ShowBits(&p_vpar->bit_stream, 5);
if( (p_mb = vpar_NewMacroblock( &p_vpar->vfifo )) == NULL )
if (i_code<31)
{
p_vpar->picture.b_error = 1;
intf_ErrMsg("vpar error: macroblock list is empty !\n");
return;
i_dct_dc_size = pl_dct_dc_chrom_init_table_1[i_code].i_value;
i_length = pl_dct_dc_chrom_init_table_1[i_code].i_length;
RemoveBits(&p_vpar->bit_stream, i_length);
}
#ifdef VDEC_SMP
p_vpar->picture.pp_mb[i_mb_base + i_mb] = p_mb;
#endif
else
{
i_code = ShowBits(&p_vpar->bit_stream, 10) - 0x3e0;
i_dct_dc_size = pl_dct_dc_chrom_init_table_2[i_code].i_value;
i_length = pl_dct_dc_chrom_init_table_2[i_code].i_length;
RemoveBits( &p_vpar->bit_stream, i_length);
}
}
if (i_dct_dc_size==0)
i_dct_dc_diff = 0;
else
{
i_dct_dc_diff = GetBits( &p_vpar->bit_stream, i_dct_dc_size);
if ((i_dct_dc_diff & (1<<(i_dct_dc_size-1)))==0)
i_dct_dc_diff-= (1<<i_dct_dc_size) - 1;
}
InitMacroblock( p_vpar, p_mb );
/* Motion type is picture structure. */
p_mb->pf_motion = pf_motion_skipped[p_vpar->sequence.i_chroma_format]
[p_vpar->picture.i_structure];
p_mb->i_mb_type = MB_MOTION_FORWARD;
p_mb->i_coded_block_pattern = 0;
memset( p_mb->pppi_motion_vectors, 0, 8*sizeof(int) );
/* Dump the variable length code */
//RemoveBits( &p_vpar->bit_stream,
// p_vpar->pppl_dct_dc_size[i_type][i_select][i_pos].i_length );
/* Read the actual code with the good length */
p_vpar->slice.pi_dc_dct_pred[i_cc] += i_dct_dc_diff;
/* Set the field we use for motion compensation */
p_mb->ppi_field_select[0][0] = p_mb->ppi_field_select[0][1]
= ( p_vpar->picture.i_current_structure == BOTTOM_FIELD );
p_mb->ppi_blocks[i_b][0] = ( p_vpar->slice.pi_dc_dct_pred[i_cc] <<
( 3 - p_vpar->picture.i_intra_dc_precision ) );
i_nc = ( p_vpar->slice.pi_dc_dct_pred[i_cc] != 0 );
#ifndef VDEC_SMP
/* Decode the macroblock NOW ! */
vdec_DecodeMacroblock( p_vpar->pp_vdec[0], p_mb );
#endif
/* Decoding of the AC coefficients */
i_coef = 0;
b_vlc_intra = p_vpar->picture.b_intra_vlc_format;
for( i_parse = 1; /*i_parse < 64*/; i_parse++ )
{
i_code = ShowBits( &p_vpar->bit_stream, 16 );
if( i_code >= 16384 )
{
if( b_vlc_intra )
{
i_run = pl_DCT_tab0a[(i_code>>8)-4].i_run;
i_level = pl_DCT_tab0a[(i_code>>8)-4].i_level;
i_length = pl_DCT_tab0a[(i_code>>8)-4].i_length;
//fprintf( stderr, "**********> %d, %d, %d *******\n", i_run, i_level, (i_code>>8)-4 );
}
else
{
i_run = pl_DCT_tab_ac[(i_code>>12)-4].i_run;
i_level = pl_DCT_tab_ac[(i_code>>12)-4].i_level;
i_length = pl_DCT_tab_ac[(i_code>>12)-4].i_length;
}
}
else
{
i_run = ppl_dct_coef[b_vlc_intra][i_code].i_run;
i_length = ppl_dct_coef[b_vlc_intra][i_code].i_length;
i_level = ppl_dct_coef[b_vlc_intra][i_code].i_level;
}
#if 0
{
int code = i_code;
int intra_vlc_format = b_vlc_intra;
dct_lookup_t tab;
if (code>=16384 && !intra_vlc_format)
tab = pl_DCT_tab_ac[(code>>12)-4];
else if (code>=1024)
{
if (intra_vlc_format)
tab = pl_DCT_tab0a[(code>>8)-4];
else
tab = pl_DCT_tab0[(code>>8)-4];
}
else if (code>=512)
{
if (intra_vlc_format)
tab = pl_DCT_tab1a[(code>>6)-8];
else
tab = pl_DCT_tab1[(code>>6)-8];
}
else if (code>=256)
tab = pl_DCT_tab2[(code>>4)-16];
else if (code>=128)
tab = pl_DCT_tab3[(code>>3)-16];
else if (code>=64)
tab = pl_DCT_tab4[(code>>2)-16];
else if (code>=32)
tab = pl_DCT_tab5[(code>>1)-16];
else if (code>=16)
tab = pl_DCT_tab6[code-16];
else
{
fprintf( stderr, "invalid Huffman code in Decode_MPEG2_Intra_Block()\n");
}
/* Get a macroblock structure. */
if( (p_mb = vpar_NewMacroblock( &p_vpar->vfifo )) == NULL )
if( (i_run != tab.i_run) || (i_length != tab.i_length) || (i_level != tab.i_level) )
{
p_vpar->picture.b_error = 1;
intf_ErrMsg("vpar error: macroblock list is empty !\n");
return;
fprintf( stderr, "ET M....... !!!\n" );
exit(0);
}
#ifdef VDEC_SMP
p_vpar->picture.pp_mb[i_mb_base + *pi_mb_address] = p_mb;
}
#endif
InitMacroblock( p_vpar, p_mb );
/* Parse off macroblock_modes structure. */
MacroblockModes( p_vpar, p_mb );
if( p_vpar->mb.i_mb_type & MB_QUANT )
{
LoadQuantizerScale( p_vpar );
RemoveBits( &p_vpar->bit_stream, i_length );
switch( i_run )
{
case DCT_ESCAPE:
i_run = GetBits( &p_vpar->bit_stream, 6 );
i_level = GetBits( &p_vpar->bit_stream, 12 );
/*p_mb->ppi_blocks[i_b][i_parse] = ( b_sign = ( i_level > 2047 ) )
? ( -4096 + i_level )
: i_level;*/
i_level = (b_sign = ( i_level > 2047 )) ? 4096 - i_level
: i_level;
break;
case DCT_EOB:
if( i_nc <= 1 )
{
p_mb->pf_idct[i_b] = vdec_SparseIDCT;
p_mb->pi_sparse_pos[i_b] = i_coef;
}
else
{
p_mb->pf_idct[i_b] = vdec_IDCT;
}
return;
break;
default:
b_sign = GetBits( &p_vpar->bit_stream, 1 );
}
i_coef = i_parse;
i_parse += i_run;
i_nc ++;
if( i_parse >= 64 )
{
break;
}
i_pos = pi_scan[p_vpar->picture.b_alternate_scan][i_parse];
i_level = ( i_level *
p_vpar->slice.i_quantizer_scale *
ppi_quant[i_quant_type][i_pos] ) >> 4;
p_mb->ppi_blocks[i_b][i_pos] = b_sign ? -i_level : i_level;
}
if( p_vpar->mb.i_mb_type & MB_MOTION_FORWARD )
intf_ErrMsg("vpar error: DCT coeff (intra) is out of bounds\n");
p_vpar->picture.b_error = 1;
}
/*
* Motion vectors
*/
/****************************************************************************
* MotionCode : Parse the next motion code
****************************************************************************/
static __inline__ int MotionCode( vpar_thread_t * p_vpar )
{
int i_code;
static lookup_t pl_mv_tab0[8] =
{ {-1,0}, {3,3}, {2,2}, {2,2}, {1,1}, {1,1}, {1,1}, {1,1} };
/* Table B-10, motion_code, codes 0000011 ... 000011x */
static lookup_t pl_mv_tab1[8] =
{ {-1,0}, {-1,0}, {-1,0}, {7,6}, {6,6}, {5,6}, {4,5}, {4,5} };
/* Table B-10, motion_code, codes 0000001100 ... 000001011x */
static lookup_t pl_mv_tab2[12] = {
{16,9}, {15,9}, {14,9}, {13,9},
{12,9}, {11,9}, {10,8}, {10,8},
{9,8}, {9,8}, {8,8}, {8,8} };
if( GetBits( &p_vpar->bit_stream, 1 ) )
{
return 0;
}
if( (i_code = ShowBits( &p_vpar->bit_stream, 9) ) >= 64 )
{
//fprintf( stderr, "motion !\n" );
(*p_vpar->sequence.pf_decode_mv)( p_vpar, p_mb, 0 );
i_code >>= 6;
RemoveBits( &p_vpar->bit_stream, pl_mv_tab0[i_code].i_length );
return( GetBits( &p_vpar->bit_stream, 1 ) ?
-pl_mv_tab0[i_code].i_value : pl_mv_tab0[i_code].i_value );
}
if( p_vpar->mb.i_mb_type & MB_MOTION_BACKWARD )
if( i_code >= 24 )
{
//fprintf( stderr, "motion2 !\n" );
(*p_vpar->sequence.pf_decode_mv)( p_vpar, p_mb, 1 );
i_code >>= 3;
RemoveBits( &p_vpar->bit_stream, pl_mv_tab1[i_code].i_length );
return( GetBits( &p_vpar->bit_stream, 1 ) ?
-pl_mv_tab1[i_code].i_value : pl_mv_tab1[i_code].i_value );
}
if( p_vpar->picture.b_concealment_mv && (p_vpar->mb.i_mb_type & MB_INTRA) )
if( (i_code -= 12) < 0 )
{
RemoveBits( &p_vpar->bit_stream, 1 );
p_vpar->picture.b_error = 1;
intf_DbgMsg( "vpar debug: Invalid motion_vector code\n" );
return 0;
}
if( 0 )
//i_count == 1231 &&
// i_count != *pi_mb_address)
//p_vpar->picture.i_coding_type == P_CODING_TYPE )
{
fprintf( stderr, "i_count = %d (%d)\n", i_count, p_vpar->mb.i_mb_type );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x ", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x\n", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x ", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x\n", GetBits( &p_vpar->bit_stream, 16 ) );
exit(0);
RemoveBits( &p_vpar->bit_stream, pl_mv_tab2[i_code].i_length );
return( GetBits( &p_vpar->bit_stream, 1 ) ?
-pl_mv_tab2[i_code].i_value : pl_mv_tab2[i_code].i_value );
}
if( p_vpar->mb.i_mb_type & MB_PATTERN )
/****************************************************************************
* DecodeMotionVector : Decode a motion_vector
****************************************************************************/
static __inline__ void DecodeMotionVector( int * pi_prediction, int i_r_size,
int i_motion_code, int i_motion_residual, int i_full_pel )
{
int i_limit, i_vector;
/* ISO/IEC 13818-2 section 7.6.3.1 */
i_limit = 16 << i_r_size;
i_vector = *pi_prediction >> i_full_pel;
if( i_motion_code > 0 )
{
p_mb->i_coded_block_pattern = p_vpar->mb.i_coded_block_pattern = (*p_vpar->sequence.pf_decode_pattern)( p_vpar );
//fprintf( stderr, "pattern : %d\n", p_vpar->mb.i_coded_block_pattern );
i_vector += ((i_motion_code-1) << i_r_size) + i_motion_residual + 1;
if( i_vector >= i_limit )
i_vector -= i_limit + i_limit;
}
else
else if( i_motion_code < 0 )
{
int pi_coded_block_pattern[2] = {0,
(1 << (4+p_vpar->sequence.i_chroma_nb_blocks)) - 1};
p_mb->i_coded_block_pattern = p_vpar->mb.i_coded_block_pattern = pi_coded_block_pattern
[p_vpar->mb.i_mb_type & MB_INTRA];
i_vector -= ((-i_motion_code-1) << i_r_size) + i_motion_residual + 1;
if( i_vector < -i_limit )
i_vector += i_limit + i_limit;
}
/*
* Effectively decode blocks.
*/
i_mask = 1 << (3 + p_vpar->sequence.i_chroma_nb_blocks);
/* luminance */
p_data1 = p_mb->p_picture->p_y
+ p_mb->i_l_x + p_mb->i_l_y*(p_vpar->sequence.i_width);
*pi_prediction = i_vector << i_full_pel;
}
for( i_b = 0 ; i_b < 4 ; i_b++, i_mask >>= 1 )
/****************************************************************************
* MotionVector : Parse the next motion_vector field
****************************************************************************/
static __inline__ void MotionVector( vpar_thread_t * p_vpar,
macroblock_t * p_mb, int i_r,
int i_s, int i_full_pel, int i_structure )
{
int i_motion_code, i_motion_residual;
int i_r_size;
int pi_dm_vector[2];
i_r_size = p_vpar->picture.ppi_f_code[i_s][0] - 1;
i_motion_code = MotionCode( p_vpar );
i_motion_residual = (i_r_size != 0 && i_motion_code != 0) ?
GetBits( &p_vpar->bit_stream, i_r_size) : 0;
DecodeMotionVector( &p_vpar->slice.pppi_pmv[i_r][i_s][0], i_r_size,
i_motion_code, i_motion_residual, i_full_pel );
p_mb->pppi_motion_vectors[i_r][i_s][0] = p_vpar->slice.pppi_pmv[i_r][i_s][0];
if( p_vpar->mb.b_dmv )
{
if( p_vpar->mb.i_coded_block_pattern & i_mask )
if( GetBits(&p_vpar->bit_stream, 1) )
{
memset( p_mb->ppi_blocks[i_b], 0, 64*sizeof(dctelem_t) );
(*pppf_decode_block[p_vpar->sequence.b_mpeg2]
[p_vpar->mb.i_mb_type & MB_INTRA])
( p_vpar, p_mb, i_b );
/* Calculate block coordinates. */
p_mb->p_data[i_b] = p_data1
+ pi_y[p_vpar->mb.b_dct_type][i_b]
* p_vpar->sequence.i_width
+ pi_x[i_b];
pi_dm_vector[0] = GetBits( &p_vpar->bit_stream, 1 ) ? -1 : 1;
}
else
{
pi_dm_vector[0] = 0;
}
}
i_r_size = p_vpar->picture.ppi_f_code[i_s][1]-1;
i_motion_code = MotionCode( p_vpar );
i_motion_residual = (i_r_size != 0 && i_motion_code != 0) ?
GetBits( &p_vpar->bit_stream, i_r_size) : 0;
/* chrominance */
p_data1 = p_mb->p_picture->p_u
+ p_mb->i_c_x
+ p_mb->i_c_y
* (p_vpar->sequence.i_chroma_width);
p_data2 = p_mb->p_picture->p_v
+ p_mb->i_c_x
+ p_mb->i_c_y
* (p_vpar->sequence.i_chroma_width);
if( (p_vpar->mb.i_mv_format == MOTION_FIELD)
&& (i_structure == FRAME_STRUCTURE) )
{
p_vpar->slice.pppi_pmv[i_r][i_s][1] >>= 1;
}
for( i_b = 4; i_b < 4 + p_vpar->sequence.i_chroma_nb_blocks;
i_b++, i_mask >>= 1 )
DecodeMotionVector( &p_vpar->slice.pppi_pmv[i_r][i_s][1], i_r_size,
i_motion_code, i_motion_residual, i_full_pel );
if( (p_vpar->mb.i_mv_format == MOTION_FIELD)
&& (i_structure == FRAME_STRUCTURE) )
p_vpar->slice.pppi_pmv[i_r][i_s][1] <<= 1;
p_mb->pppi_motion_vectors[i_r][i_s][1] = p_vpar->slice.pppi_pmv[i_r][i_s][1];
if( p_vpar->mb.b_dmv )
{
yuv_data_t * pp_data[2] = {p_data1, p_data2};
if( GetBits(&p_vpar->bit_stream, 1) )
{
pi_dm_vector[1] = GetBits( &p_vpar->bit_stream, 1 ) ? -1 : 1;
}
else
{
pi_dm_vector[1] = 0;
}
/* Dual Prime Arithmetic (ISO/IEC 13818-2 section 7.6.3.6). */
if( p_vpar->mb.i_coded_block_pattern & i_mask )
#define i_mv_x p_mb->pppi_motion_vectors[0][0][0]
if( i_structure == FRAME_STRUCTURE )
{
memset( p_mb->ppi_blocks[i_b], 0, 64*sizeof(dctelem_t) );
(*pppf_decode_block[p_vpar->sequence.b_mpeg2]
[p_vpar->mb.i_mb_type & MB_INTRA])
( p_vpar, p_mb, i_b );
#define i_mv_y (p_mb->pppi_motion_vectors[0][0][1] << 1)
if( p_vpar->picture.b_top_field_first )
{
/* vector for prediction of top field from bottom field */
p_mb->ppi_dmv[0][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[0][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] - 1;
/* Calculate block coordinates. */
p_mb->p_data[i_b] = pp_data[i_b & 1]
+ pi_y[p_vpar->mb.b_dct_type][i_b]
* p_vpar->sequence.i_chroma_width
+ pi_x[i_b];
/* vector for prediction of bottom field from top field */
p_mb->ppi_dmv[1][0] = ((3*i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[1][1] = ((3*i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] + 1;
}
else
{
/* vector for prediction of top field from bottom field */
p_mb->ppi_dmv[0][0] = ((3*i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[0][1] = ((3*i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] - 1;
/* vector for prediction of bottom field from top field */
p_mb->ppi_dmv[1][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[1][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] + 1;
}
#undef i_mv_y
}
else
{
#define i_mv_y p_mb->pppi_motion_vectors[0][0][1]
/* vector for prediction from field of opposite 'parity' */
p_mb->ppi_dmv[0][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[0][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1];
/* correct for vertical field shift */
if( p_vpar->picture.i_structure == TOP_FIELD )
p_mb->ppi_dmv[0][1]--;
else
p_mb->ppi_dmv[0][1]++;
#undef i_mv_y
}
#undef i_mv_x
}
}
if( !( p_vpar->mb.i_mb_type & MB_INTRA ) )
{
static int pi_dc_dct_reinit[4] = {128,256,512,1024};
/*****************************************************************************
* DecodeMVMPEG1 : Parse the next MPEG-1 motion vectors
*****************************************************************************/
static __inline__ void DecodeMVMPEG1( vpar_thread_t * p_vpar,
macroblock_t * p_mb, int i_s, int i_structure )
{
MotionVector( p_vpar, p_mb, 0, i_s,
p_vpar->picture.pb_full_pel_vector[i_s], i_structure );
}
/* Reset DC predictors (7.2.1). */
p_vpar->slice.pi_dc_dct_pred[0] = p_vpar->slice.pi_dc_dct_pred[1]
= p_vpar->slice.pi_dc_dct_pred[2]
= pi_dc_dct_reinit[p_vpar->picture.i_intra_dc_precision];
}
else if( !p_vpar->picture.b_concealment_mv )
/*****************************************************************************
* DecodeMVMPEG2 : Parse the next MPEG-2 motion_vectors field
*****************************************************************************/
static __inline__ void DecodeMVMPEG2( vpar_thread_t * p_vpar,
macroblock_t * p_mb, int i_s, int i_structure )
{
if( p_vpar->mb.i_mv_count == 1 )
{
/* Reset MV predictors. */
memset( p_vpar->slice.pppi_pmv, 0, 8*sizeof(int) );
if( p_vpar->mb.i_mv_format == MOTION_FIELD && !p_vpar->mb.b_dmv )
{
p_mb->ppi_field_select[0][i_s] = p_mb->ppi_field_select[1][i_s]
= GetBits( &p_vpar->bit_stream, 1 );
}
MotionVector( p_vpar, p_mb, 0, i_s, 0, i_structure );
p_vpar->slice.pppi_pmv[1][i_s][0] = p_vpar->slice.pppi_pmv[0][i_s][0];
p_vpar->slice.pppi_pmv[1][i_s][1] = p_vpar->slice.pppi_pmv[0][i_s][1];
p_mb->pppi_motion_vectors[1][i_s][0] = p_vpar->slice.pppi_pmv[0][i_s][0];
p_mb->pppi_motion_vectors[1][i_s][1] = p_vpar->slice.pppi_pmv[0][i_s][1];
}
if( p_mb->b_P_coding_type && !(p_vpar->mb.i_mb_type & (MB_MOTION_FORWARD|MB_INTRA)) )
else
{
p_mb->i_mb_type |= MB_MOTION_FORWARD;
p_mb->ppi_field_select[0][i_s] = GetBits( &p_vpar->bit_stream, 1 );
MotionVector( p_vpar, p_mb, 0, i_s, 0, i_structure );
p_mb->ppi_field_select[1][i_s] = GetBits( &p_vpar->bit_stream, 1 );
MotionVector( p_vpar, p_mb, 1, i_s, 0, i_structure );
}
}
#ifndef VDEC_SMP
/* Decode the macroblock NOW ! */
vdec_DecodeMacroblock( p_vpar->pp_vdec[0], p_mb );
#endif
/*
if( p_vpar->picture.i_coding_type != I_CODING_TYPE )//!(p_mb->b_P_coding_type & MB_INTRA) )
{
p_mb->i_mb_type |= MB_MOTION_FORWARD;
}
*/
if( 0 )
//i_count == 249)
// i_count != *pi_mb_address)
//b_stop )
* Macroblock information structures
*/
/*****************************************************************************
* MacroblockAddressIncrement : Get the macroblock_address_increment field
*****************************************************************************/
static __inline__ int MacroblockAddressIncrement( vpar_thread_t * p_vpar )
{
fprintf( stderr, "i_count = %d (%d)\n", i_count, i_inc );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x ", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x\n", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x ", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x", GetBits( &p_vpar->bit_stream, 16 ) );
fprintf( stderr, "%x\n", GetBits( &p_vpar->bit_stream, 16 ) );
exit(0);
}
int i_addr_inc = 0;
/* Index in the lookup table mb_addr_inc */
int i_index = ShowBits( &p_vpar->bit_stream, 11 );
/* Test the presence of the escape character */
while( i_index == 8 )
{
RemoveBits( &p_vpar->bit_stream, 11 );
i_addr_inc += 33;
i_index = ShowBits( &p_vpar->bit_stream, 11 );
}
/* Assign the value from the lookup table */
i_addr_inc += p_vpar->pl_mb_addr_inc[i_index].i_value;
/* Remove the right number of bits from the stream */
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_mb_addr_inc[i_index].i_length );
return i_addr_inc;
}
/*****************************************************************************
* vpar_IMBType : macroblock_type in I pictures
* IMBType : macroblock_type in I pictures
*****************************************************************************/
int vpar_IMBType( vpar_thread_t * p_vpar )
static __inline__ int IMBType( vpar_thread_t * p_vpar )
{
/* Take two bits for testing */
int i_type = ShowBits( &p_vpar->bit_stream, 2 );
......@@ -1010,50 +1233,13 @@ int vpar_IMBType( vpar_thread_t * p_vpar )
}
/*****************************************************************************
* vpar_PMBType : macroblock_type in P pictures
* PMBType : macroblock_type in P pictures
*****************************************************************************/
int vpar_PMBType( vpar_thread_t * p_vpar )
static __inline__ int PMBType( vpar_thread_t * p_vpar )
{
/* Testing on 6 bits */
int i_type = ShowBits( &p_vpar->bit_stream, 6 );
#if 0
/* Table B-3, macroblock_type in P-pictures, codes 001..1xx */
static lookup_t PMBtab0[8] = {
{-1,0},
{MB_MOTION_FORWARD,3},
{MB_PATTERN,2}, {MB_PATTERN,2},
{MB_MOTION_FORWARD|MB_PATTERN,1},
{MB_MOTION_FORWARD|MB_PATTERN,1},
{MB_MOTION_FORWARD|MB_PATTERN,1},
{MB_MOTION_FORWARD|MB_PATTERN,1}
};
/* Table B-3, macroblock_type in P-pictures, codes 000001..00011x */
static lookup_t PMBtab1[8] = {
{-1,0},
{MB_QUANT|MB_INTRA,6},
{MB_QUANT|MB_PATTERN,5}, {MB_QUANT|MB_PATTERN,5},
{MB_QUANT|MB_MOTION_FORWARD|MB_PATTERN,5}, {MB_QUANT|MB_MOTION_FORWARD|MB_PATTERN,5},
{MB_INTRA,5}, {MB_INTRA,5}
};
if(i_type >= 8)
{
i_type >>= 3;
RemoveBits( &p_vpar->bit_stream,PMBtab0[i_type].i_length );
return PMBtab0[i_type].i_value;
}
if (i_type==0)
{
printf("Invalid P macroblock_type code\n");
return -1;
}
RemoveBits( &p_vpar->bit_stream,PMBtab1[i_type].i_length );
return PMBtab1[i_type].i_value;
#endif
/* Dump the good number of bits */
RemoveBits( &p_vpar->bit_stream, p_vpar->ppl_mb_type[0][i_type].i_length );
/* return the value from the lookup table for P type */
......@@ -1061,9 +1247,9 @@ static lookup_t PMBtab1[8] = {
}
/*****************************************************************************
* vpar_BMBType : macroblock_type in B pictures
* BMBType : macroblock_type in B pictures
*****************************************************************************/
int vpar_BMBType( vpar_thread_t * p_vpar )
static __inline__ int BMBType( vpar_thread_t * p_vpar )
{
/* Testing on 6 bits */
int i_type = ShowBits( &p_vpar->bit_stream, 6 );
......@@ -1076,28 +1262,22 @@ int vpar_BMBType( vpar_thread_t * p_vpar )
}
/*****************************************************************************
* vpar_DMBType : macroblock_type in D pictures
* DMBType : macroblock_type in D pictures
*****************************************************************************/
int vpar_DMBType( vpar_thread_t * p_vpar )
static __inline__ int DMBType( vpar_thread_t * p_vpar )
{
/* Taking 1 bit */
int i_type = GetBits( &p_vpar->bit_stream, 1 );
/* Lookup table */
static int pi_mb_Dtype[2] = { MB_ERROR, 1 };
return pi_mb_Dtype[i_type];
return GetBits( &p_vpar->bit_stream, 1 );
}
/*****************************************************************************
* vpar_CodedPattern420 : coded_block_pattern with 420 chroma
* CodedPattern420 : coded_block_pattern with 4:2:0 chroma
*****************************************************************************/
int vpar_CodedPattern420( vpar_thread_t * p_vpar )
static __inline__ int CodedPattern420( vpar_thread_t * p_vpar )
{
/* Take the max 9 bits length vlc code for testing */
int i_vlc = ShowBits( &p_vpar->bit_stream, 9 );
/* Skip the number of bits given by the lookup table */
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_coded_pattern[i_vlc].i_length );
/* return the value from the vlc table */
......@@ -1105,414 +1285,600 @@ int vpar_CodedPattern420( vpar_thread_t * p_vpar )
}
/*****************************************************************************
* vpar_CodedPattern422 : coded_block_pattern with 422 chroma
* CodedPattern422 : coded_block_pattern with 4:2:2 chroma
*****************************************************************************/
static __inline__ int CodedPattern422( vpar_thread_t * p_vpar )
{
int i_vlc = ShowBits( &p_vpar->bit_stream, 9 );
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_coded_pattern[i_vlc].i_length );
/* Supplementary 2 bits long code for 4:2:2 format */
return p_vpar->pl_coded_pattern[i_vlc].i_value |
(GetBits( &p_vpar->bit_stream, 2 ) << 6);
}
/*****************************************************************************
* CodedPattern444 : coded_block_pattern with 4:4:4 chroma
*****************************************************************************/
static __inline__ int CodedPattern444( vpar_thread_t * p_vpar )
{
int i_vlc = ShowBits( &p_vpar->bit_stream, 9 );
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_coded_pattern[i_vlc].i_length );
return p_vpar->pl_coded_pattern[i_vlc].i_value |
(GetBits( &p_vpar->bit_stream, 6 ) << 6);
}
/*****************************************************************************
* InitMacroblock : Initialize macroblock values
*****************************************************************************/
static __inline__ void InitMacroblock( vpar_thread_t * p_vpar,
macroblock_t * p_mb, int i_coding_type,
int i_structure,
boolean_t b_second_field )
{
p_mb->i_chroma_nb_blocks = p_vpar->sequence.i_chroma_nb_blocks;
p_mb->p_picture = p_vpar->picture.p_picture;
if( i_coding_type == B_CODING_TYPE )
p_mb->p_backward = p_vpar->sequence.p_backward;
else
p_mb->p_backward = NULL;
if( (i_coding_type == P_CODING_TYPE) || (i_coding_type == B_CODING_TYPE) )
p_mb->p_forward = p_vpar->sequence.p_forward;
else
p_mb->p_forward = NULL;
p_mb->i_l_x = p_vpar->mb.i_l_x;
p_mb->i_c_x = p_vpar->mb.i_c_x;
p_mb->i_motion_l_y = p_vpar->mb.i_l_y;
p_mb->i_motion_c_y = p_vpar->mb.i_c_y;
if( (p_mb->b_motion_field = (i_structure == BOTTOM_FIELD)) )
{
p_mb->i_motion_l_y--;
p_mb->i_motion_c_y--;
}
p_mb->i_addb_l_stride = (p_mb->i_l_stride = p_vpar->picture.i_l_stride) - 8;
p_mb->i_addb_c_stride = (p_mb->i_c_stride = p_vpar->picture.i_c_stride) - 8;
p_mb->b_P_second = ( b_second_field && i_coding_type == P_CODING_TYPE );
}
/*****************************************************************************
* UpdateContext : Update the p_vpar contextual values
*****************************************************************************/
int vpar_CodedPattern422( vpar_thread_t * p_vpar )
static __inline__ void UpdateContext( vpar_thread_t * p_vpar, int i_structure )
{
int i_vlc = ShowBits( &p_vpar->bit_stream, 9 );
/* Supplementary 2 bits long code for 422 format */
int i_coded_block_pattern_1;
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_coded_pattern[i_vlc].i_length );
i_coded_block_pattern_1 = GetBits( &p_vpar->bit_stream, 2 );
/* the code is just to be added to the value found in the table */
return p_vpar->pl_coded_pattern[i_vlc].i_value |
(i_coded_block_pattern_1 << 6);
}
/* Update macroblock real position. */
p_vpar->mb.i_l_x += 16;
p_vpar->mb.i_l_y += (p_vpar->mb.i_l_x / p_vpar->sequence.i_width)
* (2 - (i_structure == FRAME_STRUCTURE)) * 16;
p_vpar->mb.i_l_x %= p_vpar->sequence.i_width;
/*****************************************************************************
* vpar_CodedPattern444 : coded_block_pattern with 444 chroma
*****************************************************************************/
int vpar_CodedPattern444( vpar_thread_t * p_vpar )
{
int i_vlc = ShowBits( &p_vpar->bit_stream, 9 );
int i_coded_block_pattern_2;
RemoveBits( &p_vpar->bit_stream, p_vpar->pl_coded_pattern[i_vlc].i_length );
i_coded_block_pattern_2 = GetBits( &p_vpar->bit_stream, 6 );
return p_vpar->pl_coded_pattern[i_vlc].i_value |
( i_coded_block_pattern_2 << 6 );
p_vpar->mb.i_c_x += p_vpar->sequence.i_chroma_mb_width;
p_vpar->mb.i_c_y += (p_vpar->mb.i_c_x / p_vpar->sequence.i_chroma_width)
* (2 - (i_structure == FRAME_STRUCTURE))
* p_vpar->sequence.i_chroma_mb_height;
p_vpar->mb.i_c_x %= p_vpar->sequence.i_chroma_width;
}
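/*
 * Illustrative sketch (hypothetical helper, not part of the patch): the
 * luminance address arithmetic performed by UpdateContext() above. The x
 * position advances by 16 pixels; when it wraps past the picture width the
 * y position jumps by one macroblock row, i.e. 16 lines in a frame picture
 * and 32 lines in a field picture (a field only owns every other line).
 */
static __inline__ void UpdateLumPosition( int * pi_x, int * pi_y,
                                          int i_width,
                                          boolean_t b_frame_structure )
{
    *pi_x += 16;
    *pi_y += ( *pi_x / i_width ) * ( b_frame_structure ? 16 : 32 );
    *pi_x %= i_width;
}
/* Example: i_width = 720, *pi_x = 704, frame picture
 *   -> *pi_x becomes 720, *pi_y += 16, then *pi_x wraps back to 0. */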
/*****************************************************************************
* vpar_DecodeMPEG1Non : decode MPEG-1 non-intra blocks
* SkippedMacroblock : Generate a skipped macroblock with NULL motion vector
*****************************************************************************/
static void vpar_DecodeMPEG1Non( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
static __inline__ void SkippedMacroblock( vpar_thread_t * p_vpar, int i_mb,
int i_mb_base, int i_coding_type,
int i_chroma_format,
int i_structure,
boolean_t b_second_field )
{
macroblock_t * p_mb;
if( p_vpar->picture.i_coding_type == D_CODING_TYPE )
static f_motion_t pf_motion_skipped[4][4] =
{
/* Remove end_of_macroblock (always 1, prevents startcode emulation)
* ISO/IEC 11172-2 section 2.4.2.7 and 2.4.3.6 */
RemoveBits( &p_vpar->bit_stream, 1 );
{NULL, NULL, NULL, NULL},
{NULL, vdec_MotionFieldField420, vdec_MotionFieldField420,
vdec_MotionFrameFrame420},
{NULL, vdec_MotionFieldField422, vdec_MotionFieldField422,
vdec_MotionFrameFrame422},
{NULL, vdec_MotionFieldField444, vdec_MotionFieldField444,
vdec_MotionFrameFrame444},
};
if( (p_mb = vpar_NewMacroblock( &p_vpar->vfifo )) == NULL )
{
/* b_die == 1 */
return;
}
#ifdef VDEC_SMP
p_vpar->picture.pp_mb[i_mb_base + i_mb] = p_mb;
#endif
InitMacroblock( p_vpar, p_mb, i_coding_type, i_structure, b_second_field );
/* Motion type is picture structure. */
p_mb->pf_motion = pf_motion_skipped[i_chroma_format]
[i_structure];
p_mb->i_mb_type = MB_MOTION_FORWARD;
p_mb->i_coded_block_pattern = 0;
memset( p_mb->pppi_motion_vectors, 0, 8*sizeof(int) );
/* Set the field we use for motion compensation */
p_mb->ppi_field_select[0][0] = p_mb->ppi_field_select[0][1]
= ( i_structure == BOTTOM_FIELD );
UpdateContext( p_vpar, i_structure );
#ifndef VDEC_SMP
/* Decode the macroblock NOW ! */
vdec_DecodeMacroblock( p_vpar->pp_vdec[0], p_mb );
#endif
}
/*****************************************************************************
* vpar_DecodeMPEG1Intra : decode MPEG-1 intra blocks
* MacroblockModes : Get the macroblock_modes structure
*****************************************************************************/
static void vpar_DecodeMPEG1Intra( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
static __inline__ void MacroblockModes( vpar_thread_t * p_vpar,
macroblock_t * p_mb,
int i_chroma_format,
int i_coding_type,
int i_structure )
{
static int ppi_mv_count[2][4] = { {0, 1, 2, 1}, {0, 2, 1, 1} };
static int ppi_mv_format[2][4] = { {0, 1, 1, 1}, {0, 1, 2, 1} };
if( p_vpar->picture.i_coding_type == D_CODING_TYPE )
/* Get macroblock_type. */
switch( i_coding_type )
{
/* Remove end_of_macroblock (always 1, prevents startcode emulation)
* ISO/IEC 11172-2 section 2.4.2.7 and 2.4.3.6 */
RemoveBits( &p_vpar->bit_stream, 1 );
case P_CODING_TYPE:
p_mb->i_mb_type = PMBType( p_vpar );
break;
case B_CODING_TYPE:
p_mb->i_mb_type = BMBType( p_vpar );
break;
case I_CODING_TYPE:
p_mb->i_mb_type = IMBType( p_vpar );
break;
case D_CODING_TYPE:
p_mb->i_mb_type = DMBType( p_vpar );
}
/* SCALABILITY : warning, we don't know if spatial_temporal_weight_code
* has to be dropped, take care if you use scalable streams. */
/* RemoveBits( &p_vpar->bit_stream, 2 ); */
if( p_mb->i_mb_type & (MB_MOTION_FORWARD | MB_MOTION_BACKWARD) )
{
if( !(i_structure == FRAME_STRUCTURE
&& p_vpar->picture.b_frame_pred_frame_dct) )
{
p_vpar->mb.i_motion_type = GetBits( &p_vpar->bit_stream, 2 );
}
else
{
p_vpar->mb.i_motion_type = MOTION_FRAME;
}
}
/* Derive the number and format of the motion vectors from the picture
 * structure and the motion type. */
p_vpar->mb.i_mv_count = ppi_mv_count[i_structure == FRAME_STRUCTURE]
[p_vpar->mb.i_motion_type];
p_vpar->mb.i_mv_format = ppi_mv_format[i_structure == FRAME_STRUCTURE]
[p_vpar->mb.i_motion_type];
p_vpar->mb.b_dmv = p_vpar->mb.i_motion_type == MOTION_DMV;
p_vpar->mb.b_dct_type = 0;
if( (i_structure == FRAME_STRUCTURE) &&
(!p_vpar->picture.b_frame_pred_frame_dct) &&
(p_mb->i_mb_type & (MB_PATTERN|MB_INTRA)) )
{
if( (p_vpar->mb.b_dct_type = GetBits( &p_vpar->bit_stream, 1 )) )
{
/* The DCT is coded on fields. Jump one line between each
* sample. */
p_mb->i_addb_l_stride <<= 1;
p_mb->i_addb_l_stride += 8;
/* With CHROMA_420, the DCT is necessarily frame-coded. */
if( i_chroma_format != CHROMA_420 )
{
p_mb->i_addb_c_stride <<= 1;
p_mb->i_addb_c_stride += 8;
}
}
}
}
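/*
 * Illustrative sketch (hypothetical helper, not part of the patch): the
 * stride adjustment MacroblockModes() applies for field-coded DCTs. After
 * the 8 samples of a block row, the IDCT output pointer must reach the
 * start of the next row: one picture line below for a frame DCT, two lines
 * below for a field DCT, hence (stride - 8) doubled plus 8.
 */
static __inline__ int BlockRowAdvance( int i_picture_stride,
                                       boolean_t b_field_dct )
{
    int i_advance = i_picture_stride - 8;        /* frame DCT */
    if( b_field_dct )
    {
        i_advance = ( i_advance << 1 ) + 8;      /* == 2*stride - 8 */
    }
    return i_advance;
}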
/*****************************************************************************
* vpar_DecodeMPEG2Non : decode MPEG-2 non-intra blocks
* ParseMacroblock : Parse the next macroblock
*****************************************************************************/
static void vpar_DecodeMPEG2Non( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
static __inline__ void ParseMacroblock(
vpar_thread_t * p_vpar,
int * pi_mb_address, /* previous address to be
* used for mb_addr_incr */
int i_mb_previous, /* actual previous mb */
int i_mb_base, /* non-zero if field structure */
/* The following parameters are explicit in
* optimized routines : */
boolean_t b_mpeg2, /* MPEG-2 stream ? */
int i_coding_type, /* I, P, B or D */
int i_chroma_format, /* 4:2:0, 4:2:2 or 4:4:4 */
int i_structure, /* T(OP), B(OTTOM) or F(RAME) */
boolean_t b_second_field ) /* second field of a
* field picture */
{
int i_parse;
int i_nc;
int i_cc;
int i_coef;
int i_type;
int i_code;
int i_length;
int i_pos;
int i_run;
int i_level;
int i_quant_type;
boolean_t b_sign;
int * ppi_quant[2];
/* Lookup Table for the chromatic component */
static int pi_cc_index[12] = { 0, 0, 0, 0, 1, 2, 1, 2, 1, 2 };
static f_motion_t pppf_motion[4][2][4] =
{
{ {NULL, NULL, NULL, NULL},
{NULL, NULL, NULL, NULL}
},
{ {NULL, vdec_MotionFieldField420, vdec_MotionField16x8420,
vdec_MotionFieldDMV420},
{NULL, vdec_MotionFrameField420, vdec_MotionFrameFrame420,
vdec_MotionFrameDMV420}
},
{ {NULL, vdec_MotionFieldField422, vdec_MotionField16x8422,
vdec_MotionFieldDMV422},
{NULL, vdec_MotionFrameField422, vdec_MotionFrameFrame422,
vdec_MotionFrameDMV422}
},
{ {NULL, vdec_MotionFieldField444, vdec_MotionField16x8444,
vdec_MotionFieldDMV444},
{NULL, vdec_MotionFrameField444, vdec_MotionFrameFrame444,
vdec_MotionFrameDMV444}
}
};
static f_decode_block_t pppf_decode_block[2][2] =
{ {vpar_DecodeMPEG1Non, vpar_DecodeMPEG1Intra},
{vpar_DecodeMPEG2Non, vpar_DecodeMPEG2Intra} };
static int pi_x[12] = {0,8,0,8,0,0,0,0,8,8,8,8};
static int pi_y[2][12] = { {0,0,8,8,0,0,8,8,0,0,8,8},
{0,0,1,1,0,0,1,1,0,0,1,1} };
static int pi_dc_dct_reinit[4] = {128,256,512,1024};
i_cc = pi_cc_index[i_b];
int i_mb, i_b, i_mask;
macroblock_t * p_mb;
yuv_data_t * p_data1;
yuv_data_t * p_data2;
/* Determine whether it is luminance or not (chrominance) */
i_type = ( i_cc + 1 ) >> 1;
i_quant_type = (!i_type) || (p_vpar->sequence.i_chroma_format == CHROMA_420);
/* Point to the quantization matrices for non-intra blocks */
ppi_quant[1] = p_vpar->sequence.nonintra_quant.pi_matrix;
ppi_quant[0] = p_vpar->sequence.chroma_nonintra_quant.pi_matrix;
*pi_mb_address += MacroblockAddressIncrement( p_vpar );
/* Decoding of the AC coefficients */
i_nc = 0;
i_coef = 0;
for( i_parse = 0; ; i_parse++ )
if( *pi_mb_address - i_mb_previous - 1 )
{
i_code = ShowBits( &p_vpar->bit_stream, 16 );
if( i_code >= 16384 )
{
if( i_parse == 0 )
{
i_run = pl_DCT_tab_dc[(i_code>>12)-4].i_run;
i_level = pl_DCT_tab_dc[(i_code>>12)-4].i_level;
i_length = pl_DCT_tab_dc[(i_code>>12)-4].i_length;
}
else
{
i_run = pl_DCT_tab_ac[(i_code>>12)-4].i_run;
i_level = pl_DCT_tab_ac[(i_code>>12)-4].i_level;
i_length = pl_DCT_tab_ac[(i_code>>12)-4].i_length;
}
}
else if( i_code >= 1024 )
/* Skipped macroblock (ISO/IEC 13818-2 7.6.6). */
/* Reset DC predictors (7.2.1). */
p_vpar->slice.pi_dc_dct_pred[0] = p_vpar->slice.pi_dc_dct_pred[1]
= p_vpar->slice.pi_dc_dct_pred[2]
= pi_dc_dct_reinit[p_vpar->picture.i_intra_dc_precision];
if( i_coding_type == P_CODING_TYPE )
{
i_run = pl_DCT_tab0[(i_code>>8)-4].i_run;
i_length = pl_DCT_tab0[(i_code>>8)-4].i_length;
i_level = pl_DCT_tab0[(i_code>>8)-4].i_level;
/* Reset motion vector predictors (ISO/IEC 13818-2 7.6.3.4). */
memset( p_vpar->slice.pppi_pmv, 0, 8*sizeof(int) );
}
else
for( i_mb = i_mb_previous + 1; i_mb < *pi_mb_address; i_mb++ )
{
i_run = ppl_dct_coef[0][i_code].i_run;
i_length = ppl_dct_coef[0][i_code].i_length;
i_level = ppl_dct_coef[0][i_code].i_level;
SkippedMacroblock( p_vpar, i_mb, i_mb_base, i_coding_type,
i_chroma_format, i_structure, b_second_field );
}
}
RemoveBits( &p_vpar->bit_stream, i_length );
/* Get a macroblock structure. */
if( (p_mb = vpar_NewMacroblock( &p_vpar->vfifo )) == NULL )
{
/* b_die == 1 */
return;
}
#ifdef VDEC_SMP
p_vpar->picture.pp_mb[i_mb_base + *pi_mb_address] = p_mb;
#endif
switch( i_run )
{
case DCT_ESCAPE:
i_run = GetBits( &p_vpar->bit_stream, 6 );
i_level = GetBits( &p_vpar->bit_stream, 12 );
i_level = (b_sign = ( i_level > 2047 )) ? 4096 - i_level
: i_level;
break;
case DCT_EOB:
if( i_nc <= 1 )
{
p_mb->pf_idct[i_b] = vdec_SparseIDCT;
p_mb->pi_sparse_pos[i_b] = i_coef;
}
else
{
p_mb->pf_idct[i_b] = vdec_IDCT;
}
return;
InitMacroblock( p_vpar, p_mb, i_coding_type, i_structure, b_second_field );
break;
default:
b_sign = GetBits( &p_vpar->bit_stream, 1 );
}
i_coef = i_parse;
i_parse += i_run;
i_nc ++;
/* Parse off macroblock_modes structure. */
MacroblockModes( p_vpar, p_mb, i_chroma_format, i_coding_type,
i_structure );
if( i_parse >= 64 )
{
break;
}
i_pos = pi_scan[p_vpar->picture.b_alternate_scan][i_parse];
i_level = ( ((i_level << 1) + 1) * p_vpar->slice.i_quantizer_scale
* ppi_quant[i_quant_type][i_pos] ) >> 5;
p_mb->ppi_blocks[i_b][i_pos] = b_sign ? -i_level : i_level;
if( p_mb->i_mb_type & MB_QUANT )
{
LoadQuantizerScale( p_vpar );
}
fprintf( stderr, "Non intra MPEG2 end (%d)\n", i_b );
p_vpar->picture.b_error = 1;
}
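/*
 * For reference (a sketch; the helper name is hypothetical): the non-intra
 * dequantization written above as ((i_level << 1) + 1) * scale * W >> 5 is
 *     F'' = ( (2*QF + 1) * quantiser_scale * W ) / 32
 * per ISO/IEC 13818-2 7.4.2.2. Worked example: QF = 3, quantiser_scale = 8,
 * W = 16 gives (7 * 8 * 16) >> 5 = 896 >> 5 = 28.
 */
static __inline__ int DequantNonIntra( int i_qf, int i_scale, int i_w )
{
    return ( ((i_qf << 1) + 1) * i_scale * i_w ) >> 5;
}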
/*****************************************************************************
* vpar_DecodeMPEG2Intra : decode MPEG-2 intra blocks
*****************************************************************************/
static void vpar_DecodeMPEG2Intra( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_b )
{
int i_parse;
int i_nc;
int i_cc;
int i_coef;
int i_type, i_quant_type;
int i_code;
int i_length;
int i_pos;
int i_dct_dc_size;
int i_dct_dc_diff;
int i_run;
int i_level;
boolean_t b_vlc_intra;
boolean_t b_sign;
int * ppi_quant[2];
/* Lookup Table for the chromatic component */
static int pi_cc_index[12] = { 0, 0, 0, 0, 1, 2, 1, 2, 1, 2 };
i_cc = pi_cc_index[i_b];
if( (i_coding_type == P_CODING_TYPE || i_coding_type == B_CODING_TYPE)
&& (p_mb->i_mb_type & MB_MOTION_FORWARD) )
{
if( b_mpeg2 )
DecodeMVMPEG2( p_vpar, p_mb, 0, i_structure );
else
DecodeMVMPEG1( p_vpar, p_mb, 0, i_structure );
}
/* Determine whether it is luminance or not (chrominance) */
i_type = ( i_cc + 1 ) >> 1;
i_quant_type = (!i_type) | (p_vpar->sequence.i_chroma_format == CHROMA_420);
if( (i_coding_type == B_CODING_TYPE)
&& (p_mb->i_mb_type & MB_MOTION_BACKWARD) )
{
if( b_mpeg2 )
DecodeMVMPEG2( p_vpar, p_mb, 1, i_structure );
else
DecodeMVMPEG1( p_vpar, p_mb, 1, i_structure );
}
/* Give a pointer to the quantization matrices for intra blocks */
ppi_quant[1] = p_vpar->sequence.intra_quant.pi_matrix;
ppi_quant[0] = p_vpar->sequence.chroma_intra_quant.pi_matrix;
if( i_coding_type == P_CODING_TYPE
&& !(p_mb->i_mb_type & (MB_MOTION_FORWARD|MB_INTRA)) )
{
/* Special No-MC macroblock in P pictures (7.6.3.5). */
p_mb->i_mb_type |= MB_MOTION_FORWARD;
memset( p_vpar->slice.pppi_pmv, 0, 8*sizeof(int) );
memset( p_mb->pppi_motion_vectors, 0, 8*sizeof(int) );
p_vpar->mb.i_motion_type = 1 + (i_structure == FRAME_STRUCTURE);
p_mb->ppi_field_select[0][0] = (i_structure == BOTTOM_FIELD);
}
#if 0
/* Decoding of the DC intra coefficient */
/* The nb of bits to parse depends on i_type */
i_code = ShowBits( &p_vpar->bit_stream, 9 + i_type );
/* To reduce memory occupation, there are two lookup tables
* See InitDCT above */
i_code5 = i_code >> (4+i_type);
/* Shall we lookup in the first or in the second table ? */
i_select = ( i_code5 == 31 );
/* Offset value for looking in the second table */
i_offset = 0x1f0 + ( i_type * 0x1f0 );
i_pos = ( i_code5 * ( ! i_select ) ) + ( ( i_code - i_offset ) * i_select );
i_dct_dc_size = p_vpar->pppl_dct_dc_size[i_type][i_select][i_pos].i_value;
#endif
if( !i_type/*i_b < 4*/ )
if( !(p_mb->i_mb_type & MB_INTRA) )
{
/* decode length */
i_code = ShowBits(&p_vpar->bit_stream, 5);
if (i_code<31)
/* Reset DC predictors (7.2.1). */
p_vpar->slice.pi_dc_dct_pred[0] = p_vpar->slice.pi_dc_dct_pred[1]
= p_vpar->slice.pi_dc_dct_pred[2]
= pi_dc_dct_reinit[p_vpar->picture.i_intra_dc_precision];
/* Motion function pointer. */
p_mb->pf_motion = pppf_motion[i_chroma_format]
[i_structure == FRAME_STRUCTURE]
[p_vpar->mb.i_motion_type];
if( p_mb->i_mb_type & MB_PATTERN )
{
i_dct_dc_size = pl_dct_dc_lum_init_table_1[i_code].i_value;
i_length = pl_dct_dc_lum_init_table_1[i_code].i_length;
RemoveBits( &p_vpar->bit_stream, i_length);
switch( i_chroma_format )
{
case CHROMA_420:
p_mb->i_coded_block_pattern = CodedPattern420( p_vpar );
break;
case CHROMA_422:
p_mb->i_coded_block_pattern = CodedPattern422( p_vpar );
break;
case CHROMA_444:
p_mb->i_coded_block_pattern = CodedPattern444( p_vpar );
}
}
else
{
i_code = ShowBits(&p_vpar->bit_stream, 9) - 0x1f0;
i_dct_dc_size = pl_dct_dc_lum_init_table_2[i_code].i_value;
i_length = pl_dct_dc_lum_init_table_2[i_code].i_length;
RemoveBits( &p_vpar->bit_stream, i_length);
p_mb->i_coded_block_pattern = 0;
}
}
else
{
/* decode length */
i_code = ShowBits(&p_vpar->bit_stream, 5);
if (i_code<31)
if( !p_vpar->picture.b_concealment_mv )
{
i_dct_dc_size = pl_dct_dc_chrom_init_table_1[i_code].i_value;
i_length = pl_dct_dc_chrom_init_table_1[i_code].i_length;
RemoveBits(&p_vpar->bit_stream, i_length);
/* Reset MV predictors. */
memset( p_vpar->slice.pppi_pmv, 0, 8*sizeof(int) );
}
else
{
i_code = ShowBits(&p_vpar->bit_stream, 10) - 0x3e0;
i_dct_dc_size = pl_dct_dc_chrom_init_table_2[i_code].i_value;
i_length = pl_dct_dc_chrom_init_table_2[i_code].i_length;
RemoveBits( &p_vpar->bit_stream, i_length);
if( b_mpeg2 )
DecodeMVMPEG2( p_vpar, p_mb, 0, i_structure );
else
DecodeMVMPEG1( p_vpar, p_mb, 0, i_structure );
RemoveBits( &p_vpar->bit_stream, 1 );
}
}
if (i_dct_dc_size==0)
i_dct_dc_diff = 0;
else
{
i_dct_dc_diff = GetBits( &p_vpar->bit_stream, i_dct_dc_size);
if ((i_dct_dc_diff & (1<<(i_dct_dc_size-1)))==0)
i_dct_dc_diff-= (1<<i_dct_dc_size) - 1;
}
/* Dump the variable length code */
//RemoveBits( &p_vpar->bit_stream,
// p_vpar->pppl_dct_dc_size[i_type][i_select][i_pos].i_length );
/* Read the actual code with the correct length */
p_vpar->slice.pi_dc_dct_pred[i_cc] += i_dct_dc_diff;
p_mb->ppi_blocks[i_b][0] = ( p_vpar->slice.pi_dc_dct_pred[i_cc] <<
( 3 - p_vpar->picture.i_intra_dc_precision ) );
i_nc = ( p_vpar->slice.pi_dc_dct_pred[i_cc] != 0 );
//fprintf( stderr, "coucou\n" );
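/*
 * Illustrative sketch (hypothetical helper, not part of the patch): the
 * sign convention of the DC differential decoded just above. The value is
 * read on i_dct_dc_size bits; a cleared MSB means a negative differential,
 * obtained by subtracting (2^size - 1).
 *   size = 4, bits = 1010b -> +10
 *   size = 4, bits = 0101b -> 5 - 15 = -10
 */
static __inline__ int DecodeDCDiff( int i_bits, int i_size )
{
    if( i_size == 0 )
    {
        return 0;
    }
    if( (i_bits & (1 << (i_size - 1))) == 0 )
    {
        return i_bits - ((1 << i_size) - 1);     /* MSB clear : negative */
    }
    return i_bits;                               /* MSB set : positive */
}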
/* Decoding of the AC coefficients */
i_coef = 0;
b_vlc_intra = p_vpar->picture.b_intra_vlc_format;
for( i_parse = 1; /*i_parse < 64*/; i_parse++ )
{
i_code = ShowBits( &p_vpar->bit_stream, 16 );
if( i_code >= 16384 )
if( p_mb->i_mb_type & MB_PATTERN )
{
if( b_vlc_intra )
switch( i_chroma_format )
{
i_run = pl_DCT_tab0a[(i_code>>8)-4].i_run;
i_level = pl_DCT_tab0a[(i_code>>8)-4].i_level;
i_length = pl_DCT_tab0a[(i_code>>8)-4].i_length;
//fprintf( stderr, "**********> %d, %d, %d *******\n", i_run, i_level, (i_code>>8)-4 );
case CHROMA_420:
p_mb->i_coded_block_pattern = CodedPattern420( p_vpar );
break;
case CHROMA_422:
p_mb->i_coded_block_pattern = CodedPattern422( p_vpar );
break;
case CHROMA_444:
p_mb->i_coded_block_pattern = CodedPattern444( p_vpar );
}
else
{
i_run = pl_DCT_tab_ac[(i_code>>12)-4].i_run;
i_level = pl_DCT_tab_ac[(i_code>>12)-4].i_level;
i_length = pl_DCT_tab_ac[(i_code>>12)-4].i_length;
}
}
else
{
i_run = ppl_dct_coef[b_vlc_intra][i_code].i_run;
i_length = ppl_dct_coef[b_vlc_intra][i_code].i_length;
i_level = ppl_dct_coef[b_vlc_intra][i_code].i_level;
p_mb->i_coded_block_pattern =
(1 << (4 + p_mb->i_chroma_nb_blocks)) - 1;
}
}
#if 0
{
int code = i_code;
int intra_vlc_format = b_vlc_intra;
dct_lookup_t tab;
if (code>=16384 && !intra_vlc_format)
tab = pl_DCT_tab_ac[(code>>12)-4];
else if (code>=1024)
if( p_vpar->picture.b_error )
{
if (intra_vlc_format)
tab = pl_DCT_tab0a[(code>>8)-4];
else
tab = pl_DCT_tab0[(code>>8)-4];
/* Mark this block as skipped (better than green blocks), and go
* to the next slice. */
(*pi_mb_address)--;
vpar_DestroyMacroblock( &p_vpar->vfifo, p_mb );
return;
}
else if (code>=512)
/*
* Actually decode the blocks.
*/
i_mask = 1 << (3 + p_mb->i_chroma_nb_blocks);
/* luminance */
p_data1 = p_mb->p_picture->p_y
+ p_mb->i_l_x + p_vpar->mb.i_l_y*(p_vpar->sequence.i_width);
for( i_b = 0 ; i_b < 4 ; i_b++, i_mask >>= 1 )
{
if (intra_vlc_format)
tab = pl_DCT_tab1a[(code>>6)-8];
else
tab = pl_DCT_tab1[(code>>6)-8];
if( p_mb->i_coded_block_pattern & i_mask )
{
memset( p_mb->ppi_blocks[i_b], 0, 64*sizeof(dctelem_t) );
(*pppf_decode_block[b_mpeg2]
[p_mb->i_mb_type & MB_INTRA])
( p_vpar, p_mb, i_b );
/* Calculate block coordinates. */
p_mb->p_data[i_b] = p_data1
+ pi_y[p_vpar->mb.b_dct_type][i_b]
* p_vpar->sequence.i_width
+ pi_x[i_b];
}
}
else if (code>=256)
tab = pl_DCT_tab2[(code>>4)-16];
else if (code>=128)
tab = pl_DCT_tab3[(code>>3)-16];
else if (code>=64)
tab = pl_DCT_tab4[(code>>2)-16];
else if (code>=32)
tab = pl_DCT_tab5[(code>>1)-16];
else if (code>=16)
tab = pl_DCT_tab6[code-16];
else
if( p_vpar->picture.b_error )
{
fprintf( stderr, "invalid Huffman code in Decode_MPEG2_Intra_Block()\n");
/* Mark this block as skipped (better than green blocks), and go
* to the next slice. */
(*pi_mb_address)--;
vpar_DestroyMacroblock( &p_vpar->vfifo, p_mb );
return;
}
if( (i_run != tab.i_run) || (i_length != tab.i_length) || (i_level != tab.i_level) )
/* chrominance */
p_data1 = p_mb->p_picture->p_u
+ p_mb->i_c_x
+ p_vpar->mb.i_c_y
* (p_vpar->sequence.i_chroma_width);
p_data2 = p_mb->p_picture->p_v
+ p_mb->i_c_x
+ p_vpar->mb.i_c_y
* (p_vpar->sequence.i_chroma_width);
for( i_b = 4; i_b < 4 + p_mb->i_chroma_nb_blocks;
i_b++, i_mask >>= 1 )
{
fprintf( stderr, "ET M....... !!!\n" );
exit(0);
}
yuv_data_t * pp_data[2] = {p_data1, p_data2};
if( p_mb->i_coded_block_pattern & i_mask )
{
memset( p_mb->ppi_blocks[i_b], 0, 64*sizeof(dctelem_t) );
(*pppf_decode_block[b_mpeg2]
[p_mb->i_mb_type & MB_INTRA])
( p_vpar, p_mb, i_b );
/* Calculate block coordinates. */
p_mb->p_data[i_b] = pp_data[i_b & 1]
+ pi_y[p_vpar->mb.b_dct_type][i_b]
* p_vpar->sequence.i_chroma_width
+ pi_x[i_b];
}
}
if( !p_vpar->picture.b_error )
{
UpdateContext( p_vpar, i_structure );
#ifndef VDEC_SMP
/* Decode the macroblock NOW ! */
vdec_DecodeMacroblock( p_vpar->pp_vdec[0], p_mb );
#endif
}
else
{
/* Mark this block as skipped (better than green blocks), and go
* to the next slice. */
(*pi_mb_address)--;
vpar_DestroyMacroblock( &p_vpar->vfifo, p_mb );
}
}
/*****************************************************************************
* vpar_ParseMacroblockVWXYZ : Parse the next macroblock ; specific functions
*****************************************************************************
* V = MPEG2 ?
* W = coding type ?
* X = chroma format ?
* Y = structure ?
* Z = second field ?
*****************************************************************************/
void vpar_ParseMacroblock2I420F0( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, 0, 1,
I_CODING_TYPE, CHROMA_420, FRAME_STRUCTURE, 0 );
}
void vpar_ParseMacroblock2P420F0( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, 0, 1,
P_CODING_TYPE, CHROMA_420, FRAME_STRUCTURE, 0 );
}
RemoveBits( &p_vpar->bit_stream, i_length );
void vpar_ParseMacroblock2B420F0( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, 0, 1,
B_CODING_TYPE, CHROMA_420, FRAME_STRUCTURE, 0 );
}
switch( i_run )
{
case DCT_ESCAPE:
i_run = GetBits( &p_vpar->bit_stream, 6 );
i_level = GetBits( &p_vpar->bit_stream, 12 );
/*p_mb->ppi_blocks[i_b][i_parse] = ( b_sign = ( i_level > 2047 ) )
? ( -4096 + i_level )
: i_level;*/
i_level = (b_sign = ( i_level > 2047 )) ? 4096 - i_level
: i_level;
break;
case DCT_EOB:
if( i_nc <= 1 )
{
p_mb->pf_idct[i_b] = vdec_SparseIDCT;
p_mb->pi_sparse_pos[i_b] = i_coef;
}
else
{
p_mb->pf_idct[i_b] = vdec_IDCT;
}
return;
void vpar_ParseMacroblock2I420T0( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, 0, 1,
I_CODING_TYPE, CHROMA_420, TOP_FIELD, 0 );
}
break;
default:
b_sign = GetBits( &p_vpar->bit_stream, 1 );
//p_mb->ppi_blocks[i_b][i_parse] = b_sign ? -i_level : i_level;
}
// fprintf( stderr, "i_code : %d (%d), run : %d, %d, %d (%4x) ", i_code , b_vlc_intra,
// i_run, i_level, i_parse, ShowBits( &p_vpar->bit_stream, 16 ) );
void vpar_ParseMacroblock2P420T0( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, 0, 1,
P_CODING_TYPE, CHROMA_420, TOP_FIELD, 0 );
}
//fprintf( stderr, "- %4x\n",ShowBits( &p_vpar->bit_stream, 16 ) );
if( i_parse >= 64 )
void vpar_ParseMacroblock2B420T0( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
fprintf( stderr, "Beuhh dans l'intra decode (%d)\n", i_b );
break;
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, 0, 1,
B_CODING_TYPE, CHROMA_420, TOP_FIELD, 0 );
}
i_coef = i_parse;
i_parse += i_run;
i_nc ++;
i_pos = pi_scan[p_vpar->picture.b_alternate_scan][i_parse];
i_level = ( i_level *
p_vpar->slice.i_quantizer_scale *
ppi_quant[i_quant_type][i_pos] ) >> 4;
p_mb->ppi_blocks[i_b][i_pos] = b_sign ? -i_level : i_level;
}
fprintf( stderr, "MPEG2 end (%d)\n", i_b );
p_vpar->picture.b_error = 1;
void vpar_ParseMacroblock2I420B1( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, i_mb_base, 1,
I_CODING_TYPE, CHROMA_420, BOTTOM_FIELD, 1 );
}
void vpar_ParseMacroblock2P420B1( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, i_mb_base, 1,
P_CODING_TYPE, CHROMA_420, BOTTOM_FIELD, 1 );
}
void vpar_ParseMacroblock2B420B1( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, i_mb_base, 1,
B_CODING_TYPE, CHROMA_420, BOTTOM_FIELD, 1 );
}
void vpar_ParseMacroblockGENERIC( vpar_thread_t * p_vpar, int * pi_mb_address,
int i_mb_previous, int i_mb_base,
boolean_t b_mpeg2, int i_coding_type,
int i_chroma_format, int i_structure,
boolean_t b_second_field )
{
ParseMacroblock( p_vpar, pi_mb_address, i_mb_previous, i_mb_base, b_mpeg2,
i_coding_type, i_chroma_format, i_structure, b_second_field );
}
\ No newline at end of file
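/*
 * The specialized wrappers above forward compile-time constants into the
 * inlined ParseMacroblock(), so each copy can have its branches on b_mpeg2,
 * i_coding_type, i_chroma_format and i_structure folded away. A minimal
 * sketch of the same technique (hypothetical functions, assuming the usual
 * coding-type and chroma constants defined elsewhere in the headers):
 */
static __inline__ void Parse( int i_coding_type, int i_chroma_format )
{
    if( i_coding_type == P_CODING_TYPE )
    {
        /* P-specific work, compiled in only for the P wrappers */
    }
    if( i_chroma_format == CHROMA_420 )
    {
        /* 4:2:0-specific work */
    }
}
void ParseP420( void ) { Parse( P_CODING_TYPE, CHROMA_420 ); }
void ParseB422( void ) { Parse( B_CODING_TYPE, CHROMA_422 ); }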
......@@ -389,9 +389,6 @@ static void SequenceHeader( vpar_thread_t * p_vpar )
if( ShowBits( &p_vpar->bit_stream, 32 ) == EXTENSION_START_CODE )
{
int i_dummy;
static f_chroma_pattern_t ppf_chroma_pattern[4] =
{NULL, vpar_CodedPattern420,
vpar_CodedPattern422, vpar_CodedPattern444};
/* Turn the MPEG2 flag on */
p_vpar->sequence.b_mpeg2 = 1;
......@@ -402,8 +399,6 @@ static void SequenceHeader( vpar_thread_t * p_vpar )
RemoveBits( &p_vpar->bit_stream, 12 );
p_vpar->sequence.b_progressive = GetBits( &p_vpar->bit_stream, 1 );
p_vpar->sequence.i_chroma_format = GetBits( &p_vpar->bit_stream, 2 );
p_vpar->sequence.pf_decode_pattern = ppf_chroma_pattern
[p_vpar->sequence.i_chroma_format];
p_vpar->sequence.i_width |= GetBits( &p_vpar->bit_stream, 2 ) << 12;
p_vpar->sequence.i_height |= GetBits( &p_vpar->bit_stream, 2 ) << 12;
/* bit_rate_extension, marker_bit, vbv_buffer_size_extension, low_delay */
......@@ -413,8 +408,6 @@ static void SequenceHeader( vpar_thread_t * p_vpar )
/* frame_rate_extension_d */
p_vpar->sequence.r_frame_rate *= (i_dummy + 1)
/ (GetBits( &p_vpar->bit_stream, 5 ) + 1);
p_vpar->sequence.pf_decode_mv = vpar_MPEG2MotionVector;
}
else
{
......@@ -423,9 +416,6 @@ static void SequenceHeader( vpar_thread_t * p_vpar )
p_vpar->sequence.b_mpeg2 = 0;
p_vpar->sequence.b_progressive = 1;
p_vpar->sequence.i_chroma_format = CHROMA_420;
p_vpar->sequence.pf_decode_pattern = vpar_CodedPattern420;
p_vpar->sequence.pf_decode_mv = vpar_MPEG1MotionVector;
}
/* Update sizes */
......@@ -513,9 +503,33 @@ static void GroupHeader( vpar_thread_t * p_vpar )
*****************************************************************************/
static void PictureHeader( vpar_thread_t * p_vpar )
{
static f_macroblock_type_t ppf_macroblock_type[5] = {NULL,
vpar_IMBType, vpar_PMBType,
vpar_BMBType, vpar_DMBType};
static f_parse_mb_t ppf_parse_mb[4][4][2] =
{
{
{NULL, NULL}, {NULL, NULL}, {NULL, NULL}, {NULL, NULL}
},
{
/* I_CODING_TYPE */
{NULL, NULL},
{vpar_ParseMacroblock2I420T0, vpar_ParseMacroblockGENERIC},
{vpar_ParseMacroblockGENERIC, vpar_ParseMacroblock2I420B1},
{vpar_ParseMacroblock2I420F0, vpar_ParseMacroblock2I420F0}
},
{
/* P_CODING_TYPE */
{NULL, NULL},
{vpar_ParseMacroblock2P420T0, vpar_ParseMacroblockGENERIC},
{vpar_ParseMacroblockGENERIC, vpar_ParseMacroblock2P420B1},
{vpar_ParseMacroblock2P420F0, vpar_ParseMacroblock2P420F0}
},
{
/* B_CODING_TYPE */
{NULL, NULL},
{vpar_ParseMacroblock2B420T0, vpar_ParseMacroblockGENERIC},
{vpar_ParseMacroblockGENERIC, vpar_ParseMacroblock2B420B1},
{vpar_ParseMacroblock2B420F0, vpar_ParseMacroblock2B420F0}
}
};
int i_structure;
int i_mb_address, i_mb_base;
......@@ -527,11 +541,10 @@ static void PictureHeader( vpar_thread_t * p_vpar )
RemoveBits( &p_vpar->bit_stream, 10 ); /* temporal_reference */
p_vpar->picture.i_coding_type = GetBits( &p_vpar->bit_stream, 3 );
p_vpar->picture.pf_macroblock_type = ppf_macroblock_type
[p_vpar->picture.i_coding_type];
RemoveBits( &p_vpar->bit_stream, 16 ); /* vbv_delay */
if( p_vpar->picture.i_coding_type == P_CODING_TYPE || p_vpar->picture.i_coding_type == B_CODING_TYPE )
if( p_vpar->picture.i_coding_type == P_CODING_TYPE
|| p_vpar->picture.i_coding_type == B_CODING_TYPE )
{
p_vpar->picture.pb_full_pel_vector[0] = GetBits( &p_vpar->bit_stream, 1 );
p_vpar->picture.i_forward_f_code = GetBits( &p_vpar->bit_stream, 3 );
......@@ -733,7 +746,7 @@ static void PictureHeader( vpar_thread_t * p_vpar )
p_vpar->picture.i_structure = i_structure;
/* Initialize picture data for decoding. */
if( (p_vpar->picture.b_motion_field = (i_structure == BOTTOM_FIELD)) )
if( i_structure == BOTTOM_FIELD )
{
i_mb_base = p_vpar->sequence.i_mb_size >> 1;
p_vpar->mb.i_l_y = 1;
......@@ -750,10 +763,25 @@ static void PictureHeader( vpar_thread_t * p_vpar )
/* Extension and User data. */
ExtensionAndUserData( p_vpar );
/* Macroblock parsing function. */
if( p_vpar->sequence.i_chroma_format != CHROMA_420
|| !p_vpar->sequence.b_mpeg2 )
{
p_vpar->picture.pf_parse_mb = vpar_ParseMacroblockGENERIC;
}
else
{
p_vpar->picture.pf_parse_mb =
ppf_parse_mb[p_vpar->picture.i_coding_type]
[p_vpar->picture.i_structure]
[(p_vpar->picture.i_structure !=
p_vpar->picture.i_current_structure)];
}
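/*
 * Illustration (hedged; assuming the usual coding-type and structure
 * constants): for an MPEG-2, 4:2:0, P-coded frame picture the dispatch
 * above resolves to
 *     ppf_parse_mb[P_CODING_TYPE][FRAME_STRUCTURE][0]
 *         == vpar_ParseMacroblock2P420F0
 * while any other chroma format, or an MPEG-1 stream, falls back to
 * vpar_ParseMacroblockGENERIC, which re-reads every parameter at run time.
 */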
/* Picture data (ISO/IEC 13818-2 6.2.3.7). */
NextStartCode( p_vpar );
while( i_mb_address+i_mb_base < p_vpar->sequence.i_mb_size
&& !p_vpar->picture.b_error && !p_vpar->b_die )
&& !p_vpar->b_die )
{
if( ((i_dummy = ShowBits( &p_vpar->bit_stream, 32 ))
< SLICE_START_CODE_MIN) ||
......@@ -768,7 +796,12 @@ static void PictureHeader( vpar_thread_t * p_vpar )
/* Decode slice data. */
p_vpar->sequence.pf_slice_header( p_vpar, &i_mb_address, i_mb_base, i_dummy & 255 );
}
if( p_vpar->b_die || p_vpar->b_error )
{
return;
}
if( p_vpar->picture.b_error )
{
/* Trash picture. */
......@@ -826,7 +859,9 @@ static __inline__ void SliceHeader( vpar_thread_t * p_vpar,
static int pi_dc_dct_reinit[4] = {128,256,512,1024};
int i_mb_address_save = *pi_mb_address;
p_vpar->picture.b_error = 0;
/* slice_vertical_position_extension and priority_breakpoint already done */
LoadQuantizerScale( p_vpar );
......@@ -853,11 +888,18 @@ static __inline__ void SliceHeader( vpar_thread_t * p_vpar,
do
{
vpar_ParseMacroblock( p_vpar, pi_mb_address, i_mb_address_save,
i_mb_base );
p_vpar->picture.pf_parse_mb( p_vpar, pi_mb_address,
i_mb_address_save, i_mb_base,
p_vpar->sequence.b_mpeg2,
p_vpar->picture.i_coding_type,
p_vpar->sequence.i_chroma_format,
p_vpar->picture.i_structure,
(p_vpar->picture.i_structure !=
p_vpar->picture.i_current_structure) );
i_mb_address_save = *pi_mb_address;
}
while( ShowBits( &p_vpar->bit_stream, 23 ) && !p_vpar->b_die );
while( ShowBits( &p_vpar->bit_stream, 23 ) && !p_vpar->picture.b_error
&& !p_vpar->b_die );
NextStartCode( p_vpar );
}
......
......@@ -126,6 +126,7 @@ void vpar_MotionVector( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_r,
{
int i_motion_code, i_motion_residual;
int i_r_size;
int pi_dm_vector[2];
i_r_size = p_vpar->picture.ppi_f_code[i_s][0]-1;
i_motion_code = vpar_MotionCode( p_vpar );
......@@ -140,11 +141,11 @@ void vpar_MotionVector( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_r,
{
if( GetBits(&p_vpar->bit_stream, 1) )
{
p_mb->pi_dm_vector[0] = GetBits( &p_vpar->bit_stream, 1 ) ? -1 : 1;
pi_dm_vector[0] = GetBits( &p_vpar->bit_stream, 1 ) ? -1 : 1;
}
else
{
p_mb->pi_dm_vector[0] = 0;
pi_dm_vector[0] = 0;
}
}
......@@ -172,12 +173,55 @@ void vpar_MotionVector( vpar_thread_t * p_vpar, macroblock_t * p_mb, int i_r,
{
if( GetBits(&p_vpar->bit_stream, 1) )
{
p_mb->pi_dm_vector[1] = GetBits( &p_vpar->bit_stream, 1 ) ? -1 : 1;
pi_dm_vector[1] = GetBits( &p_vpar->bit_stream, 1 ) ? -1 : 1;
}
else
{
p_mb->pi_dm_vector[1] = 0;
pi_dm_vector[1] = 0;
}
/* Dual Prime Arithmetic (ISO/IEC 13818-2 section 7.6.3.6) */
#define i_mv_x p_mb->pppi_motion_vectors[0][0][0]
if( p_vpar->picture.i_structure == FRAME_STRUCTURE )
{
#define i_mv_y (p_mb->pppi_motion_vectors[0][0][1] << 1)
if( p_vpar->picture.b_top_field_first )
{
/* vector for prediction of top field from bottom field */
p_mb->ppi_dmv[0][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[0][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] - 1;
/* vector for prediction of bottom field from top field */
p_mb->ppi_dmv[1][0] = ((3*i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[1][1] = ((3*i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] + 1;
}
else
{
/* vector for prediction of top field from bottom field */
p_mb->ppi_dmv[0][0] = ((3*i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[0][1] = ((3*i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] - 1;
/* vector for prediction of bottom field from top field */
p_mb->ppi_dmv[1][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[1][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1] + 1;
}
#undef i_mv_y
}
else
{
#define i_mv_y p_mb->pppi_motion_vectors[0][0][1]
/* vector for prediction from field of opposite 'parity' */
p_mb->ppi_dmv[0][0] = ((i_mv_x + (i_mv_x > 0)) >> 1) + pi_dm_vector[0];
p_mb->ppi_dmv[0][1] = ((i_mv_y + (i_mv_y > 0)) >> 1) + pi_dm_vector[1];
/* correct for vertical field shift */
if( p_vpar->picture.i_structure == TOP_FIELD )
p_mb->ppi_dmv[0][1]--;
else
p_mb->ppi_dmv[0][1]++;
#undef i_mv_y
}
#undef i_mv_x
}
}
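/*
 * Worked example for the dual prime arithmetic above (hypothetical values).
 * Frame picture, top_field_first, horizontal component, with i_mv_x = 2 and
 * pi_dm_vector[0] = 1 :
 *     top field predicted from bottom field :
 *         ((2 + 1) >> 1) + 1              = 2
 *     bottom field predicted from top field :
 *         ((3*2 + (2 > 0)) >> 1) + 1      = (7 >> 1) + 1 = 4
 * The (i_mv > 0) term rounds the halved (or 3/2-scaled) vector away from
 * zero; the -1/+1 on the vertical component compensates for the half-line
 * shift between the two fields.
 */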
......