Commit 3a466c5e authored by michael

Support reusing the macroblock types and field-select values of the source file, using its motion vectors only as additional predictors.
Minor cleanup.
Segfault fix.


git-svn-id: file:///var/local/repositories/ffmpeg/trunk@3060 9553f0bf-9b14-0410-a0b8-cfaf0461ba5b
parent a1c912eb
ffmpeg.c
@@ -161,6 +161,7 @@ static int sc_threshold = 0;
 static int debug = 0;
 static int debug_mv = 0;
 static int me_threshold = 0;
+static int mb_threshold = 0;
 extern int loop_input; /* currently a hack */
 static int gop_size = 12;
@@ -1866,6 +1867,10 @@ static void opt_me_threshold(const char *arg)
     me_threshold = atoi(arg);
 }
 
+static void opt_mb_threshold(const char *arg)
+{
+    mb_threshold = atoi(arg);
+}
+
 static void opt_error_resilience(const char *arg)
 {
@@ -2873,6 +2878,7 @@ static void opt_output_file(const char *filename)
         video_enc->dct_algo = dct_algo;
         video_enc->idct_algo = idct_algo;
         video_enc->me_threshold= me_threshold;
+        video_enc->mb_threshold= mb_threshold;
         video_enc->strict_std_compliance = strict;
         video_enc->error_rate = error_rate;
         video_enc->noise_reduction= noise_reduction;
@@ -3494,6 +3500,7 @@ const OptionDef options[] = {
     { "dct_algo", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_dct_algo}, "set dct algo", "algo" },
     { "idct_algo", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_idct_algo}, "set idct algo", "algo" },
     { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimaton threshold", "" },
+    { "mb_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_mb_threshold}, "macroblock threshold", "" },
     { "er", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_error_resilience}, "set error resilience", "n" },
     { "ec", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_error_concealment}, "set error concealment", "bit_mask" },
     { "bf", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_b_frames}, "use 'frames' B frames", "frames" },
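Usage note: together with the existing -me_threshold switch, the new -mb_threshold option is an expert video option taking an integer argument, e.g. ffmpeg -i source.avi -me_threshold 256 -mb_threshold 256 out.avi (the values here are purely illustrative). Both default to 0, in which case the new code paths are never taken and behaviour is unchanged; as the hunks below show, mb_threshold is only consulted when me_threshold is set as well.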
libavcodec/avcodec.h
@@ -17,7 +17,7 @@ extern "C" {
 #define FFMPEG_VERSION_INT     0x000408
 #define FFMPEG_VERSION         "0.4.8"
-#define LIBAVCODEC_BUILD       4709
+#define LIBAVCODEC_BUILD       4710
 
 #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
 #define LIBAVCODEC_VERSION     FFMPEG_VERSION
@@ -1570,12 +1570,20 @@ typedef struct AVCodecContext {
     void *thread_opaque;
 
     /**
-     * Motion estimation threshold.
+     * Motion estimation threshold. under which no motion estimation is
+     * performed, but instead the user specified motion vectors are used
      *
      * - encoding: set by user
-     * - decoding: set by user
+     * - decoding: unused
      */
     int me_threshold;
+
+    /**
+     * Macroblock threshold. under which the user specified macroblock types will be used
+     * - encoding: set by user
+     * - decoding: unused
+     */
+    int mb_threshold;
 } AVCodecContext;
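For orientation, a minimal sketch of how an application might set the two fields on an encoder context is shown below. It assumes the libavcodec API of this period (avcodec_alloc_context()) and uses arbitrary threshold values; it is not a recommendation taken from the commit itself.

    #include "avcodec.h"

    AVCodecContext *enc = avcodec_alloc_context();
    /* Below this score, skip motion estimation entirely and keep the
       user supplied (source) motion vectors. */
    enc->me_threshold = 256;
    /* Below this score, also keep the user supplied macroblock type and
       field select values; the source vectors then serve as predictors
       for a refining search (see motion_est.c below). */
    enc->mb_threshold = 256;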
libavcodec/motion_est.c
@@ -859,7 +859,7 @@ static inline void init_interlaced_ref(MpegEncContext *s, int ref_index){
 }
 
 static int interlaced_search(MpegEncContext *s, int ref_index,
-                             int16_t (*mv_tables[2][2])[2], uint8_t *field_select_tables[2], int mx, int my)
+                             int16_t (*mv_tables[2][2])[2], uint8_t *field_select_tables[2], int mx, int my, int user_field_select)
 {
     MotionEstContext * const c= &s->me;
     const int size=0;
@@ -889,6 +889,11 @@ static int interlaced_search(MpegEncContext *s, int ref_index,
         int dmin, mx_i, my_i;
         int16_t (*mv_table)[2]= mv_tables[block][field_select];
 
+        if(user_field_select){
+            if(field_select_tables[block][xy] != field_select)
+                continue;
+        }
+
         P_LEFT[0] = mv_table[xy - 1][0];
         P_LEFT[1] = mv_table[xy - 1][1];
         if(P_LEFT[0] > (c->xmax<<1)) P_LEFT[0] = (c->xmax<<1);
@@ -996,7 +1001,6 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int p_type)
         s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA;
 
         c->stride<<=1;
         c->uvstride<<=1;
-        init_interlaced_ref(s, 2);
 
         assert(s->flags & CODEC_FLAG_INTERLACED_ME);
@@ -1005,6 +1009,8 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int p_type)
             int field_select1= p->ref_index[0][xy2];
             assert(field_select0==0 ||field_select0==1);
             assert(field_select1==0 ||field_select1==1);
+            init_interlaced_ref(s, 0);
+
             if(p_type){
                 s->p_field_select_table[0][mb_xy]= field_select0;
                 s->p_field_select_table[1][mb_xy]= field_select1;
@@ -1031,6 +1037,8 @@ static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int p_type)
             int field_select1= p->ref_index[1][xy2];
             assert(field_select0==0 ||field_select0==1);
             assert(field_select1==0 ||field_select1==1);
+            init_interlaced_ref(s, 2);
+
             s->b_field_select_table[1][0][mb_xy]= field_select0;
             s->b_field_select_table[1][1][mb_xy]= field_select1;
             *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy]= *(uint32_t*)p->motion_val[1][xy ];
@@ -1097,7 +1105,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
 {
     MotionEstContext * const c= &s->me;
     uint8_t *pix, *ppix;
-    int sum, varc, vard, mx, my, dmin, xx, yy;
+    int sum, varc, vard, mx, my, dmin;
     int P[10][2];
     const int shift= 1+s->quarter_sample;
     int mb_type=0;
@@ -1117,18 +1125,20 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     get_limits(s, 16*mb_x, 16*mb_y);
     s->me.skip=0;
 
+    /* intra / predictive decision */
+    pix = c->src[0][0];
+    sum = s->dsp.pix_sum(pix, s->linesize);
+    varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
+    pic->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
+    pic->mb_var [s->mb_stride * mb_y + mb_x] = varc;
+    s->mb_var_sum_temp += varc;
+
     if(s->avctx->me_threshold){
         vard= (check_input_motion(s, mb_x, mb_y, 1)+128)>>8;
 
         if(vard<s->avctx->me_threshold){
-            pix = c->src[0][0];
-            sum = s->dsp.pix_sum(pix, s->linesize);
-            varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
-            pic->mb_var [s->mb_stride * mb_y + mb_x] = varc;
             pic->mc_mb_var[s->mb_stride * mb_y + mb_x] = vard;
-            pic->mb_mean [s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
-            s->mb_var_sum_temp += varc;
             s->mc_mb_var_sum_temp += vard;
             if (vard <= 64 || vard < varc) { //FIXME
                 s->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
@@ -1137,6 +1147,8 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
             }
             return;
         }
+        if(vard<s->avctx->mb_threshold)
+            mb_type= s->mb_type[mb_x + mb_y*s->mb_stride];
     }
 
     switch(s->me_method) {
@@ -1205,33 +1217,41 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
         break;
     }
 
-    /* intra / predictive decision */
-    xx = mb_x * 16;
-    yy = mb_y * 16;
-    pix = c->src[0][0];
     /* At this point (mx,my) are full-pell and the relative displacement */
     ppix = c->ref[0][0] + (my * s->linesize) + mx;
 
-    sum = s->dsp.pix_sum(pix, s->linesize);
-    varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
     vard = (s->dsp.sse[0](NULL, pix, ppix, s->linesize, 16)+128)>>8;
 
-//printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
-    pic->mb_var [s->mb_stride * mb_y + mb_x] = varc;
     pic->mc_mb_var[s->mb_stride * mb_y + mb_x] = vard;
-    pic->mb_mean [s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
     // pic->mb_cmp_score[s->mb_stride * mb_y + mb_x] = dmin;
-    s->mb_var_sum_temp += varc;
     s->mc_mb_var_sum_temp += vard;
-//printf("E%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout);
 
 #if 0
     printf("varc=%4d avg_var=%4d (sum=%4d) vard=%4d mx=%2d my=%2d\n",
            varc, s->avg_mb_var, sum, vard, mx - xx, my - yy);
 #endif
-    if(s->avctx->mb_decision > FF_MB_DECISION_SIMPLE){
+    if(mb_type){
+        if (vard <= 64 || vard < varc)
+            s->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
+        else
+            s->scene_change_score+= s->qscale;
+        if(mb_type == CANDIDATE_MB_TYPE_INTER){
+            s->me.sub_motion_search(s, &mx, &my, dmin, 0, 0, 0, 16);
+            set_p_mv_tables(s, mx, my, 1);
+        }else{
+            mx <<=shift;
+            my <<=shift;
+        }
+        if(mb_type == CANDIDATE_MB_TYPE_INTER4V){
+            h263_mv4_search(s, mx, my, shift);
+            set_p_mv_tables(s, mx, my, 0);
+        }
+        if(mb_type == CANDIDATE_MB_TYPE_INTER_I){
+            interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 1);
+        }
+    }else if(s->avctx->mb_decision > FF_MB_DECISION_SIMPLE){
         if (vard <= 64 || vard < varc)
             s->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
         else
@@ -1259,7 +1279,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
             set_p_mv_tables(s, mx, my, 1);
             if((s->flags&CODEC_FLAG_INTERLACED_ME)
                && !s->me.skip){ //FIXME varc/d checks
-                if(interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my) < INT_MAX)
+                if(interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 0) < INT_MAX)
                     mb_type |= CANDIDATE_MB_TYPE_INTER_I;
             }
         }else{
@@ -1280,7 +1300,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
         }
 
         if((s->flags&CODEC_FLAG_INTERLACED_ME)
            && !s->me.skip){ //FIXME varc/d checks
-            int dmin_i= interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my);
+            int dmin_i= interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 0);
             if(dmin_i < dmin){
                 mb_type = CANDIDATE_MB_TYPE_INTER_I;
                 dmin= dmin_i;
@@ -1690,7 +1710,9 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
     const int penalty_factor= s->me.mb_penalty_factor;
     int fmin, bmin, dmin, fbmin, bimin, fimin;
     int type=0;
+    const int xy = mb_y*s->mb_stride + mb_x;
+
     init_ref(s, s->new_picture.data, s->last_picture.data, s->next_picture.data, 16*mb_x, 16*mb_y, 2);
 
     s->me.skip=0;
@@ -1713,6 +1735,35 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
         }*/
             return;
         }
+        if(vard<s->avctx->mb_threshold){
+            type= s->mb_type[mb_y*s->mb_stride + mb_x];
+            if(type == CANDIDATE_MB_TYPE_DIRECT){
+                direct_search(s, mb_x, mb_y);
+            }
+            if(type == CANDIDATE_MB_TYPE_FORWARD || type == CANDIDATE_MB_TYPE_BIDIR){
+                s->me.skip=0;
+                ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, 0, s->f_code);
+            }
+            if(type == CANDIDATE_MB_TYPE_BACKWARD || type == CANDIDATE_MB_TYPE_BIDIR){
+                s->me.skip=0;
+                ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, 2, s->b_code);
+            }
+            if(type == CANDIDATE_MB_TYPE_FORWARD_I || type == CANDIDATE_MB_TYPE_BIDIR_I){
+                s->me.skip=0;
+                s->me.current_mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV;
+                interlaced_search(s, 0,
+                                  s->b_field_mv_table[0], s->b_field_select_table[0],
+                                  s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 1);
+            }
+            if(type == CANDIDATE_MB_TYPE_BACKWARD_I || type == CANDIDATE_MB_TYPE_BIDIR_I){
+                s->me.skip=0;
+                s->me.current_mv_penalty= s->me.mv_penalty[s->b_code] + MAX_MV;
+                interlaced_search(s, 2,
+                                  s->b_field_mv_table[1], s->b_field_select_table[1],
+                                  s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 1);
+            }
+            return;
+        }
     }
 
     if (s->codec_id == CODEC_ID_MPEG4)
@@ -1732,18 +1783,16 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
 //printf("%d %d %d %d\n", dmin, fmin, bmin, fbmin);
     if(s->flags & CODEC_FLAG_INTERLACED_ME){
-        const int xy = mb_y*s->mb_stride + mb_x;
-
 //FIXME mb type penalty
         s->me.skip=0;
         s->me.current_mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV;
         fimin= interlaced_search(s, 0,
                                  s->b_field_mv_table[0], s->b_field_select_table[0],
-                                 s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]);
+                                 s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 0);
         s->me.current_mv_penalty= s->me.mv_penalty[s->b_code] + MAX_MV;
         bimin= interlaced_search(s, 2,
                                  s->b_field_mv_table[1], s->b_field_select_table[1],
-                                 s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1]);
+                                 s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 0);
     }else
         fimin= bimin= INT_MAX;
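A condensed view of the decision cascade these hunks add to ff_estimate_p_frame_motion() and ff_estimate_b_frame_motion(), written as a comment-only paraphrase rather than literal source:

    /*
     * vard = comparison score of the motion data already attached to the
     *        input picture (computed by check_input_motion())
     *
     * if (avctx->me_threshold is set) {
     *     if (vard < avctx->me_threshold)
     *         reuse the source MVs, mb types and field selects as-is; return;
     *     if (vard < avctx->mb_threshold)
     *         keep the source mb type:
     *           - P frames: run the usual vector search, then only refine the
     *             vectors for that type (sub_motion_search / mv4 / field search);
     *           - B frames: run only the searches that type needs, then return;
     *         interlaced_search() gets a new user_field_select flag so the
     *         source field parity is preserved as well;
     * }
     * everything else falls through to the full search and mb type decision
     */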
libavcodec/mpegvideo.h
@@ -229,7 +229,6 @@ typedef struct MotionEstContext{
 /* cmp, chroma_cmp;*/
     op_pixels_func (*hpel_put)[4];
     op_pixels_func (*hpel_avg)[4];
-    op_pixels_func (*chroma_hpel_put)[4];
     qpel_mc_func (*qpel_put)[16];
     qpel_mc_func (*qpel_avg)[16];
     uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV