Commit 504501c8 authored by michaelni

cleanup / messup?

fixes 20% speedloss bug
removes redundant variables from MpegEncContext
release buffers in avcodec_flush_buffers() (untested)


git-svn-id: file:///var/local/repositories/ffmpeg/trunk@1325 9553f0bf-9b14-0410-a0b8-cfaf0461ba5b
parent c3cb35ce
@@ -5,8 +5,8 @@
 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION     "0.4.6"
-#define LIBAVCODEC_BUILD       4643
-#define LIBAVCODEC_BUILD_STR   "4643"
+#define LIBAVCODEC_BUILD       4644
+#define LIBAVCODEC_BUILD_STR   "4644"
 enum CodecID {
     CODEC_ID_NONE,
@@ -119,7 +119,7 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,
                                        ME_X1, ME_EPZS, ME_FULL };
-#define FF_MAX_B_FRAMES 4
+#define FF_MAX_B_FRAMES 8
 /* encoding support
    these flags can be passed in AVCodecContext.flags before initing
@@ -260,6 +260,19 @@ static const int Motion_Est_QTab[] = { ME_ZERO, ME_PHODS, ME_LOG,
      * decoding: unused\
      */\
     uint64_t error[4];\
+\
+    /**\
+     * type of the buffer (to keep track of who has to dealloc data[*])\
+     * encoding: set by the one who allocs it\
+     * decoding: set by the one who allocs it\
+     * Note: user allocated (direct rendering) & internal buffers can not coexist currently\
+     */\
+    int type;\
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER     2 // Direct rendering buffers
+#define FF_BUFFER_TYPE_SHARED   4 // input frame for encoding(wont be dealloced)
 #define FF_I_TYPE 1 // Intra
 #define FF_P_TYPE 2 // Predicted
......
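The hunk above adds a type field to the frame so that whoever allocates data[] also records who has to free it. Below is a minimal sketch (not part of this commit) of an application-side get_buffer/release_buffer pair for direct rendering that only illustrates this type tagging; a real callback would also have to honour the decoder's alignment and edge requirements, which are omitted here, and planar YUV 4:2:0 is assumed for brevity.

    #include <stdlib.h>
    #include "avcodec.h"

    static int my_get_buffer(AVCodecContext *avctx, AVVideoFrame *pic)
    {
        int w = avctx->width, h = avctx->height;

        pic->linesize[0] = w;
        pic->linesize[1] = pic->linesize[2] = w / 2;
        pic->data[0] = malloc(w * h);
        pic->data[1] = malloc(w * h / 4);
        pic->data[2] = malloc(w * h / 4);
        if (!pic->data[0] || !pic->data[1] || !pic->data[2])
            return -1;

        pic->type = FF_BUFFER_TYPE_USER;   /* user allocated -> user frees */
        pic->age  = 256 * 256 * 256 * 64;  /* treat as very old, no reuse tricks */
        return 0;
    }

    static void my_release_buffer(AVCodecContext *avctx, AVVideoFrame *pic)
    {
        int i;
        for (i = 0; i < 3; i++) {
            free(pic->data[i]);
            pic->data[i] = NULL;
        }
    }

Installing the callbacks by assigning avctx->get_buffer and avctx->release_buffer before opening the codec is an assumption about the surrounding API, not something this hunk shows.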
@@ -55,6 +55,7 @@ static int h263_decode_init(AVCodecContext *avctx)
     s->quant_precision=5;
     s->progressive_sequence=1;
     s->decode_mb= ff_h263_decode_mb;
+    s->low_delay= 1;
     /* select sub codec */
     switch(avctx->codec->id) {
@@ -64,7 +65,7 @@ static int h263_decode_init(AVCodecContext *avctx)
     case CODEC_ID_MPEG4:
         s->time_increment_bits = 4; /* default value for broken headers */
         s->h263_pred = 1;
-        s->has_b_frames = 1; //default, might be overriden in the vol header during header parsing
+        s->low_delay = 0; //default, might be overriden in the vol header during header parsing
         break;
     case CODEC_ID_MSMPEG4V1:
         s->h263_msmpeg4 = 1;
@@ -430,14 +431,12 @@ retry:
         if(s->flags& CODEC_FLAG_LOW_DELAY)
             s->low_delay=1;
-        s->has_b_frames= !s->low_delay;
     } else if (s->h263_intel) {
         ret = intel_h263_decode_picture_header(s);
     } else {
         ret = h263_decode_picture_header(s);
     }
-    avctx->has_b_frames= s->has_b_frames;
+    avctx->has_b_frames= !s->low_delay;
     if(s->workaround_bugs&FF_BUG_AUTODETECT){
         if(s->avctx->fourcc == ff_get_fourcc("XVIX"))
@@ -531,7 +530,7 @@ retry:
     s->current_picture.key_frame= s->pict_type == I_TYPE;
     /* skip b frames if we dont have reference frames */
-    if(s->num_available_buffers<2 && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
+    if(s->last_picture.data[0]==NULL && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
     /* skip b frames if we are in a hurry */
     if(avctx->hurry_up && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
     /* skip everything if we are in a hurry>=5 */
@@ -676,7 +675,7 @@ retry:
     }
 #endif
-    if(s->pict_type==B_TYPE || (!s->has_b_frames)){
+    if(s->pict_type==B_TYPE || s->low_delay){
         *pict= *(AVVideoFrame*)&s->current_picture;
     } else {
         *pict= *(AVVideoFrame*)&s->last_picture;
@@ -686,9 +685,8 @@ retry:
     /* we substract 1 because it is added on utils.c */
     avctx->frame_number = s->picture_number - 1;
-    /* dont output the last pic after seeking
-       note we allready added +1 for the current pix in MPV_frame_end(s) */
-    if(s->num_available_buffers>=2 || (!s->has_b_frames))
+    /* dont output the last pic after seeking */
+    if(s->last_picture.data[0] || s->low_delay)
         *data_size = sizeof(AVVideoFrame);
 #ifdef PRINT_FRAME_TIME
     printf("%Ld\n", rdtsc()-time);
......
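In the hunks above the h263/MPEG-4 decoder derives avctx->has_b_frames directly from s->low_delay and tests last_picture.data[0] instead of the removed num_available_buffers counter. For an application, has_b_frames simply means the decoder's output lags its input by one frame, so a decode call may consume data without returning a picture. A minimal sketch of a loop tolerant of that delay, assuming this build's avcodec_decode_video() signature (AVVideoFrame output plus a got_picture flag) and hypothetical read_chunk()/display_picture() helpers:

    #include "avcodec.h"

    int  read_chunk(UINT8 **buf, int *size);    /* hypothetical demuxer */
    void display_picture(AVVideoFrame *pic);    /* hypothetical video sink */

    static int decode_loop_sketch(AVCodecContext *avctx)
    {
        AVVideoFrame picture;
        UINT8 *buf;
        int buf_size, got_picture, len;

        while (read_chunk(&buf, &buf_size) > 0) {
            len = avcodec_decode_video(avctx, &picture, &got_picture, buf, buf_size);
            if (len < 0)
                return -1;                      /* decode error */
            /* with avctx->has_b_frames set the output lags the input by one
               frame, so got_picture may legitimately be 0 here */
            if (got_picture)
                display_picture(&picture);
        }
        return 0;
    }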
@@ -1702,7 +1702,7 @@ eos: //end of slice
         MPV_frame_end(s);
-        if (s->pict_type == B_TYPE) {
+        if (s->pict_type == B_TYPE || s->low_delay) {
             *pict= *(AVVideoFrame*)&s->current_picture;
         } else {
             s->picture_number++;
@@ -1756,7 +1756,7 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
     }
     s->width = width;
     s->height = height;
-    avctx->has_b_frames= s->has_b_frames = 1;
+    avctx->has_b_frames= 1;
     s->avctx = avctx;
     avctx->width = width;
     avctx->height = height;
......
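Both the h263 and MPEG-1 output paths now return current_picture directly when s->low_delay is set, so pictures are emitted in coding order with no one-frame lag. For the h263/MPEG-4 decoder a caller can request that behaviour through the CODEC_FLAG_LOW_DELAY flag seen in the retry: hunk above; a minimal sketch, assuming the plain avcodec_find_decoder()/avcodec_open() API of this era and a zero-initialized context:

    #include <string.h>
    #include "avcodec.h"

    static int open_low_delay_decoder(AVCodecContext *ctx)
    {
        AVCodec *codec = avcodec_find_decoder(CODEC_ID_MPEG4);

        memset(ctx, 0, sizeof(*ctx));
        ctx->flags |= CODEC_FLAG_LOW_DELAY;   /* decoder then sets s->low_delay=1 */
        if (!codec || avcodec_open(ctx, codec) < 0)
            return -1;
        /* pictures now come back in coding order; avctx->has_b_frames stays 0 */
        return 0;
    }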
This diff is collapsed.
@@ -185,7 +185,6 @@ typedef struct MpegEncContext {
     Picture next_picture;     /* previous picture (for bidir pred) */
     Picture new_picture;      /* source picture for encoding */
     Picture current_picture;  /* buffer to store the decompressed current picture */
-    int num_available_buffers; /* is 0 at the start & after seeking, after the first I frame its 1 after next I/P 2 */
     int last_dc[3];           /* last DC values for MPEG1 */
     INT16 *dc_val[3];         /* used for mpeg4 DC prediction, all 3 arrays must be continuous */
     int y_dc_scale, c_dc_scale;
@@ -254,7 +253,6 @@ typedef struct MpegEncContext {
     UINT16 (*mv_penalty)[MAX_MV*2+1]; /* amount of bits needed to encode a MV, used for ME */
     UINT8 *fcode_tab;                 /* smallest fcode needed for each MV */
-    int has_b_frames;
     int no_rounding;                  /* apply no rounding to motion compensation (MPEG4, msmpeg4, ...)
                                          for b-frames rounding mode is allways 0 */
......
@@ -1179,7 +1179,7 @@ static int svq1_decode_init(AVCodecContext *avctx)
     s->height = (avctx->height+3)&~3;
     s->codec_id= avctx->codec->id;
     avctx->pix_fmt = PIX_FMT_YUV410P;
-    avctx->has_b_frames= s->has_b_frames=1; // not true, but DP frames and these behave like unidirectional b frames
+    avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
     s->flags= avctx->flags;
     if (MPV_common_init(s) < 0) return -1;
     return 0;
......
@@ -125,6 +125,9 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
     const int width = s->width;
     const int height= s->height;
     DefaultPicOpaque *opaque;
+
+    assert(pic->data[0]==NULL);
+    assert(pic->type==0 || pic->type==FF_TYPE_INTERNAL);
     if(pic->opaque){
         opaque= (DefaultPicOpaque *)pic->opaque;
@@ -186,13 +189,14 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
             memset(pic->base[i], 128, pic->linesize[i]*h>>v_shift);
             if(s->flags&CODEC_FLAG_EMU_EDGE)
-                pic->data[i] = pic->base[i];
+                pic->data[i] = pic->base[i] + 16; //FIXME 16
             else
-                pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift);
+                pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift) + 16; //FIXME 16
             opaque->data[i]= pic->data[i];
         }
         pic->age= 256*256*256*64;
+        pic->type= FF_BUFFER_TYPE_INTERNAL;
     }
     return 0;
@@ -201,6 +205,8 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
 void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic){
     int i;
+
+    assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
     for(i=0; i<3; i++)
         pic->data[i]=NULL;
 //printf("R%X\n", pic->opaque);
@@ -642,14 +648,39 @@ void avcodec_init(void)
     //dsputil_init();
 }
-/* this should be called after seeking and before trying to decode the next frame */
+/* this can be called after seeking and before trying to decode the next keyframe */
 void avcodec_flush_buffers(AVCodecContext *avctx)
 {
+    int i;
     MpegEncContext *s = avctx->priv_data;
-    s->num_available_buffers=0;
+
+    switch(avctx->codec_id){
+    case CODEC_ID_MPEG1VIDEO:
+    case CODEC_ID_H263:
+    case CODEC_ID_RV10:
+    case CODEC_ID_MJPEG:
+    case CODEC_ID_MJPEGB:
+    case CODEC_ID_MPEG4:
+    case CODEC_ID_MSMPEG4V1:
+    case CODEC_ID_MSMPEG4V2:
+    case CODEC_ID_MSMPEG4V3:
+    case CODEC_ID_WMV1:
+    case CODEC_ID_WMV2:
+    case CODEC_ID_H263P:
+    case CODEC_ID_H263I:
+    case CODEC_ID_SVQ1:
+        for(i=0; i<MAX_PICTURE_COUNT; i++){
+            if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
+                                         || s->picture[i].type == FF_BUFFER_TYPE_USER))
+                avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+        }
+        break;
+    default:
+        //FIXME
+        break;
+    }
 }
 static int raw_encode_init(AVCodecContext *s)
 {
     return 0;
......
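As the hunk above shows, avcodec_flush_buffers() now walks the decoder's picture array and releases every internally or user allocated buffer, instead of merely resetting the old counter. A minimal sketch of the intended call site after a container-level seek (my_input_seek() is a hypothetical demuxer helper, and the "(untested)" note in the commit message applies):

    #include "avcodec.h"

    void my_input_seek(INT64 timestamp);   /* hypothetical: repositions the input */

    static void seek_and_flush(AVCodecContext *avctx, INT64 timestamp)
    {
        my_input_seek(timestamp);          /* move the demuxer to the new position */
        avcodec_flush_buffers(avctx);      /* drop INTERNAL/USER picture buffers   */
        /* decoding should resume at the next keyframe delivered by the input */
    }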