author     Michael Niedermayer <michaelni@gmx.at>  2002-07-15 14:15:10 +0000
committer  Michael Niedermayer <michaelni@gmx.at>  2002-07-15 14:15:10 +0000
commit     0fd90455ecdf4f9be58c510e0a85d24829cf8845 (patch)
tree       a397748c44c59ba8fc26265af9200a2f3615b8d7
parent     cd141f4c22175b7ef976e4ad351afafd2e7838a3 (diff)
download   ffmpeg-0fd90455ecdf4f9be58c510e0a85d24829cf8845.tar.gz
uvlinesize
export has_b_frames
mb_skip with more than 2 ip buffers

Originally committed as revision 762 to svn://svn.ffmpeg.org/ffmpeg/trunk
-rw-r--r--  libavcodec/avcodec.h   | 16
-rw-r--r--  libavcodec/h263dec.c   |  5
-rw-r--r--  libavcodec/mpeg12.c    | 10
-rw-r--r--  libavcodec/mpegvideo.c | 98
-rw-r--r--  libavcodec/mpegvideo.h |  4
-rw-r--r--  libavcodec/msmpeg4.c   |  2
-rw-r--r--  libavcodec/rv10.c      |  4
7 files changed, 78 insertions, 61 deletions
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index ad9ea74932..ceb38dba4a 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -5,8 +5,8 @@
#define LIBAVCODEC_VERSION_INT 0x000406
#define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4616
-#define LIBAVCODEC_BUILD_STR "4616"
+#define LIBAVCODEC_BUILD 4617
+#define LIBAVCODEC_BUILD_STR "4617"
enum CodecID {
CODEC_ID_NONE,
@@ -258,7 +258,11 @@ typedef struct AVCodecContext {
uint8_t *dr_buffer[3];
int dr_stride;
void *dr_opaque_frame;
- void (*get_buffer_callback)(struct AVCodecContext *c, int width, int height, int pict_type);
+ void (*get_buffer_callback)(struct AVCodecContext *c, int width, int height, int pict_type);
+
+ int has_b_frames; // is 1 if the decoded stream contains b frames
+ int dr_uvstride;
+ int dr_ip_buffer_count;
//FIXME this should be reordered after kabis API is finished ...
/*
@@ -282,13 +286,13 @@ typedef struct AVCodecContext {
ul_res0,ul_res1,ul_res2,ul_res3,ul_res4,ul_res5,
ul_res6,ul_res7,ul_res8,ul_res9,ul_res10,ul_res11,ul_res12;
unsigned int
- ui_res0,ui_res1,ui_res2,ui_res3,ui_res4,ui_res5;
+ ui_res0,ui_res1,ui_res2;
unsigned short int
us_res0,us_res1,us_res2,us_res3,us_res4,us_res5,
us_res6,us_res7,us_res8,us_res9,us_res10,us_res11,us_res12;
unsigned char
uc_res0,uc_res1,uc_res2,uc_res3,uc_res4,uc_res5,
- uc_res6,uc_res7,uc_res8,uc_res9,uc_res10,uc_res11,uc_res12;
+ uc_res6,uc_res7,uc_res8,uc_res9,uc_res10,uc_res11,uc_res12;
} AVCodecContext;
typedef struct AVCodec {
@@ -299,7 +303,7 @@ typedef struct AVCodec {
int (*init)(AVCodecContext *);
int (*encode)(AVCodecContext *, UINT8 *buf, int buf_size, void *data);
int (*close)(AVCodecContext *);
- int (*decode)(AVCodecContext *, void *outdata, int *outdata_size,
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size,
UINT8 *buf, int buf_size);
int capabilities;
struct AVCodec *next;
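
The avcodec.h hunk above adds three public fields next to the existing DR1 direct-rendering hooks: the decoder now exports has_b_frames, and the application's get_buffer_callback is expected to report a separate chroma stride (dr_uvstride) and the number of I/P buffers it rotates (dr_ip_buffer_count), which MPV_frame_start() reads right after invoking the callback (see the mpegvideo.c hunk below). A minimal, purely illustrative callback might look as follows; the buffer pool, the alignment choices, and everything beyond the fields read in MPV_frame_start() are assumptions of this sketch, not part of the patch:

    #include <stdlib.h>
    #include <inttypes.h>
    #include "avcodec.h"     /* AVCodecContext and the dr_* fields from this patch */

    /* Hypothetical three-buffer pool, static only to keep the sketch
     * self-contained; a real player would manage its own (e.g. video-memory)
     * buffers and free them when done. */
    #define IP_BUFFERS 3
    static uint8_t *pool[IP_BUFFERS][3];
    static int pool_pos;

    static void my_get_buffer(AVCodecContext *c, int width, int height,
                              int pict_type)
    {
        int y_stride  = (width  + 15) & ~15; /* assumed alignment, not mandated here */
        int uv_stride = y_stride / 2;        /* conventionally half of y_stride, but it
                                                may differ - hence the new dr_uvstride */
        int h16       = (height + 15) & ~15;
        int i         = pool_pos++ % IP_BUFFERS; /* a real callback would pick the slot
                                                    based on pict_type */

        if (!pool[i][0]) {
            pool[i][0] = malloc(y_stride  * h16);      /* Y  */
            pool[i][1] = malloc(uv_stride * h16 / 2);  /* Cb */
            pool[i][2] = malloc(uv_stride * h16 / 2);  /* Cr */
        }
        c->dr_buffer[0] = pool[i][0];
        c->dr_buffer[1] = pool[i][1];
        c->dr_buffer[2] = pool[i][2];
        c->dr_stride          = y_stride;
        c->dr_uvstride        = uv_stride;   /* new in build 4617 */
        c->dr_ip_buffer_count = IP_BUFFERS;  /* new: how many I/P buffers rotate */
        c->dr_opaque_frame    = pool[i];
    }

An application using the DR1 path would install this via avctx->get_buffer_callback = my_get_buffer before decoding; MPV_frame_start() (further down in the mpegvideo.c diff) then copies dr_stride, dr_uvstride and dr_ip_buffer_count into the MpegEncContext.
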
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 7bcd0b197e..d03b9ccf5b 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -154,6 +154,7 @@ uint64_t time= rdtsc();
} else {
ret = h263_decode_picture_header(s);
}
+ avctx->has_b_frames= s->has_b_frames;
/* After H263 & mpeg4 header decode we have the height, width,*/
@@ -432,8 +433,8 @@ uint64_t time= rdtsc();
pict->data[2] = s->last_picture[2];
}
pict->linesize[0] = s->linesize;
- pict->linesize[1] = s->linesize / 2;
- pict->linesize[2] = s->linesize / 2;
+ pict->linesize[1] = s->uvlinesize;
+ pict->linesize[2] = s->uvlinesize;
avctx->quality = s->qscale;
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 890cdad35c..fab6d1aef8 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -1505,8 +1505,8 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
pict->data[1] = picture[1];
pict->data[2] = picture[2];
pict->linesize[0] = s->linesize;
- pict->linesize[1] = s->linesize / 2;
- pict->linesize[2] = s->linesize / 2;
+ pict->linesize[1] = s->uvlinesize;
+ pict->linesize[2] = s->uvlinesize;
return 1;
} else {
return 0;
@@ -1546,7 +1546,7 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
}
s->width = width;
s->height = height;
- s->has_b_frames = 1;
+ avctx->has_b_frames= s->has_b_frames = 1;
s->avctx = avctx;
avctx->width = width;
avctx->height = height;
@@ -1642,8 +1642,8 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
picture->data[1] = s2->next_picture[1];
picture->data[2] = s2->next_picture[2];
picture->linesize[0] = s2->linesize;
- picture->linesize[1] = s2->linesize / 2;
- picture->linesize[2] = s2->linesize / 2;
+ picture->linesize[1] = s2->uvlinesize;
+ picture->linesize[2] = s2->uvlinesize;
*data_size = sizeof(AVPicture);
}
return 0;
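
With both h263dec.c and mpeg12.c now mirroring the internal flag into avctx->has_b_frames, a caller can inspect it once the first headers have been decoded. One hypothetical use, assuming a direct-rendering player that wants one spare display slot when B frames introduce a frame of reordering delay (that "+1" policy is an assumption of this sketch, not something the patch defines):

    #include "avcodec.h"

    /* Hypothetical caller-side helper: after the first picture/sequence header
     * has been parsed, the decoder has filled avctx->has_b_frames, so the
     * application can size its display queue accordingly. */
    static int display_buffers_needed(const AVCodecContext *avctx)
    {
        return avctx->dr_ip_buffer_count + (avctx->has_b_frames ? 1 : 0);
    }
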
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index af5273bd88..6528ee9a35 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -145,7 +145,8 @@ int MPV_common_init(MpegEncContext *s)
s->mb_height = (s->height + 15) / 16;
s->mb_num = s->mb_width * s->mb_height;
if(!(s->flags&CODEC_FLAG_DR1)){
- s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;
+ s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;
+ s->uvlinesize = s->mb_width * 8 + EDGE_WIDTH;
for(i=0;i<3;i++) {
int w, h, shift, pict_start;
@@ -153,8 +154,8 @@ int MPV_common_init(MpegEncContext *s)
w = s->linesize;
h = s->mb_height * 16 + 2 * EDGE_WIDTH;
shift = (i == 0) ? 0 : 1;
- c_size = (w >> shift) * (h >> shift);
- pict_start = (w >> shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);
+ c_size = (s->linesize>>shift) * (h >> shift);
+ pict_start = (s->linesize>>shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);
CHECKED_ALLOCZ(pict, c_size)
s->last_picture_base[i] = pict;
@@ -175,6 +176,7 @@ int MPV_common_init(MpegEncContext *s)
if(i>0) memset(s->aux_picture_base[i], 128, c_size);
}
}
+ s->ip_buffer_count= 2;
}
CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+32)*2*17);
@@ -622,7 +624,9 @@ void MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
int i;
avctx->get_buffer_callback(avctx, s->width, s->height, s->pict_type);
- s->linesize= avctx->dr_stride;
+ s->linesize = avctx->dr_stride;
+ s->uvlinesize= avctx->dr_uvstride;
+ s->ip_buffer_count= avctx->dr_ip_buffer_count;
}
if (s->pict_type == B_TYPE) {
@@ -664,13 +668,13 @@ void MPV_frame_end(MpegEncContext *s)
if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
if(s->avctx==NULL || s->avctx->codec->id!=CODEC_ID_MPEG4 || s->divx_version>=500){
draw_edges(s->current_picture[0], s->linesize, s->mb_width*16, s->mb_height*16, EDGE_WIDTH);
- draw_edges(s->current_picture[1], s->linesize/2, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
- draw_edges(s->current_picture[2], s->linesize/2, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
+ draw_edges(s->current_picture[1], s->uvlinesize, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
+ draw_edges(s->current_picture[2], s->uvlinesize, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
}else{
/* mpeg4? / opendivx / xvid */
draw_edges(s->current_picture[0], s->linesize, s->width, s->height, EDGE_WIDTH);
- draw_edges(s->current_picture[1], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
- draw_edges(s->current_picture[2], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
+ draw_edges(s->current_picture[1], s->uvlinesize, s->width/2, s->height/2, EDGE_WIDTH/2);
+ draw_edges(s->current_picture[2], s->uvlinesize, s->width/2, s->height/2, EDGE_WIDTH/2);
}
}
emms_c();
@@ -715,8 +719,8 @@ void reorder_input(MpegEncContext *s, AVPicture *pict)
//printf("index:%d type:%d strides: %d %d\n", index, s->input_pict_type, pict->linesize[0], s->linesize);
if( (index==0 || (s->flags&CODEC_FLAG_INPUT_PRESERVED))
&& pict->linesize[0] == s->linesize
- && pict->linesize[1] == s->linesize>>1
- && pict->linesize[2] == s->linesize>>1){
+ && pict->linesize[1] == s->uvlinesize
+ && pict->linesize[2] == s->uvlinesize){
//printf("ptr\n");
for(i=0; i<3; i++){
s->coded_order[index].picture[i]= pict->data[i];
@@ -870,7 +874,7 @@ static inline void gmc1_motion(MpegEncContext *s,
int h)
{
UINT8 *ptr;
- int offset, src_x, src_y, linesize;
+ int offset, src_x, src_y, linesize, uvlinesize;
int motion_x, motion_y;
if(s->real_sprite_warping_points>1) printf("more than 1 warp point isnt supported\n");
@@ -888,6 +892,7 @@ static inline void gmc1_motion(MpegEncContext *s,
motion_y =0;
linesize = s->linesize;
+ uvlinesize = s->uvlinesize;
ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
dest_y+=dest_offset;
@@ -907,11 +912,11 @@ static inline void gmc1_motion(MpegEncContext *s,
if (src_y == s->height>>1)
motion_y =0;
- offset = (src_y * linesize>>1) + src_x + (src_offset>>1);
+ offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
ptr = ref_picture[1] + offset;
- gmc1(dest_cb + (dest_offset>>1), ptr, linesize>>1, h>>1, motion_x&15, motion_y&15, s->no_rounding);
+ gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding);
ptr = ref_picture[2] + offset;
- gmc1(dest_cr + (dest_offset>>1), ptr, linesize>>1, h>>1, motion_x&15, motion_y&15, s->no_rounding);
+ gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding);
return;
}
@@ -1047,20 +1052,20 @@ if(s->quarter_sample)
if (src_y == (height >> 1))
dxy &= ~2;
- offset = (src_y * (linesize >> 1)) + src_x + (src_offset >> 1);
+ offset = (src_y * s->uvlinesize) + src_x + (src_offset >> 1);
ptr = ref_picture[1] + offset;
if(emu){
- emulated_edge_mc(s->edge_emu_buffer, ptr, linesize>>1, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
+ emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
+ pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
ptr = ref_picture[2] + offset;
if(emu){
- emulated_edge_mc(s->edge_emu_buffer, ptr, linesize>>1, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
+ emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
+ pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
}
static inline void qpel_motion(MpegEncContext *s,
@@ -1126,20 +1131,20 @@ static inline void qpel_motion(MpegEncContext *s,
if (src_y == (height >> 1))
dxy &= ~2;
- offset = (src_y * (linesize >> 1)) + src_x + (src_offset >> 1);
+ offset = (src_y * s->uvlinesize) + src_x + (src_offset >> 1);
ptr = ref_picture[1] + offset;
if(emu){
- emulated_edge_mc(s->edge_emu_buffer, ptr, linesize>>1, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
+ emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
+ pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
ptr = ref_picture[2] + offset;
if(emu){
- emulated_edge_mc(s->edge_emu_buffer, ptr, linesize>>1, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
+ emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, (h>>1)+1, src_x, src_y, s->width>>1, height>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
+ pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, s->uvlinesize, h >> 1);
}
@@ -1246,24 +1251,24 @@ static inline void MPV_motion(MpegEncContext *s,
if (src_y == s->height/2)
dxy &= ~2;
- offset = (src_y * (s->linesize >> 1)) + src_x;
+ offset = (src_y * (s->uvlinesize)) + src_x;
ptr = ref_picture[1] + offset;
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->width >>1
|| src_y + (dxy>>1) + 8 > s->height>>1){
- emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize>>1, 9, 9, src_x, src_y, s->width>>1, s->height>>1);
+ emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->width>>1, s->height>>1);
ptr= s->edge_emu_buffer;
emu=1;
}
}
- pix_op[dxy](dest_cb, ptr, s->linesize >> 1, 8);
+ pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
ptr = ref_picture[2] + offset;
if(emu){
- emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize>>1, 9, 9, src_x, src_y, s->width>>1, s->height>>1);
+ emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->width>>1, s->height>>1);
ptr= s->edge_emu_buffer;
}
- pix_op[dxy](dest_cr, ptr, s->linesize >> 1, 8);
+ pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
break;
case MV_TYPE_FIELD:
if (s->picture_structure == PICT_FRAME) {
@@ -1425,19 +1430,22 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
if (s->mb_skiped) {
s->mb_skiped = 0;
+
+ (*mbskip_ptr) ++; /* indicate that this time we skiped it */
+ if(*mbskip_ptr >99) *mbskip_ptr= 99;
+
/* if previous was skipped too, then nothing to do !
skip only during decoding as we might trash the buffers during encoding a bit */
- if (*mbskip_ptr != 0 && !s->encoding)
+ if (*mbskip_ptr >= s->ip_buffer_count && !s->encoding)
goto the_end;
- *mbskip_ptr = 1; /* indicate that this time we skiped it */
} else {
*mbskip_ptr = 0; /* not skipped */
}
}
dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize) + mb_x * 16;
- dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
- dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
+ dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
+ dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
if (s->interlaced_dct) {
dct_linesize = s->linesize * 2;
@@ -1482,8 +1490,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
- add_dequant_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
- add_dequant_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
+ add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
+ add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
}
} else {
add_dct(s, block[0], 0, dest_y, dct_linesize);
@@ -1492,8 +1500,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
- add_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
- add_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
+ add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
+ add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
}
}
} else {
@@ -1504,8 +1512,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
- put_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
- put_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
+ put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
+ put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
}
}
}
@@ -1620,8 +1628,8 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int wrap_y, wrap_c;
dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
- dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
- dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
+ dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
+ dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
wrap_y = s->linesize;
wrap_c = wrap_y>>1;
ptr_y = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
@@ -2566,8 +2574,8 @@ static void remove_ac(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint
for(y=0; y<8; y++){
int x;
for(x=0; x<8; x++){
- dest_cb[x + y*(s->linesize>>1)]= dcb/8;
- dest_cr[x + y*(s->linesize>>1)]= dcr/8;
+ dest_cb[x + y*(s->uvlinesize)]= dcb/8;
+ dest_cr[x + y*(s->uvlinesize)]= dcr/8;
}
}
}
@@ -2622,8 +2630,8 @@ void ff_conceal_past_errors(MpegEncContext *s, int unknown_pos)
for(; mb_y>=0 && mb_y>=s->resync_mb_y; mb_y--){
for(; mb_x>=0; mb_x--){
uint8_t *dest_y = s->current_picture[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
- uint8_t *dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
- uint8_t *dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
+ uint8_t *dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
+ uint8_t *dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
int mb_x_backup= s->mb_x; //FIXME pass xy to mpeg_motion
int mb_y_backup= s->mb_y;
s->mb_x=mb_x;
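
The MPV_decode_mb() hunk above implements the "mb_skip with more than 2 ip buffers" part of the commit message: mbskip_table turns from a flag into a saturating per-macroblock counter, and the copy from the previous picture is only elided once the macroblock has been skipped at least ip_buffer_count frames in a row, because only then does every buffer in the application's rotation already hold the same, unchanged pixels (with ip_buffer_count set to 2 this reduces to the old behaviour). A small standalone illustration of that rule:

    #include <stdio.h>

    /* Standalone illustration of the new skip rule in MPV_decode_mb(): with N
     * rotating I/P buffers, the pixels of a skipped macroblock still have to be
     * copied until it has been skipped N frames in a row. */
    int main(void)
    {
        int ip_buffer_count = 3;  /* e.g. an application handing out 3 DR buffers */
        int mbskip = 0;           /* the per-macroblock counter from mbskip_table */
        int run;

        for (run = 1; run <= 4; run++) {
            if (++mbskip > 99)
                mbskip = 99;      /* same saturation as in the hunk above */
            printf("skipped %d frame(s) in a row: %s\n", run,
                   mbskip >= ip_buffer_count ? "no copy needed"
                                             : "copy from last picture");
        }
        return 0;
    }
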
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 9ee393cb1c..b04d4f74cd 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -132,6 +132,7 @@ typedef struct MpegEncContext {
int mb_width, mb_height; /* number of MBs horizontally & vertically */
int mb_num; /* number of MBs of a picture */
int linesize; /* line size, in bytes, may be different from width */
+ int uvlinesize; /* line size, for chroma in bytes, may be different from width */
UINT8 *new_picture[3]; /* picture to be compressed */
UINT8 *picture_buffer[REORDER_BUFFER_SIZE][3]; /* internal buffers used for reordering of input pictures */
int picture_buffer_index;
@@ -145,6 +146,7 @@ typedef struct MpegEncContext {
UINT8 *current_picture[3]; /* buffer to store the decompressed current picture */
void *last_dr_opaque;
void *next_dr_opaque;
+ int ip_buffer_count; /* number of buffers, currently only >2 if dr1 is used */
int num_available_buffers; /* is 0 at the start & after seeking, after the first I frame its 1 after next I/P 2 */
int last_dc[3]; /* last DC values for MPEG1 */
INT16 *dc_val[3]; /* used for mpeg4 DC prediction, all 3 arrays must be continuous */
@@ -340,6 +342,8 @@ typedef struct MpegEncContext {
int quant_precision;
int quarter_sample; /* 1->qpel, 0->half pel ME/MC */
int scalability;
+ int hierachy_type;
+ int enhancement_type;
int new_pred;
int reduced_res_vop;
int aspect_ratio_info;
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index 7ec550e188..34dc3569d8 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -763,7 +763,7 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
wrap= s->linesize;
dest= s->current_picture[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
}else{
- wrap= s->linesize>>1;
+ wrap= s->uvlinesize;
dest= s->current_picture[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index 9ba12befe3..438d0dd387 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -487,8 +487,8 @@ static int rv10_decode_frame(AVCodecContext *avctx,
pict->data[1] = s->current_picture[1];
pict->data[2] = s->current_picture[2];
pict->linesize[0] = s->linesize;
- pict->linesize[1] = s->linesize / 2;
- pict->linesize[2] = s->linesize / 2;
+ pict->linesize[1] = s->uvlinesize;
+ pict->linesize[2] = s->uvlinesize;
avctx->quality = s->qscale;
*data_size = sizeof(AVPicture);