path: root/libavcodec/h263.c
author    Michael Niedermayer <michaelni@gmx.at>    2004-04-16 01:01:45 +0000
committer Michael Niedermayer <michaelni@gmx.at>    2004-04-16 01:01:45 +0000
commit    137c8468e840e31f9399997b95e02341c67d0a31 (patch)
tree      5b05f1f9d27b864e880fb9842a7b24b07f4b7ede /libavcodec/h263.c
parent    37f5cd5a18a8e08a18814d1aca828c92ee398d10 (diff)
download  ffmpeg-137c8468e840e31f9399997b95e02341c67d0a31.tar.gz
unify table indexing (motion_val,dc_val,ac_val,coded_block changed)
minor +-1 bugfix

Originally committed as revision 3016 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/h263.c')
-rw-r--r--  libavcodec/h263.c | 139
1 file changed, 38 insertions(+), 101 deletions(-)
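The change the commit message describes is easiest to see side by side: the old code addressed the dc_val/ac_val tables through privately padded widths (mb_width*2 + 2 for luma, mb_width + 2 for chroma) plus a one-block border offset, while the new code reuses the shared b8_stride/mb_stride addressing with no border. Below is a minimal, self-contained sketch of the two index computations; the stand-in struct and the stride values in main() are assumptions for illustration (they are not taken from MpegEncContext), and only the index arithmetic mirrors the hunks in the diff.

/*
 * Illustrative sketch only -- not part of the patch. It contrasts the old
 * padded-table indexing of dc_val/ac_val with the unified b8_stride /
 * mb_stride indexing this commit switches to.
 */
#include <stdio.h>

struct ctx {                /* stripped-down stand-in for MpegEncContext */
    int mb_x, mb_y;         /* current macroblock position */
    int mb_width;           /* picture width in macroblocks */
    int b8_stride;          /* stride of the 8x8-block tables (new scheme) */
    int mb_stride;          /* stride of the per-macroblock tables (new scheme) */
};

/* old luma DC/AC index: privately padded table, +1 border in x and y */
static int luma_index_old(const struct ctx *s, int n)   /* n = 0..3 */
{
    int wrap = s->mb_width * 2 + 2;
    int x    = 2 * s->mb_x + 1 + (n & 1);
    int y    = 2 * s->mb_y + 1 + ((n & 2) >> 1);
    return x + y * wrap;
}

/* new luma DC/AC index: no border, shared b8_stride */
static int luma_index_new(const struct ctx *s, int n)
{
    int x = 2 * s->mb_x + (n & 1);
    int y = 2 * s->mb_y + ((n & 2) >> 1);
    return x + y * s->b8_stride;
}

/* old chroma DC/AC index: padded mb_width + 2 table, +1 border */
static int chroma_index_old(const struct ctx *s)
{
    return (s->mb_x + 1) + (s->mb_y + 1) * (s->mb_width + 2);
}

/* new chroma DC/AC index: plain mb_stride addressing */
static int chroma_index_new(const struct ctx *s)
{
    return s->mb_x + s->mb_y * s->mb_stride;
}

int main(void)
{
    /* stride values assumed here purely for illustration */
    struct ctx s = { .mb_x = 3, .mb_y = 2, .mb_width = 45,
                     .b8_stride = 2 * 45 + 1, .mb_stride = 45 + 1 };
    printf("luma block 0:  old %d  new %d\n", luma_index_old(&s, 0), luma_index_new(&s, 0));
    printf("chroma:        old %d  new %d\n", chroma_index_old(&s), chroma_index_new(&s));
    return 0;
}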
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index c60044fd37..f156980ab2 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -610,7 +610,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
void ff_h263_update_motion_val(MpegEncContext * s){
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
//FIXME a lot of thet is only needed for !low_delay
- const int wrap = s->block_wrap[0];
+ const int wrap = s->b8_stride;
const int xy = s->block_index[0];
s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
@@ -985,7 +985,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x16 mode */
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
h263_encode_motion(s, motion_x - pred_x, s->f_code);
h263_encode_motion(s, motion_y - pred_y, s->f_code);
@@ -1009,7 +1009,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x8 interlaced mode */
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
pred_y /=2;
put_bits(&s->pb, 1, s->field_select[0][0]);
@@ -1037,7 +1037,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
- h263_pred_motion(s, i, &pred_x, &pred_y);
+ h263_pred_motion(s, i, 0, &pred_x, &pred_y);
h263_encode_motion(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x, s->f_code);
h263_encode_motion(s, s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
@@ -1185,7 +1185,7 @@ void h263_encode_mb(MpegEncContext * s,
}
/* motion vectors: 16x16 mode */
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (!s->umvplus) {
h263_encode_motion(s, motion_x - pred_x, 1);
@@ -1212,7 +1212,7 @@ void h263_encode_mb(MpegEncContext * s,
for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/
- h263_pred_motion(s, i, &pred_x, &pred_y);
+ h263_pred_motion(s, i, 0, &pred_x, &pred_y);
motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
@@ -1435,16 +1435,16 @@ static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
/* find prediction */
if (n < 4) {
- x = 2 * s->mb_x + 1 + (n & 1);
- y = 2 * s->mb_y + 1 + ((n & 2) >> 1);
- wrap = s->mb_width * 2 + 2;
+ x = 2 * s->mb_x + (n & 1);
+ y = 2 * s->mb_y + ((n & 2) >> 1);
+ wrap = s->b8_stride;
dc_val = s->dc_val[0];
ac_val = s->ac_val[0][0];
scale = s->y_dc_scale;
} else {
- x = s->mb_x + 1;
- y = s->mb_y + 1;
- wrap = s->mb_width + 2;
+ x = s->mb_x;
+ y = s->mb_y;
+ wrap = s->mb_stride;
dc_val = s->dc_val[n - 4 + 1];
ac_val = s->ac_val[n - 4 + 1][0];
scale = s->c_dc_scale;
@@ -1482,16 +1482,16 @@ static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
/* find prediction */
if (n < 4) {
- x = 2 * s->mb_x + 1 + (n & 1);
- y = 2 * s->mb_y + 1 + (n>> 1);
- wrap = s->mb_width * 2 + 2;
+ x = 2 * s->mb_x + (n & 1);
+ y = 2 * s->mb_y + (n>> 1);
+ wrap = s->b8_stride;
dc_val = s->dc_val[0];
ac_val = s->ac_val[0][0];
scale = s->y_dc_scale;
} else {
- x = s->mb_x + 1;
- y = s->mb_y + 1;
- wrap = s->mb_width + 2;
+ x = s->mb_x;
+ y = s->mb_y;
+ wrap = s->mb_stride;
dc_val = s->dc_val[n - 4 + 1];
ac_val = s->ac_val[n - 4 + 1][0];
scale = s->c_dc_scale;
@@ -1562,78 +1562,15 @@ static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
}
-int16_t *h263_pred_motion(MpegEncContext * s, int block,
+int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
int *px, int *py)
{
- int xy, wrap;
- int16_t *A, *B, *C, *mot_val;
- static const int off[4]= {2, 1, 1, -1};
-
- wrap = s->block_wrap[0];
- xy = s->block_index[block];
-
- mot_val = s->current_picture.motion_val[0][xy];
-
- A = s->current_picture.motion_val[0][xy - 1];
- /* special case for first (slice) line */
- if (s->first_slice_line && block<3) {
- // we cant just change some MVs to simulate that as we need them for the B frames (and ME)
- // and if we ever support non rectangular objects than we need to do a few ifs here anyway :(
- if(block==0){ //most common case
- if(s->mb_x == s->resync_mb_x){ //rare
- *px= *py = 0;
- }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
- C = s->current_picture.motion_val[0][xy + off[block] - wrap];
- if(s->mb_x==0){
- *px = C[0];
- *py = C[1];
- }else{
- *px = mid_pred(A[0], 0, C[0]);
- *py = mid_pred(A[1], 0, C[1]);
- }
- }else{
- *px = A[0];
- *py = A[1];
- }
- }else if(block==1){
- if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
- C = s->current_picture.motion_val[0][xy + off[block] - wrap];
- *px = mid_pred(A[0], 0, C[0]);
- *py = mid_pred(A[1], 0, C[1]);
- }else{
- *px = A[0];
- *py = A[1];
- }
- }else{ /* block==2*/
- B = s->current_picture.motion_val[0][xy - wrap];
- C = s->current_picture.motion_val[0][xy + off[block] - wrap];
- if(s->mb_x == s->resync_mb_x) //rare
- A[0]=A[1]=0;
-
- *px = mid_pred(A[0], B[0], C[0]);
- *py = mid_pred(A[1], B[1], C[1]);
- }
- } else {
- B = s->current_picture.motion_val[0][xy - wrap];
- C = s->current_picture.motion_val[0][xy + off[block] - wrap];
- *px = mid_pred(A[0], B[0], C[0]);
- *py = mid_pred(A[1], B[1], C[1]);
- }
- return mot_val;
-}
-
-// identical to above but with s->current_picture->motion_val, the above one will be removed, and this renamed to it
-int16_t *h263_pred_motion2(MpegEncContext * s, int block, int dir,
- int *px, int *py)
-{
- int xy, wrap;
+ int wrap;
int16_t *A, *B, *C, (*mot_val)[2];
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
- xy = 2*(s->mb_x + s->mb_y * wrap);
-
- mot_val = s->current_picture.motion_val[dir] + xy;
+ mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
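The hunk above folds the old h263_pred_motion2() into h263_pred_motion(): the function gains a dir argument and addresses motion_val through s->block_index[block] instead of recomputing 2*(s->mb_x + s->mb_y*wrap). As a reading aid, here is a small stand-alone sketch of the neighbour addressing and median prediction the unified function relies on; median3() and pred_mv() are hypothetical stand-ins for FFmpeg's mid_pred() and the real function, and the first-slice-line special cases handled in h263.c are left out.

#include <stdint.h>
#include <stdio.h>

static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b */
    if (b > c) b = c;                        /* b = min(b, c) */
    return a > b ? a : b;                    /* median of the three inputs */
}

typedef int16_t mv[2];  /* one motion vector: {x, y} */

/* mot points at the current 8x8 block's entry in a flat MV table,
 * wrap is the table stride (b8_stride), block is 0..3 within the MB. */
static void pred_mv(mv *mot, int wrap, int block, int *px, int *py)
{
    static const int off[4] = { 2, 1, 1, -1 };
    const int16_t *A = mot[-1];                 /* left neighbour              */
    const int16_t *B = mot[-wrap];              /* top neighbour               */
    const int16_t *C = mot[off[block] - wrap];  /* top-right (top-left for 3)  */

    *px = median3(A[0], B[0], C[0]);
    *py = median3(A[1], B[1], C[1]);
}

int main(void)
{
    /* toy 4-column table: row 0 = indices 0..3, row 1 = 4..7;
     * the "current" block sits at index 5 */
    mv tab[8] = { {0,0}, {2,1}, {0,0}, {1,0},
                  {3,2}, {0,0}, {0,0}, {0,0} };
    int px, py;
    pred_mv(&tab[5], 4, 0, &px, &py);
    printf("predicted MV: (%d, %d)\n", px, py);  /* medians of (3,2,1) and (2,1,0) */
    return 0;
}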
@@ -3198,10 +3135,10 @@ void ff_mpeg4_clean_buffers(MpegEncContext *s)
{
int c_wrap, c_xy, l_wrap, l_xy;
- l_wrap= s->block_wrap[0];
- l_xy= s->mb_y*l_wrap*2 + s->mb_x*2;
- c_wrap= s->block_wrap[4];
- c_xy= s->mb_y*c_wrap + s->mb_x;
+ l_wrap= s->b8_stride;
+ l_xy= (2*s->mb_y-1)*l_wrap + s->mb_x*2 - 1;
+ c_wrap= s->mb_stride;
+ c_xy= (s->mb_y-1)*c_wrap + s->mb_x - 1;
#if 0
/* clean DC */
@@ -3374,7 +3311,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
}else{ /* P/S_TYPE */
int mx, my, pred_x, pred_y, bits;
int16_t * const mot_val= s->current_picture.motion_val[0][s->block_index[0]];
- const int stride= s->block_wrap[0]*2;
+ const int stride= s->b8_stride*2;
try_again:
bits= show_bits(&s->gb, 17);
@@ -3432,7 +3369,7 @@ try_again:
if ((cbpc & 16) == 0) {
/* 16x16 motion prediction */
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if(!s->mcsel){
mx = h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
@@ -3456,7 +3393,7 @@ try_again:
int i;
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
- int16_t *mot_val= h263_pred_motion(s, i, &pred_x, &pred_y);
+ int16_t *mot_val= h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;
@@ -3710,7 +3647,7 @@ static void preview_obmc(MpegEncContext *s){
int cbpc, i, pred_x, pred_y, mx, my;
int16_t *mot_val;
const int xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
- const int stride= s->block_wrap[0]*2;
+ const int stride= s->b8_stride*2;
for(i=0; i<4; i++)
s->block_index[i]+= 2;
@@ -3750,7 +3687,7 @@ static void preview_obmc(MpegEncContext *s){
if ((cbpc & 16) == 0) {
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
- mot_val= h263_pred_motion(s, 0, &pred_x, &pred_y);
+ mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
@@ -3768,7 +3705,7 @@ static void preview_obmc(MpegEncContext *s){
} else {
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
- mot_val = h263_pred_motion(s, i, &pred_x, &pred_y);
+ mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
@@ -3860,7 +3797,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
@@ -3885,7 +3822,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
- mot_val = h263_pred_motion(s, i, &pred_x, &pred_y);
+ mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
@@ -3979,7 +3916,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
//FIXME UMV
if(USES_LIST(mb_type, 0)){
- int16_t *mot_val= h263_pred_motion2(s, 0, 0, &mx, &my);
+ int16_t *mot_val= h263_pred_motion(s, 0, 0, &mx, &my);
s->mv_dir = MV_DIR_FORWARD;
mx = h263_decode_motion(s, mx, 1);
@@ -3992,7 +3929,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
if(USES_LIST(mb_type, 1)){
- int16_t *mot_val= h263_pred_motion2(s, 0, 1, &mx, &my);
+ int16_t *mot_val= h263_pred_motion(s, 0, 1, &mx, &my);
s->mv_dir |= MV_DIR_BACKWARD;
mx = h263_decode_motion(s, mx, 1);
@@ -4147,7 +4084,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
s->field_select[0][0]= get_bits1(&s->gb);
s->field_select[0][1]= get_bits1(&s->gb);
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
for(i=0; i<2; i++){
mx = h263_decode_motion(s, pred_x, s->f_code);
@@ -4165,7 +4102,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
- h263_pred_motion(s, 0, &pred_x, &pred_y);
+ h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
@@ -4182,7 +4119,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
- mot_val = h263_pred_motion(s, i, &pred_x, &pred_y);
+ mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = h263_decode_motion(s, pred_x, s->f_code);
if (mx >= 0xffff)
return -1;