author    Anton Khirnov <anton@khirnov.net>    2015-01-17 22:28:46 +0100
committer Anton Khirnov <anton@khirnov.net>    2015-03-21 11:27:14 +0100
commit    d4d9068cdf8f4b2b87ae87a2ef880d243f77b977 (patch)
tree      2ede3f693c54ca02c41d90549295fe5868f55b54 /libavcodec
parent    0edbe6faa7ef80daf0e84353cbe733389bf1a522 (diff)
h264: move mb_{x,y} into the per-slice context
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/dxva2_h264.c          2
-rw-r--r--  libavcodec/h264.c               16
-rw-r--r--  libavcodec/h264.h                5
-rw-r--r--  libavcodec/h264_cabac.c         18
-rw-r--r--  libavcodec/h264_cavlc.c         24
-rw-r--r--  libavcodec/h264_direct.c        38
-rw-r--r--  libavcodec/h264_mb.c            44
-rw-r--r--  libavcodec/h264_mb_template.c   22
-rw-r--r--  libavcodec/h264_mc_template.c    2
-rw-r--r--  libavcodec/h264_mvpred.h        16
-rw-r--r--  libavcodec/h264_slice.c        130
-rw-r--r--  libavcodec/svq3.c               62
-rw-r--r--  libavcodec/vaapi_h264.c          2
13 files changed, 193 insertions, 188 deletions
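
Note: the standalone C sketch below is not taken from the FFmpeg sources; all names in it (toy_decoder, toy_slice, decode_slice_rows) are hypothetical. It only illustrates the pattern this commit applies: the per-macroblock cursor (mb_x, mb_y) lives in the per-slice context, so each slice-decoding thread advances its own cursor and never writes shared decoder state.

/* Minimal sketch, assuming a shared read-mostly decoder context and one
 * slice context per decoding thread. Names are illustrative only. */

struct toy_decoder {                 /* shared across all slice threads */
    int mb_width, mb_height;
    int mb_stride;
};

struct toy_slice {                   /* one instance per slice thread */
    const struct toy_decoder *dec;
    int mb_x, mb_y;                  /* per-slice macroblock cursor */
    int mb_xy;                       /* mb_x + mb_y * mb_stride */
};

/* Walk one slice's macroblocks; only the slice context is written. */
static void decode_slice_rows(struct toy_slice *sl, int first_mb, int last_mb)
{
    const struct toy_decoder *dec = sl->dec;

    sl->mb_x = first_mb % dec->mb_width;
    sl->mb_y = first_mb / dec->mb_width;

    for (int mb = first_mb; mb < last_mb; mb++) {
        sl->mb_xy = sl->mb_x + sl->mb_y * dec->mb_stride;
        /* ... decode one macroblock at (sl->mb_x, sl->mb_y) ... */
        if (++sl->mb_x >= dec->mb_width) {
            sl->mb_x = 0;
            sl->mb_y++;
        }
    }
}

In the commit itself, H264Context keeps a single mb_y field that is pulled back from the last slice context after decoding (see ff_h264_execute_decode_slices in h264_slice.c below), so code outside the per-slice loop can still report the last decoded row.
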
diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
index 3de5f1891f..4e8c4ea552 100644
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -220,7 +220,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
slice->SliceBytesInBuffer = size;
slice->wBadSliceChopping = 0;
- slice->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + h->mb_x;
+ slice->first_mb_in_slice = (sl->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + sl->mb_x;
slice->NumMbsForSlice = 0; /* XXX it is set once we have all slices */
slice->BitOffsetToSliceData = get_bits_count(&h->gb);
slice->slice_type = ff_h264_get_slice_type(sl);
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 39d026f225..13f3aa4eca 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -58,8 +58,8 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
H264Context *h = opaque;
H264SliceContext *sl = &h->slice_ctx[0];
- h->mb_x = mb_x;
- h->mb_y = mb_y;
+ sl->mb_x = mb_x;
+ sl->mb_y = mb_y;
sl->mb_xy = mb_x + mb_y * h->mb_stride;
memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
assert(ref >= 0);
@@ -143,7 +143,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h, H264SliceContext *sl)
if (status < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"top block unavailable for requested intra4x4 mode %d at %d %d\n",
- status, h->mb_x, h->mb_y);
+ status, sl->mb_x, sl->mb_y);
return AVERROR_INVALIDDATA;
} else if (status) {
sl->intra4x4_pred_mode_cache[scan8[0] + i] = status;
@@ -159,7 +159,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h, H264SliceContext *sl)
if (status < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"left block unavailable for requested intra4x4 mode %d at %d %d\n",
- status, h->mb_x, h->mb_y);
+ status, sl->mb_x, sl->mb_y);
return AVERROR_INVALIDDATA;
} else if (status) {
sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
@@ -183,7 +183,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, H264SliceContext *sl,
if (mode > 3U) {
av_log(h->avctx, AV_LOG_ERROR,
"out of range intra chroma pred mode at %d %d\n",
- h->mb_x, h->mb_y);
+ sl->mb_x, sl->mb_y);
return AVERROR_INVALIDDATA;
}
@@ -192,7 +192,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, H264SliceContext *sl,
if (mode < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"top block unavailable for requested intra mode at %d %d\n",
- h->mb_x, h->mb_y);
+ sl->mb_x, sl->mb_y);
return AVERROR_INVALIDDATA;
}
}
@@ -208,7 +208,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, H264SliceContext *sl,
if (mode < 0) {
av_log(h->avctx, AV_LOG_ERROR,
"left block unavailable for requested intra mode at %d %d\n",
- h->mb_x, h->mb_y);
+ sl->mb_x, sl->mb_y);
return AVERROR_INVALIDDATA;
}
}
@@ -1114,7 +1114,7 @@ static void flush_dpb(AVCodecContext *avctx)
h->cur_pic_ptr = NULL;
ff_h264_unref_picture(h, &h->cur_pic);
- h->mb_x = h->mb_y = 0;
+ h->mb_y = 0;
ff_h264_free_tables(h, 1);
h->context_initialized = 0;
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index ce4e163809..1e0b2327b1 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -356,6 +356,7 @@ typedef struct H264SliceContext {
ptrdiff_t mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff
ptrdiff_t mb_uvlinesize;
+ int mb_x, mb_y;
int mb_xy;
int mb_skip_run;
int is_complex;
@@ -526,7 +527,7 @@ typedef struct H264Context {
int x264_build;
- int mb_x, mb_y;
+ int mb_y;
int resync_mb_x;
int resync_mb_y;
int mb_height, mb_width;
@@ -1020,7 +1021,7 @@ static av_always_inline void write_back_motion(H264Context *h,
int mb_type)
{
const int b_stride = h->b_stride;
- const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride; // try mb2b(8)_xy
+ const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
const int b8_xy = 4 * sl->mb_xy;
if (USES_LIST(mb_type, 0)) {
diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
index fb75ff0df5..88f50d6be3 100644
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@ -1288,7 +1288,7 @@ static int decode_cabac_field_decoding_flag(H264Context *h, H264SliceContext *sl
unsigned long ctx = 0;
- ctx += h->mb_field_decoding_flag & !!h->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
+ ctx += h->mb_field_decoding_flag & !!sl->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
ctx += (h->cur_pic.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == sl->slice_num);
return get_cabac_noinline( &sl->cabac, &(sl->cabac_state+70)[ctx] );
@@ -1914,21 +1914,21 @@ int ff_h264_decode_mb_cabac(H264Context *h, H264SliceContext *sl)
int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
- mb_xy = sl->mb_xy = h->mb_x + h->mb_y*h->mb_stride;
+ mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
- tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, h->mb_x, h->mb_y);
+ tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, sl->mb_x, sl->mb_y);
if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
int skip;
/* a skipped mb needs the aff flag from the following mb */
- if (FRAME_MBAFF(h) && (h->mb_y & 1) == 1 && sl->prev_mb_skipped)
+ if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 1 && sl->prev_mb_skipped)
skip = sl->next_mb_skipped;
else
- skip = decode_cabac_mb_skip(h, sl, h->mb_x, h->mb_y );
+ skip = decode_cabac_mb_skip(h, sl, sl->mb_x, sl->mb_y );
/* read skip flags */
if( skip ) {
- if (FRAME_MBAFF(h) && (h->mb_y & 1) == 0) {
+ if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 0) {
h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP;
- sl->next_mb_skipped = decode_cabac_mb_skip(h, sl, h->mb_x, h->mb_y+1 );
+ sl->next_mb_skipped = decode_cabac_mb_skip(h, sl, sl->mb_x, sl->mb_y+1 );
if(!sl->next_mb_skipped)
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl);
}
@@ -1944,7 +1944,7 @@ int ff_h264_decode_mb_cabac(H264Context *h, H264SliceContext *sl)
}
}
if (FRAME_MBAFF(h)) {
- if( (h->mb_y&1) == 0 )
+ if ((sl->mb_y & 1) == 0)
h->mb_mbaff =
h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl);
}
@@ -2367,7 +2367,7 @@ decode_intra_mb:
ctx= 3;
val++;
if(val > 2*max_qp){ //prevent infinite loop
- av_log(h->avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "cabac decode of qscale diff failed at %d %d\n", sl->mb_x, sl->mb_y);
return -1;
}
}
diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
index 6b16d2ac31..95b09a9fa4 100644
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@ -477,7 +477,7 @@ static int decode_residual(H264Context *h, H264SliceContext *sl,
if(total_coeff==0)
return 0;
if(total_coeff > (unsigned)max_coeff) {
- av_log(h->avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", h->mb_x, h->mb_y, total_coeff);
+ av_log(h->avctx, AV_LOG_ERROR, "corrupted macroblock %d %d (total_coeff=%d)\n", sl->mb_x, sl->mb_y, total_coeff);
return -1;
}
@@ -615,7 +615,7 @@ static int decode_residual(H264Context *h, H264SliceContext *sl,
if (zeros_left < 0) {
av_log(h->avctx, AV_LOG_ERROR,
- "negative number of zero coeffs at %d %d\n", h->mb_x, h->mb_y);
+ "negative number of zero coeffs at %d %d\n", sl->mb_x, sl->mb_y);
return AVERROR_INVALIDDATA;
}
@@ -703,9 +703,9 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
- mb_xy = sl->mb_xy = h->mb_x + h->mb_y*h->mb_stride;
+ mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
- tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, h->mb_x, h->mb_y);
+ tprintf(h->avctx, "pic:%d mb:%d/%d\n", h->frame_num, sl->mb_x, sl->mb_y);
cbp = 0; /* avoid warning. FIXME: find a solution without slowing
down the code */
if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
@@ -713,7 +713,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
sl->mb_skip_run = get_ue_golomb(&h->gb);
if (sl->mb_skip_run--) {
- if(FRAME_MBAFF(h) && (h->mb_y&1) == 0){
+ if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 0) {
if (sl->mb_skip_run == 0)
h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&h->gb);
}
@@ -722,7 +722,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
}
}
if (FRAME_MBAFF(h)) {
- if( (h->mb_y&1) == 0 )
+ if ((sl->mb_y & 1) == 0)
h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&h->gb);
}
@@ -751,7 +751,7 @@ int ff_h264_decode_mb_cavlc(H264Context *h, H264SliceContext *sl)
mb_type--;
decode_intra_mb:
if(mb_type > 25){
- av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(sl->slice_type), h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(sl->slice_type), sl->mb_x, sl->mb_y);
return -1;
}
partition_count=0;
@@ -838,7 +838,7 @@ decode_intra_mb:
for(i=0; i<4; i++){
sl->sub_mb_type[i]= get_ue_golomb_31(&h->gb);
if(sl->sub_mb_type[i] >=13){
- av_log(h->avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], sl->mb_x, sl->mb_y);
return -1;
}
sub_partition_count[i]= b_sub_mb_type_info[ sl->sub_mb_type[i] ].partition_count;
@@ -856,7 +856,7 @@ decode_intra_mb:
for(i=0; i<4; i++){
sl->sub_mb_type[i]= get_ue_golomb_31(&h->gb);
if(sl->sub_mb_type[i] >=4){
- av_log(h->avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], sl->mb_x, sl->mb_y);
return -1;
}
sub_partition_count[i]= p_sub_mb_type_info[ sl->sub_mb_type[i] ].partition_count;
@@ -1057,14 +1057,14 @@ decode_intra_mb:
if(decode_chroma){
if(cbp > 47){
- av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, sl->mb_x, sl->mb_y);
return -1;
}
if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp];
else cbp= golomb_to_inter_cbp [cbp];
}else{
if(cbp > 15){
- av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, sl->mb_x, sl->mb_y);
return -1;
}
if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp];
@@ -1103,7 +1103,7 @@ decode_intra_mb:
if (sl->qscale < 0) sl->qscale += max_qp + 1;
else sl->qscale -= max_qp+1;
if (((unsigned)sl->qscale) > max_qp){
- av_log(h->avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, h->mb_x, h->mb_y);
+ av_log(h->avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, sl->mb_x, sl->mb_y);
return -1;
}
}
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index 136d8fa25e..43ad41de90 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -179,7 +179,7 @@ static void pred_spatial_direct_motion(H264Context *const h, H264SliceContext *s
{
int b8_stride = 2;
int b4_stride = h->b_stride;
- int mb_xy = sl->mb_xy, mb_y = h->mb_y;
+ int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
int mb_type_col[2];
const int16_t (*l1mv0)[2], (*l1mv1)[2];
const int8_t *l1ref0, *l1ref1;
@@ -193,7 +193,7 @@ static void pred_spatial_direct_motion(H264Context *const h, H264SliceContext *s
assert(sl->ref_list[1][0].reference & 3);
await_reference_mb_row(h, &sl->ref_list[1][0],
- h->mb_y + !!IS_INTERLACED(*mb_type));
+ sl->mb_y + !!IS_INTERLACED(*mb_type));
#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \
MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM)
@@ -262,9 +262,9 @@ static void pred_spatial_direct_motion(H264Context *const h, H264SliceContext *s
if (IS_INTERLACED(sl->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
- mb_y = (h->mb_y & ~1) + sl->col_parity;
- mb_xy = h->mb_x +
- ((h->mb_y & ~1) + sl->col_parity) * h->mb_stride;
+ mb_y = (sl->mb_y & ~1) + sl->col_parity;
+ mb_xy = sl->mb_x +
+ ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
b8_stride = 0;
} else {
mb_y += sl->col_fieldoff;
@@ -273,8 +273,8 @@ static void pred_spatial_direct_motion(H264Context *const h, H264SliceContext *s
goto single_col;
} else { // AFL/AFR/FR/FL -> AFR/FR
if (IS_INTERLACED(*mb_type)) { // AFL /FL -> AFR/FR
- mb_y = h->mb_y & ~1;
- mb_xy = (h->mb_y & ~1) * h->mb_stride + h->mb_x;
+ mb_y = sl->mb_y & ~1;
+ mb_xy = (sl->mb_y & ~1) * h->mb_stride + sl->mb_x;
mb_type_col[0] = sl->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = sl->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
b8_stride = 2 + 4 * h->mb_stride;
@@ -323,7 +323,7 @@ single_col:
l1ref0 = &sl->ref_list[1][0].ref_index[0][4 * mb_xy];
l1ref1 = &sl->ref_list[1][0].ref_index[1][4 * mb_xy];
if (!b8_stride) {
- if (h->mb_y & 1) {
+ if (sl->mb_y & 1) {
l1ref0 += 2;
l1ref1 += 2;
l1mv0 += 2 * b4_stride;
@@ -465,7 +465,7 @@ static void pred_temp_direct_motion(H264Context *const h, H264SliceContext *sl,
{
int b8_stride = 2;
int b4_stride = h->b_stride;
- int mb_xy = sl->mb_xy, mb_y = h->mb_y;
+ int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
int mb_type_col[2];
const int16_t (*l1mv0)[2], (*l1mv1)[2];
const int8_t *l1ref0, *l1ref1;
@@ -476,13 +476,13 @@ static void pred_temp_direct_motion(H264Context *const h, H264SliceContext *sl,
assert(sl->ref_list[1][0].reference & 3);
await_reference_mb_row(h, &sl->ref_list[1][0],
- h->mb_y + !!IS_INTERLACED(*mb_type));
+ sl->mb_y + !!IS_INTERLACED(*mb_type));
if (IS_INTERLACED(sl->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
- mb_y = (h->mb_y & ~1) + sl->col_parity;
- mb_xy = h->mb_x +
- ((h->mb_y & ~1) + sl->col_parity) * h->mb_stride;
+ mb_y = (sl->mb_y & ~1) + sl->col_parity;
+ mb_xy = sl->mb_x +
+ ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
b8_stride = 0;
} else {
mb_y += sl->col_fieldoff;
@@ -491,8 +491,8 @@ static void pred_temp_direct_motion(H264Context *const h, H264SliceContext *sl,
goto single_col;
} else { // AFL/AFR/FR/FL -> AFR/FR
if (IS_INTERLACED(*mb_type)) { // AFL /FL -> AFR/FR
- mb_y = h->mb_y & ~1;
- mb_xy = h->mb_x + (h->mb_y & ~1) * h->mb_stride;
+ mb_y = sl->mb_y & ~1;
+ mb_xy = sl->mb_x + (sl->mb_y & ~1) * h->mb_stride;
mb_type_col[0] = sl->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = sl->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
b8_stride = 2 + 4 * h->mb_stride;
@@ -547,7 +547,7 @@ single_col:
l1ref0 = &sl->ref_list[1][0].ref_index[0][4 * mb_xy];
l1ref1 = &sl->ref_list[1][0].ref_index[1][4 * mb_xy];
if (!b8_stride) {
- if (h->mb_y & 1) {
+ if (sl->mb_y & 1) {
l1ref0 += 2;
l1ref1 += 2;
l1mv0 += 2 * b4_stride;
@@ -562,9 +562,9 @@ single_col:
int ref_offset;
if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
- map_col_to_list0[0] = sl->map_col_to_list0_field[h->mb_y & 1][0];
- map_col_to_list0[1] = sl->map_col_to_list0_field[h->mb_y & 1][1];
- dist_scale_factor = sl->dist_scale_factor_field[h->mb_y & 1];
+ map_col_to_list0[0] = sl->map_col_to_list0_field[sl->mb_y & 1][0];
+ map_col_to_list0[1] = sl->map_col_to_list0_field[sl->mb_y & 1][1];
+ dist_scale_factor = sl->dist_scale_factor_field[sl->mb_y & 1];
}
ref_offset = (sl->ref_list[1][0].mbaff << 4) & (mb_type_col[0] >> 3);
diff --git a/libavcodec/h264_mb.c b/libavcodec/h264_mb.c
index 6410bcb684..a9ea97c73a 100644
--- a/libavcodec/h264_mb.c
+++ b/libavcodec/h264_mb.c
@@ -57,7 +57,7 @@ static inline void get_lowest_part_y(H264Context *h, H264SliceContext *sl,
{
int my;
- y_offset += 16 * (h->mb_y >> MB_FIELD(h));
+ y_offset += 16 * (sl->mb_y >> MB_FIELD(h));
if (list0) {
int ref_n = sl->ref_cache[0][scan8[n]];
@@ -287,7 +287,7 @@ static av_always_inline void mc_dir_part(H264Context *h, H264SliceContext *sl,
ysh = 3 - (chroma_idc == 2 /* yuv422 */);
if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) {
// chroma offset when predicting from a field of opposite parity
- my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
+ my += 2 * ((sl->mb_y & 1) - (pic->reference - 1));
emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
}
@@ -345,8 +345,8 @@ static av_always_inline void mc_part_std(H264Context *h, H264SliceContext *sl,
dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
}
- x_offset += 8 * h->mb_x;
- y_offset += 8 * (h->mb_y >> MB_FIELD(h));
+ x_offset += 8 * sl->mb_x;
+ y_offset += 8 * (sl->mb_y >> MB_FIELD(h));
if (list0) {
H264Picture *ref = &sl->ref_list[0][sl->ref_cache[0][scan8[n]]];
@@ -399,8 +399,8 @@ static av_always_inline void mc_part_weighted(H264Context *h, H264SliceContext *
dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
}
- x_offset += 8 * h->mb_x;
- y_offset += 8 * (h->mb_y >> MB_FIELD(h));
+ x_offset += 8 * sl->mb_x;
+ y_offset += 8 * (sl->mb_y >> MB_FIELD(h));
if (list0 && list1) {
/* don't optimize for luma-only case, since B-frames usually
@@ -421,7 +421,7 @@ static av_always_inline void mc_part_weighted(H264Context *h, H264SliceContext *
pixel_shift, chroma_idc);
if (sl->use_weight == 2) {
- int weight0 = sl->implicit_weight[refn0][refn1][h->mb_y & 1];
+ int weight0 = sl->implicit_weight[refn0][refn1][sl->mb_y & 1];
int weight1 = 64 - weight0;
luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
height, 5, weight0, weight1, 0);
@@ -482,11 +482,11 @@ static av_always_inline void prefetch_motion(H264Context *h, H264SliceContext *s
* optimized for 64byte cache lines */
const int refn = sl->ref_cache[list][scan8[0]];
if (refn >= 0) {
- const int mx = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
- const int my = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
+ const int mx = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * sl->mb_x + 8;
+ const int my = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * sl->mb_y;
uint8_t **src = sl->ref_list[list][refn].f.data;
int off = (mx << pixel_shift) +
- (my + (h->mb_x & 3) * 4) * sl->mb_linesize +
+ (my + (sl->mb_x & 3) * 4) * sl->mb_linesize +
(64 << pixel_shift);
h->vdsp.prefetch(src[0] + off, h->linesize, 4);
if (chroma_idc == 3 /* yuv444 */) {
@@ -494,7 +494,7 @@ static av_always_inline void prefetch_motion(H264Context *h, H264SliceContext *s
h->vdsp.prefetch(src[2] + off, h->linesize, 4);
} else {
off = ((mx >> 1) << pixel_shift) +
- ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize +
+ ((my >> 1) + (sl->mb_x & 7)) * h->uvlinesize +
(64 << pixel_shift);
h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
}
@@ -515,7 +515,7 @@ static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl
uint8_t *top_border;
if (!simple && FRAME_MBAFF(h)) {
- if (h->mb_y & 1) {
+ if (sl->mb_y & 1) {
if (!MB_MBAFF(h))
return;
} else {
@@ -527,16 +527,16 @@ static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl
deblock_topleft = h->slice_table[sl->mb_xy - 1 - h->mb_stride] == sl->slice_num;
deblock_top = sl->top_type;
} else {
- deblock_topleft = (h->mb_x > 0);
- deblock_top = (h->mb_y > !!MB_FIELD(h));
+ deblock_topleft = (sl->mb_x > 0);
+ deblock_top = (sl->mb_y > !!MB_FIELD(h));
}
src_y -= linesize + 1 + pixel_shift;
src_cb -= uvlinesize + 1 + pixel_shift;
src_cr -= uvlinesize + 1 + pixel_shift;
- top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
- top_border = h->top_borders[top_idx][h->mb_x];
+ top_border_m1 = h->top_borders[top_idx][sl->mb_x - 1];
+ top_border = h->top_borders[top_idx][sl->mb_x];
#define XCHG(a, b, xchg) \
if (pixel_shift) { \
@@ -558,8 +558,8 @@ static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl
}
XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
- if (h->mb_x + 1 < h->mb_width) {
- XCHG(h->top_borders[top_idx][h->mb_x + 1],
+ if (sl->mb_x + 1 < h->mb_width) {
+ XCHG(h->top_borders[top_idx][sl->mb_x + 1],
src_y + (17 << pixel_shift), 1);
}
}
@@ -574,9 +574,9 @@ static av_always_inline void xchg_mb_border(H264Context *h, H264SliceContext *sl
XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
- if (h->mb_x + 1 < h->mb_width) {
- XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
- XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
+ if (sl->mb_x + 1 < h->mb_width) {
+ XCHG(h->top_borders[top_idx][sl->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
+ XCHG(h->top_borders[top_idx][sl->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
}
}
} else {
@@ -671,7 +671,7 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h,
uint64_t tr_high;
if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
- assert(h->mb_y || linesize <= block_offset[i]);
+ assert(sl->mb_y || linesize <= block_offset[i]);
if (!topright_avail) {
if (pixel_shift) {
tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c
index 4eca679357..556a2b5dd3 100644
--- a/libavcodec/h264_mb_template.c
+++ b/libavcodec/h264_mb_template.c
@@ -42,8 +42,8 @@
static av_noinline void FUNC(hl_decode_mb)(H264Context *h, H264SliceContext *sl)
{
- const int mb_x = h->mb_x;
- const int mb_y = h->mb_y;
+ const int mb_x = sl->mb_x;
+ const int mb_y = sl->mb_y;
const int mb_xy = sl->mb_xy;
const int mb_type = h->cur_pic.mb_type[mb_xy];
uint8_t *dest_y, *dest_cb, *dest_cr;
@@ -61,8 +61,8 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h, H264SliceContext *sl)
dest_cb = h->cur_pic.f.data[1] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * h->uvlinesize * block_h;
dest_cr = h->cur_pic.f.data[2] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * h->uvlinesize * block_h;
- h->vdsp.prefetch(dest_y + (h->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT), h->linesize, 4);
- h->vdsp.prefetch(dest_cb + (h->mb_x & 7) * h->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2);
+ h->vdsp.prefetch(dest_y + (sl->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT), h->linesize, 4);
+ h->vdsp.prefetch(dest_cb + (sl->mb_x & 7) * h->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2);
h->list_counts[mb_xy] = sl->list_count;
@@ -82,13 +82,13 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h, H264SliceContext *sl)
continue;
if (IS_16X16(mb_type)) {
int8_t *ref = &sl->ref_cache[list][scan8[0]];
- fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (h->mb_y & 1), 1);
+ fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (sl->mb_y & 1), 1);
} else {
for (i = 0; i < 16; i += 4) {
int ref = sl->ref_cache[list][scan8[i]];
if (ref >= 0)
fill_rectangle(&sl->ref_cache[list][scan8[i]], 2, 2,
- 8, (16 + ref) ^ (h->mb_y & 1), 1);
+ 8, (16 + ref) ^ (sl->mb_y & 1), 1);
}
}
}
@@ -274,8 +274,8 @@ static av_noinline void FUNC(hl_decode_mb)(H264Context *h, H264SliceContext *sl)
static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h, H264SliceContext *sl)
{
- const int mb_x = h->mb_x;
- const int mb_y = h->mb_y;
+ const int mb_x = sl->mb_x;
+ const int mb_y = sl->mb_y;
const int mb_xy = sl->mb_xy;
const int mb_type = h->cur_pic.mb_type[mb_xy];
uint8_t *dest[3];
@@ -288,7 +288,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h, H264SliceContext
for (p = 0; p < plane_count; p++) {
dest[p] = h->cur_pic.f.data[p] +
((mb_x << PIXEL_SHIFT) + mb_y * h->linesize) * 16;
- h->vdsp.prefetch(dest[p] + (h->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT),
+ h->vdsp.prefetch(dest[p] + (sl->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT),
h->linesize, 4);
}
@@ -307,13 +307,13 @@ static av_noinline void FUNC(hl_decode_mb_444)(H264Context *h, H264SliceContext
continue;
if (IS_16X16(mb_type)) {
int8_t *ref = &sl->ref_cache[list][scan8[0]];
- fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (h->mb_y & 1), 1);
+ fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (sl->mb_y & 1), 1);
} else {
for (i = 0; i < 16; i += 4) {
int ref = sl->ref_cache[list][scan8[i]];
if (ref >= 0)
fill_rectangle(&sl->ref_cache[list][scan8[i]], 2, 2,
- 8, (16 + ref) ^ (h->mb_y & 1), 1);
+ 8, (16 + ref) ^ (sl->mb_y & 1), 1);
}
}
}
diff --git a/libavcodec/h264_mc_template.c b/libavcodec/h264_mc_template.c
index 575320a301..f1f906f729 100644
--- a/libavcodec/h264_mc_template.c
+++ b/libavcodec/h264_mc_template.c
@@ -49,7 +49,7 @@ static void mc_part(H264Context *h, H264SliceContext *sl,
int list0, int list1)
{
if ((sl->use_weight == 2 && list0 && list1 &&
- (sl->implicit_weight[sl->ref_cache[0][scan8[n]]][sl->ref_cache[1][scan8[n]]][h->mb_y & 1] != 32)) ||
+ (sl->implicit_weight[sl->ref_cache[0][scan8[n]]][sl->ref_cache[1][scan8[n]]][sl->mb_y & 1] != 32)) ||
sl->use_weight == 1)
mc_part_weighted(h, sl, n, square, height, delta, dest_y, dest_cb, dest_cr,
x_offset, y_offset, qpix_put, chroma_put,
diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
index ca6323726f..a3616796ba 100644
--- a/libavcodec/h264_mvpred.h
+++ b/libavcodec/h264_mvpred.h
@@ -64,7 +64,7 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, H264SliceContext *
if (!MB_FIELD(h) && IS_INTERLACED(sl->left_type[0])) {
SET_DIAG_MV(* 2, >> 1, sl->left_mb_xy[0] + h->mb_stride,
- (h->mb_y & 1) * 2 + (i >> 5));
+ (sl->mb_y & 1) * 2 + (i >> 5));
}
if (MB_FIELD(h) && !IS_INTERLACED(sl->left_type[0])) {
// left shift will turn LIST_NOT_USED into PART_NOT_AVAILABLE, but that's OK.
@@ -148,7 +148,7 @@ static av_always_inline void pred_motion(H264Context *const h,
tprintf(h->avctx,
"pred_motion (%2d %2d %2d) (%2d %2d %2d) (%2d %2d %2d) -> (%2d %2d %2d) at %2d %2d %d list %d\n",
top_ref, B[0], B[1], diagonal_ref, C[0], C[1], left_ref,
- A[0], A[1], ref, *mx, *my, h->mb_x, h->mb_y, n, list);
+ A[0], A[1], ref, *mx, *my, sl->mb_x, sl->mb_y, n, list);
}
/**
@@ -167,7 +167,7 @@ static av_always_inline void pred_16x8_motion(H264Context *const h,
const int16_t *const B = sl->mv_cache[list][scan8[0] - 8];
tprintf(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
- top_ref, B[0], B[1], h->mb_x, h->mb_y, n, list);
+ top_ref, B[0], B[1], sl->mb_x, sl->mb_y, n, list);
if (top_ref == ref) {
*mx = B[0];
@@ -179,7 +179,7 @@ static av_always_inline void pred_16x8_motion(H264Context *const h,
const int16_t *const A = sl->mv_cache[list][scan8[8] - 1];
tprintf(h->avctx, "pred_16x8: (%2d %2d %2d) at %2d %2d %d list %d\n",
- left_ref, A[0], A[1], h->mb_x, h->mb_y, n, list);
+ left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);
if (left_ref == ref) {
*mx = A[0];
@@ -208,7 +208,7 @@ static av_always_inline void pred_8x16_motion(H264Context *const h,
const int16_t *const A = sl->mv_cache[list][scan8[0] - 1];
tprintf(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
- left_ref, A[0], A[1], h->mb_x, h->mb_y, n, list);
+ left_ref, A[0], A[1], sl->mb_x, sl->mb_y, n, list);
if (left_ref == ref) {
*mx = A[0];
@@ -222,7 +222,7 @@ static av_always_inline void pred_8x16_motion(H264Context *const h,
diagonal_ref = fetch_diagonal_mv(h, sl, &C, scan8[4], list, 2);
tprintf(h->avctx, "pred_8x16: (%2d %2d %2d) at %2d %2d %d list %d\n",
- diagonal_ref, C[0], C[1], h->mb_x, h->mb_y, n, list);
+ diagonal_ref, C[0], C[1], sl->mb_x, sl->mb_y, n, list);
if (diagonal_ref == ref) {
*mx = C[0];
@@ -299,7 +299,7 @@ static av_always_inline void pred_pskip_motion(H264Context *const h,
}
tprintf(h->avctx, "pred_pskip: (%d) (%d) at %2d %2d\n",
- top_ref, left_ref, h->mb_x, h->mb_y);
+ top_ref, left_ref, sl->mb_x, sl->mb_y);
if (USES_LIST(sl->topright_type, 0)) {
diagonal_ref = ref[4 * sl->topright_mb_xy + 2];
@@ -378,7 +378,7 @@ static void fill_decode_neighbors(H264Context *h, H264SliceContext *sl, int mb_t
if (FRAME_MBAFF(h)) {
const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
- if (h->mb_y & 1) {
+ if (sl->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] = left_xy[LTOP] = mb_xy - h->mb_stride - 1;
if (curr_mb_field_flag) {
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 64dae07a16..cea213e158 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -719,7 +719,8 @@ static int h264_frame_start(H264Context *h)
return 0;
}
-static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y,
+static av_always_inline void backup_mb_border(H264Context *h, H264SliceContext *sl,
+ uint8_t *src_y,
uint8_t *src_cb, uint8_t *src_cr,
int linesize, int uvlinesize,
int simple)
@@ -735,9 +736,9 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y,
src_cr -= uvlinesize;
if (!simple && FRAME_MBAFF(h)) {
- if (h->mb_y & 1) {
+ if (sl->mb_y & 1) {
if (!MB_MBAFF(h)) {
- top_border = h->top_borders[0][h->mb_x];
+ top_border = h->top_borders[0][sl->mb_x];
AV_COPY128(top_border, src_y + 15 * linesize);
if (pixel_shift)
AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
@@ -777,7 +778,7 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y,
return;
}
- top_border = h->top_borders[top_idx][h->mb_x];
+ top_border = h->top_borders[top_idx][sl->mb_x];
/* There are two lines saved, the line above the top macroblock
* of a pair, and the line above the bottom macroblock. */
AV_COPY128(top_border, src_y + 16 * linesize);
@@ -1207,8 +1208,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
slice_type = get_ue_golomb_31(&h->gb);
if (slice_type > 9) {
av_log(h->avctx, AV_LOG_ERROR,
- "slice type %d too large at %d %d\n",
- slice_type, h->mb_x, h->mb_y);
+ "slice type %d too large at %d\n",
+ slice_type, first_mb_in_slice);
return AVERROR_INVALIDDATA;
}
if (slice_type > 4) {
@@ -1556,12 +1557,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, H264Contex
av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
return AVERROR_INVALIDDATA;
}
- h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
- h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) <<
+ h->resync_mb_x = sl->mb_x = first_mb_in_slice % h->mb_width;
+ h->resync_mb_y = sl->mb_y = (first_mb_in_slice / h->mb_width) <<
FIELD_OR_MBAFF_PICTURE(h);
if (h->picture_structure == PICT_BOTTOM_FIELD)
- h->resync_mb_y = h->mb_y = h->mb_y + 1;
- assert(h->mb_y < h->mb_height);
+ h->resync_mb_y = sl->mb_y = sl->mb_y + 1;
+ assert(sl->mb_y < h->mb_height);
if (h->picture_structure == PICT_FRAME) {
h->curr_pic_num = h->frame_num;
@@ -1895,7 +1896,7 @@ static av_always_inline void fill_filter_caches_inter(H264Context *h,
}
{
- int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
+ int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
@@ -1924,7 +1925,7 @@ static int fill_filter_caches(H264Context *h, H264SliceContext *sl, int mb_type)
if (FRAME_MBAFF(h)) {
const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
- if (h->mb_y & 1) {
+ if (sl->mb_y & 1) {
if (left_mb_field_flag != curr_mb_field_flag)
left_xy[LTOP] -= h->mb_stride;
} else {
@@ -2056,7 +2057,7 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
{
uint8_t *dest_y, *dest_cb, *dest_cr;
int linesize, uvlinesize, mb_x, mb_y;
- const int end_mb_y = h->mb_y + FRAME_MBAFF(h);
+ const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
const int old_slice_type = sl->slice_type;
const int pixel_shift = h->pixel_shift;
const int block_h = 16 >> h->chroma_y_shift;
@@ -2074,8 +2075,8 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
h->mb_mbaff =
h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
- h->mb_x = mb_x;
- h->mb_y = mb_y;
+ sl->mb_x = mb_x;
+ sl->mb_y = mb_y;
dest_y = h->cur_pic.f.data[0] +
((mb_x << pixel_shift) + mb_y * h->linesize) * 16;
dest_cb = h->cur_pic.f.data[1] +
@@ -2098,7 +2099,7 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
linesize = sl->mb_linesize = h->linesize;
uvlinesize = sl->mb_uvlinesize = h->uvlinesize;
}
- backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
+ backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
uvlinesize, 0);
if (fill_filter_caches(h, sl, mb_type))
continue;
@@ -2115,15 +2116,15 @@ static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int e
}
}
sl->slice_type = old_slice_type;
- h->mb_x = end_x;
- h->mb_y = end_mb_y - FRAME_MBAFF(h);
+ sl->mb_x = end_x;
+ sl->mb_y = end_mb_y - FRAME_MBAFF(h);
sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
}
static void predict_field_decoding_flag(H264Context *h, H264SliceContext *sl)
{
- const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
+ const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
h->cur_pic.mb_type[mb_xy - 1] :
(h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
@@ -2136,7 +2137,7 @@ static void predict_field_decoding_flag(H264Context *h, H264SliceContext *sl)
*/
static void decode_finish_row(H264Context *h, H264SliceContext *sl)
{
- int top = 16 * (h->mb_y >> FIELD_PICTURE(h));
+ int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
int height = 16 << FRAME_MBAFF(h);
int deblock_border = (16 + 4) << FRAME_MBAFF(h);
@@ -2181,7 +2182,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
{
H264SliceContext *sl = arg;
H264Context *h = sl->h264;
- int lf_x_start = h->mb_x;
+ int lf_x_start = sl->mb_x;
sl->mb_skip_run = -1;
@@ -2211,53 +2212,53 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
// FIXME optimal? or let mb_decode decode 16x32 ?
if (ret >= 0 && FRAME_MBAFF(h)) {
- h->mb_y++;
+ sl->mb_y++;
ret = ff_h264_decode_mb_cabac(h, sl);
if (ret >= 0)
ff_h264_hl_decode_mb(h, sl);
- h->mb_y--;
+ sl->mb_y--;
}
eos = get_cabac_terminate(&sl->cabac);
if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
- er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
- h->mb_y, ER_MB_END);
- if (h->mb_x >= lf_x_start)
- loop_filter(h, sl, lf_x_start, h->mb_x + 1);
+ er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, sl->mb_x - 1,
+ sl->mb_y, ER_MB_END);
+ if (sl->mb_x >= lf_x_start)
+ loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
return 0;
}
if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
av_log(h->avctx, AV_LOG_ERROR,
"error while decoding MB %d %d, bytestream %td\n",
- h->mb_x, h->mb_y,
+ sl->mb_x, sl->mb_y,
sl->cabac.bytestream_end - sl->cabac.bytestream);
- er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, h->mb_x,
- h->mb_y, ER_MB_ERROR);
+ er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, sl->mb_x,
+ sl->mb_y, ER_MB_ERROR);
return AVERROR_INVALIDDATA;
}
- if (++h->mb_x >= h->mb_width) {
- loop_filter(h, sl, lf_x_start, h->mb_x);
- h->mb_x = lf_x_start = 0;
+ if (++sl->mb_x >= h->mb_width) {
+ loop_filter(h, sl, lf_x_start, sl->mb_x);
+ sl->mb_x = lf_x_start = 0;
decode_finish_row(h, sl);
- ++h->mb_y;
+ ++sl->mb_y;
if (FIELD_OR_MBAFF_PICTURE(h)) {
- ++h->mb_y;
- if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
+ ++sl->mb_y;
+ if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
predict_field_decoding_flag(h, sl);
}
}
- if (eos || h->mb_y >= h->mb_height) {
+ if (eos || sl->mb_y >= h->mb_height) {
tprintf(h->avctx, "slice end %d %d\n",
get_bits_count(&h->gb), h->gb.size_in_bits);
- er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
- h->mb_y, ER_MB_END);
- if (h->mb_x > lf_x_start)
- loop_filter(h, sl, lf_x_start, h->mb_x);
+ er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, sl->mb_x - 1,
+ sl->mb_y, ER_MB_END);
+ if (sl->mb_x > lf_x_start)
+ loop_filter(h, sl, lf_x_start, sl->mb_x);
return 0;
}
}
@@ -2270,44 +2271,44 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
// FIXME optimal? or let mb_decode decode 16x32 ?
if (ret >= 0 && FRAME_MBAFF(h)) {
- h->mb_y++;
+ sl->mb_y++;
ret = ff_h264_decode_mb_cavlc(h, sl);
if (ret >= 0)
ff_h264_hl_decode_mb(h, sl);
- h->mb_y--;
+ sl->mb_y--;
}
if (ret < 0) {
av_log(h->avctx, AV_LOG_ERROR,
- "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
- er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, h->mb_x,
- h->mb_y, ER_MB_ERROR);
+ "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
+ er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, sl->mb_x,
+ sl->mb_y, ER_MB_ERROR);
return ret;
}
- if (++h->mb_x >= h->mb_width) {
- loop_filter(h, sl, lf_x_start, h->mb_x);
- h->mb_x = lf_x_start = 0;
+ if (++sl->mb_x >= h->mb_width) {
+ loop_filter(h, sl, lf_x_start, sl->mb_x);
+ sl->mb_x = lf_x_start = 0;
decode_finish_row(h, sl);
- ++h->mb_y;
+ ++sl->mb_y;
if (FIELD_OR_MBAFF_PICTURE(h)) {
- ++h->mb_y;
- if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
+ ++sl->mb_y;
+ if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
predict_field_decoding_flag(h, sl);
}
- if (h->mb_y >= h->mb_height) {
+ if (sl->mb_y >= h->mb_height) {
tprintf(h->avctx, "slice end %d %d\n",
get_bits_count(&h->gb), h->gb.size_in_bits);
if (get_bits_left(&h->gb) == 0) {
er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y,
- h->mb_x - 1, h->mb_y, ER_MB_END);
+ sl->mb_x - 1, sl->mb_y, ER_MB_END);
return 0;
} else {
er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y,
- h->mb_x - 1, h->mb_y, ER_MB_END);
+ sl->mb_x - 1, sl->mb_y, ER_MB_END);
return AVERROR_INVALIDDATA;
}
@@ -2320,14 +2321,14 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
if (get_bits_left(&h->gb) == 0) {
er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y,
- h->mb_x - 1, h->mb_y, ER_MB_END);
- if (h->mb_x > lf_x_start)
- loop_filter(h, sl, lf_x_start, h->mb_x);
+ sl->mb_x - 1, sl->mb_y, ER_MB_END);
+ if (sl->mb_x > lf_x_start)
+ loop_filter(h, sl, lf_x_start, sl->mb_x);
return 0;
} else {
- er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, h->mb_x,
- h->mb_y, ER_MB_ERROR);
+ er_add_slice(h, sl, h->resync_mb_x, h->resync_mb_y, sl->mb_x,
+ sl->mb_y, ER_MB_ERROR);
return AVERROR_INVALIDDATA;
}
@@ -2346,12 +2347,15 @@ int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
{
AVCodecContext *const avctx = h->avctx;
H264Context *hx;
+ H264SliceContext *sl;
int i;
if (h->avctx->hwaccel)
return 0;
if (context_count == 1) {
- return decode_slice(avctx, &h->slice_ctx[0]);
+ int ret = decode_slice(avctx, &h->slice_ctx[0]);
+ h->mb_y = h->slice_ctx[0].mb_y;
+ return ret;
} else {
for (i = 1; i < context_count; i++) {
hx = h->thread_context[i];
@@ -2363,8 +2367,8 @@ int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
/* pull back stuff from slices to master context */
hx = h->thread_context[context_count - 1];
- h->mb_x = hx->mb_x;
- h->mb_y = hx->mb_y;
+ sl = &h->slice_ctx[context_count - 1];
+ h->mb_y = sl->mb_y;
h->droppable = hx->droppable;
h->picture_structure = hx->picture_structure;
for (i = 1; i < context_count; i++)
diff --git a/libavcodec/svq3.c b/libavcodec/svq3.c
index bd35942539..56b5fb40b9 100644
--- a/libavcodec/svq3.c
+++ b/libavcodec/svq3.c
@@ -378,11 +378,11 @@ static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
for (i = 0; i < 16; i += part_height)
for (j = 0; j < 16; j += part_width) {
- const int b_xy = (4 * h->mb_x + (j >> 2)) +
- (4 * h->mb_y + (i >> 2)) * h->b_stride;
+ const int b_xy = (4 * sl->mb_x + (j >> 2)) +
+ (4 * sl->mb_y + (i >> 2)) * h->b_stride;
int dxy;
- x = 16 * h->mb_x + j;
- y = 16 * h->mb_y + i;
+ x = 16 * sl->mb_x + j;
+ y = 16 * sl->mb_y + i;
k = (j >> 2 & 1) + (i >> 1 & 2) +
(j >> 1 & 4) + (i & 8);
@@ -488,20 +488,20 @@ static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
uint32_t vlc;
int8_t *top, *left;
const int mb_xy = sl->mb_xy;
- const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
+ const int b_xy = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride;
- sl->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
- sl->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
+ sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
+ sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
sl->topright_samples_available = 0xFFFF;
if (mb_type == 0) { /* SKIP */
if (h->pict_type == AV_PICTURE_TYPE_P ||
s->next_pic->mb_type[mb_xy] == -1) {
- svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
+ svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
0, 0, 0, 0, 0, 0);
if (h->pict_type == AV_PICTURE_TYPE_B)
- svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
+ svq3_mc_dir_part(s, 16 * sl->mb_x, 16 * sl->mb_y, 16, 16,
0, 0, 0, 0, 1, 1);
mb_type = MB_TYPE_SKIP;
@@ -533,7 +533,7 @@ static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
*/
for (m = 0; m < 2; m++) {
- if (h->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
+ if (sl->mb_x > 0 && sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
for (i = 0; i < 4; i++)
AV_COPY32(sl->mv_cache[m][scan8[0] - 1 + i * 8],
h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
@@ -541,14 +541,14 @@ static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
for (i = 0; i < 4; i++)
AV_ZERO32(sl->mv_cache[m][scan8[0] - 1 + i * 8]);
}
- if (h->mb_y > 0) {
+ if (sl->mb_y > 0) {
memcpy(sl->mv_cache[m][scan8[0] - 1 * 8],
h->cur_pic.motion_val[m][b_xy - h->b_stride],
4 * 2 * sizeof(int16_t));
memset(&sl->ref_cache[m][scan8[0] - 1 * 8],
(sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
- if (h->mb_x < h->mb_width - 1) {
+ if (sl->mb_x < h->mb_width - 1) {
AV_COPY32(sl->mv_cache[m][scan8[0] + 4 - 1 * 8],
h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
sl->ref_cache[m][scan8[0] + 4 - 1 * 8] =
@@ -556,7 +556,7 @@ static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
} else
sl->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
- if (h->mb_x > 0) {
+ if (sl->mb_x > 0) {
AV_COPY32(sl->mv_cache[m][scan8[0] - 1 - 1 * 8],
h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
sl->ref_cache[m][scan8[0] - 1 - 1 * 8] =
@@ -599,13 +599,13 @@ static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
memset(sl->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
if (mb_type == 8) {
- if (h->mb_x > 0) {
+ if (sl->mb_x > 0) {
for (i = 0; i < 4; i++)
sl->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
if (sl->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
sl->left_samples_available = 0x5F5F;
}
- if (h->mb_y > 0) {
+ if (sl->mb_y > 0) {
sl->intra4x4_pred_mode_cache[4 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
sl->intra4x4_pred_mode_cache[5 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
sl->intra4x4_pred_mode_cache[6 + 8 * 0] = sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
@@ -646,8 +646,8 @@ static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
if (mb_type == 8) {
ff_h264_check_intra4x4_pred_mode(h, sl);
- sl->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
- sl->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
+ sl->top_samples_available = (sl->mb_y == 0) ? 0x33FF : 0xFFFF;
+ sl->left_samples_available = (sl->mb_x == 0) ? 0x5F5F : 0xFFFF;
} else {
for (i = 0; i < 4; i++)
memset(&sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
@@ -822,7 +822,7 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
if ((header & 0x9F) == 2) {
i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
sl->mb_skip_run = get_bits(&h->gb, i) -
- (h->mb_y * h->mb_width + h->mb_x);
+ (sl->mb_y * h->mb_width + sl->mb_x);
} else {
skip_bits1(&h->gb);
sl->mb_skip_run = 0;
@@ -845,17 +845,17 @@ static int svq3_decode_slice_header(AVCodecContext *avctx)
skip_bits(&h->gb, 8);
/* reset intra predictors and invalidate motion vector references */
- if (h->mb_x > 0) {
+ if (sl->mb_x > 0) {
memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
-1, 4 * sizeof(int8_t));
- memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
- -1, 8 * sizeof(int8_t) * h->mb_x);
+ memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - sl->mb_x],
+ -1, 8 * sizeof(int8_t) * sl->mb_x);
}
- if (h->mb_y > 0) {
+ if (sl->mb_y > 0) {
memset(sl->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
- -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
+ -1, 8 * sizeof(int8_t) * (h->mb_width - sl->mb_x));
- if (h->mb_x > 0)
+ if (sl->mb_x > 0)
sl->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
}
@@ -1132,7 +1132,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
init_get_bits(&h->gb, buf, 8 * buf_size);
- h->mb_x = h->mb_y = sl->mb_xy = 0;
+ sl->mb_x = sl->mb_y = sl->mb_xy = 0;
if (svq3_decode_slice_header(avctx))
return -1;
@@ -1245,10 +1245,10 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
}
}
- for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
- for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
+ for (sl->mb_y = 0; sl->mb_y < h->mb_height; sl->mb_y++) {
+ for (sl->mb_x = 0; sl->mb_x < h->mb_width; sl->mb_x++) {
unsigned mb_type;
- sl->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
+ sl->mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
((get_bits_count(&h->gb) & 7) == 0 ||
@@ -1270,7 +1270,7 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
mb_type += 4;
if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
av_log(h->avctx, AV_LOG_ERROR,
- "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
+ "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
return -1;
}
@@ -1278,13 +1278,13 @@ static int svq3_decode_frame(AVCodecContext *avctx, void *data,
ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
- h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
+ h->cur_pic.mb_type[sl->mb_x + sl->mb_y * h->mb_stride] =
(h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
}
ff_draw_horiz_band(avctx, &s->cur_pic->f,
s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
- 16 * h->mb_y, 16, h->picture_structure, 0,
+ 16 * sl->mb_y, 16, h->picture_structure, 0,
h->low_delay);
}
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index ce7643eb9a..481d7d17f5 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -329,7 +329,7 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx,
if (!slice_param)
return -1;
slice_param->slice_data_bit_offset = get_bits_count(&h->gb) + 8; /* bit buffer started beyond nal_unit_type */
- slice_param->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + h->mb_x;
+ slice_param->first_mb_in_slice = (sl->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + sl->mb_x;
slice_param->slice_type = ff_h264_get_slice_type(sl);
slice_param->direct_spatial_mv_pred_flag = sl->slice_type == AV_PICTURE_TYPE_B ? sl->direct_spatial_mv_pred : 0;
slice_param->num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0;