diff options
author | Anton Khirnov <anton@khirnov.net> | 2013-02-03 11:10:05 +0100 |
---|---|---|
committer | Anton Khirnov <anton@khirnov.net> | 2013-02-15 16:35:16 +0100 |
commit | 2c541554076cc8a72e7145d4da30389ca763f32f (patch) | |
tree | b404051a202e1ffffe4ecee2df1b89f9f92a70f5 /libavcodec/vdpau.c | |
parent | 1d0feb5d1ac04d187b335f0e8d411c9f40b3a885 (diff) | |
download | ffmpeg-2c541554076cc8a72e7145d4da30389ca763f32f.tar.gz |
h264: deMpegEncContextize
Most of the changes are just trivial replacements of
fields from MpegEncContext with equivalent fields in H264Context.
Everything in h264* other than h264.c is those trivial changes.
The nontrivial parts are:
1) extracting a simplified version of the frame management code from
mpegvideo.c. We don't need last/next_picture anymore, since h264 uses
its own more complex system already and those were set only to appease
the mpegvideo parts.
2) some tables that need to be allocated/freed in appropriate places.
3) hwaccels -- mostly trivial replacements.
for dxva, the draw_horiz_band() call is moved from
ff_dxva2_common_end_frame() to per-codec end_frame() callbacks,
because it's now different for h264 and MpegEncContext-based
decoders.
4) svq3 -- it does not use h264's complex reference system, so I just
added some very simplistic frame management instead and dropped the
use of ff_h264_frame_start(). Because of this I also had to move some
initialization code to svq3.
Additional fixes for chroma format and bit depth changes by
Janne Grunau <janne-libav@jannau.net>
Signed-off-by: Anton Khirnov <anton@khirnov.net>
Diffstat (limited to 'libavcodec/vdpau.c')
-rw-r--r-- | libavcodec/vdpau.c | 54 |
1 files changed, 23 insertions, 31 deletions
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c index 3b773868e9..e5c459b414 100644 --- a/libavcodec/vdpau.c +++ b/libavcodec/vdpau.c @@ -48,20 +48,18 @@ int ff_vdpau_common_start_frame(AVCodecContext *avctx, return 0; } -int ff_vdpau_common_end_frame(AVCodecContext *avctx) +int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx) { - MpegEncContext * const s = avctx->priv_data; AVVDPAUContext *hwctx = avctx->hwaccel_context; + MpegEncContext *s = avctx->priv_data; + VdpVideoSurface surf = ff_vdpau_get_surface_id(s->current_picture_ptr); - if (hwctx->bitstream_buffers_used) { - VdpVideoSurface surf = ff_vdpau_get_surface_id(s->current_picture_ptr); + hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info, + hwctx->bitstream_buffers_used, hwctx->bitstream_buffers); - hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info, - hwctx->bitstream_buffers_used, hwctx->bitstream_buffers); + ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); + hwctx->bitstream_buffers_used = 0; - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); - hwctx->bitstream_buffers_used = 0; - } return 0; } @@ -87,15 +85,14 @@ int ff_vdpau_add_buffer(AVCodecContext *avctx, /* Obsolete non-hwaccel VDPAU support below... 
*/ -void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) +void ff_vdpau_h264_set_reference_frames(H264Context *h) { - H264Context *h = s->avctx->priv_data; struct vdpau_render_state *render, *render_ref; VdpReferenceFrameH264 *rf, *rf2; Picture *pic; int i, list, pic_frame_idx; - render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; + render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; assert(render); rf = &render->info.h264.referenceFrames[0]; @@ -156,12 +153,9 @@ void ff_vdpau_h264_set_reference_frames(MpegEncContext *s) } } -void ff_vdpau_add_data_chunk(MpegEncContext *s, - const uint8_t *buf, int buf_size) +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size) { - struct vdpau_render_state *render; - - render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; + struct vdpau_render_state *render = (struct vdpau_render_state*)data; assert(render); render->bitstream_buffers= av_fast_realloc( @@ -176,17 +170,16 @@ void ff_vdpau_add_data_chunk(MpegEncContext *s, render->bitstream_buffers_used++; } -void ff_vdpau_h264_picture_start(MpegEncContext *s) +void ff_vdpau_h264_picture_start(H264Context *h) { - H264Context *h = s->avctx->priv_data; struct vdpau_render_state *render; int i; - render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0]; + render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; assert(render); for (i = 0; i < 2; ++i) { - int foc = s->current_picture_ptr->field_poc[i]; + int foc = h->cur_pic_ptr->field_poc[i]; if (foc == INT_MAX) foc = 0; render->info.h264.field_order_cnt[i] = foc; @@ -195,21 +188,20 @@ void ff_vdpau_h264_picture_start(MpegEncContext *s) render->info.h264.frame_num = h->frame_num; } -void ff_vdpau_h264_picture_complete(MpegEncContext *s) +void ff_vdpau_h264_picture_complete(H264Context *h) { - H264Context *h = s->avctx->priv_data; struct vdpau_render_state *render; - render = (struct vdpau_render_state 
*)s->current_picture_ptr->f.data[0]; + render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0]; assert(render); render->info.h264.slice_count = h->slice_num; if (render->info.h264.slice_count < 1) return; - render->info.h264.is_reference = (s->current_picture_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE; - render->info.h264.field_pic_flag = s->picture_structure != PICT_FRAME; - render->info.h264.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD; + render->info.h264.is_reference = (h->cur_pic_ptr->f.reference & 3) ? VDP_TRUE : VDP_FALSE; + render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME; + render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; render->info.h264.num_ref_frames = h->sps.ref_frame_count; render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag; render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred; @@ -235,7 +227,7 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s) memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0])); memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0])); - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); + ff_h264_draw_horiz_band(h, 0, h->avctx->height); render->bitstream_buffers_used = 0; } @@ -287,7 +279,7 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, render->info.mpeg.forward_reference = last->surface; } - ff_vdpau_add_data_chunk(s, buf, buf_size); + ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size); render->info.mpeg.slice_count = slice_count; @@ -357,7 +349,7 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, render->info.vc1.forward_reference = last->surface; } - ff_vdpau_add_data_chunk(s, buf, buf_size); + ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, 
buf_size); render->info.vc1.slice_count = 1; @@ -413,7 +405,7 @@ void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf, render->info.mpeg4.forward_reference = last->surface; } - ff_vdpau_add_data_chunk(s, buf, buf_size); + ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size); ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); render->bitstream_buffers_used = 0; |