author     Anton Khirnov <anton@khirnov.net>        2013-11-29 08:58:10 +0100
committer  Anton Khirnov <anton@khirnov.net>        2013-12-09 09:10:14 +0100
commit     d4f1188d1a662fed5347e70016da49e01563e8a8 (patch)
tree       e0baaa7669f49da3d4469d6a256494285910a645 /libavcodec
parent     c9ca220ef26e36abd22085e6fa156c0dbc43bbf0 (diff)
download   ffmpeg-d4f1188d1a662fed5347e70016da49e01563e8a8.tar.gz
dv: use AVFrame API properly
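The patch drops the AVFrame embedded in DVVideoContext (along with the avcodec_get_frame_defaults()/coded_frame setup in ff_dvvideo_init()) in favour of an AVFrame pointer: the decoder now writes straight into the frame the caller passes through the decode callback's data argument, so the final av_frame_move_ref() and the decoder's close callback become unnecessary. A minimal sketch of that decode-side pattern, assuming libavcodec's internal headers (internal.h for ff_get_buffer(), dv.h for DVVideoContext); the function name and the elided segment decoding are illustrative only, not the real dvdec.c:

#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer() */
#include "dv.h"         /* DVVideoContext */

/* Hypothetical condensed decode callback showing the pattern the patch
 * adopts: use the caller-provided AVFrame directly instead of a frame
 * owned by the codec context. */
static int sketch_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    DVVideoContext *s = avctx->priv_data;

    s->frame            = data;   /* caller-owned AVFrame, not a context member */
    s->frame->key_frame = 1;
    s->frame->pict_type = AV_PICTURE_TYPE_I;

    if (ff_get_buffer(avctx, s->frame, 0) < 0)
        return -1;                /* frame buffers are allocated for the caller */

    /* ... decode avpkt->data into s->frame->data[] (omitted) ... */

    *got_frame = 1;               /* no av_frame_move_ref() needed anymore */
    return avpkt->size;
}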
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/dv.c      2
-rw-r--r--  libavcodec/dv.h      2
-rw-r--r--  libavcodec/dvdec.c  40
-rw-r--r--  libavcodec/dvenc.c  33
4 files changed, 38 insertions, 39 deletions
diff --git a/libavcodec/dv.c b/libavcodec/dv.c
index 9d57940398..a6f614a425 100644
--- a/libavcodec/dv.c
+++ b/libavcodec/dv.c
@@ -313,8 +313,6 @@ av_cold int ff_dvvideo_init(AVCodecContext *avctx)
     s->idct_put[1] = ff_simple_idct248_put;             // FIXME: need to add it to DSP
     memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
 
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
     s->avctx = avctx;
     avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
 
diff --git a/libavcodec/dv.h b/libavcodec/dv.h
index 79896cbee5..01a4eec49a 100644
--- a/libavcodec/dv.h
+++ b/libavcodec/dv.h
@@ -34,7 +34,7 @@
 
 typedef struct DVVideoContext {
     const DVprofile *sys;
-    AVFrame          picture;
+    AVFrame         *frame;
     AVCodecContext  *avctx;
     uint8_t         *buf;
 
diff --git a/libavcodec/dvdec.c b/libavcodec/dvdec.c
index 28bf79b633..ef9ba4cd1c 100644
--- a/libavcodec/dvdec.c
+++ b/libavcodec/dvdec.c
@@ -258,12 +258,12 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
         if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
             (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
             (s->sys->height >= 720 && mb_y != 134)) {
-            y_stride = (s->picture.linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
+            y_stride = (s->frame->linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
         } else {
             y_stride = (2 << log2_blocksize);
         }
-        y_ptr    = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize);
-        linesize = s->picture.linesize[0] << is_field_mode[mb_index];
+        y_ptr    = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << log2_blocksize);
+        linesize = s->frame->linesize[0] << is_field_mode[mb_index];
         mb[0]    .idct_put(y_ptr                                   , linesize, block + 0*64);
         if (s->sys->video_stype == 4) { /* SD 422 */
             mb[2].idct_put(y_ptr + (1 << log2_blocksize)           , linesize, block + 2*64);
@@ -276,19 +276,19 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
         block += 4*64;
 
         /* idct_put'ting chrominance */
-        c_offset = (((mb_y >>  (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] +
+        c_offset = (((mb_y >>  (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
                      (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << log2_blocksize);
         for (j = 2; j; j--) {
-            uint8_t *c_ptr = s->picture.data[j] + c_offset;
+            uint8_t *c_ptr = s->frame->data[j] + c_offset;
             if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                 uint64_t aligned_pixels[64/8];
                 uint8_t *pixels = (uint8_t*)aligned_pixels;
                 uint8_t *c_ptr1, *ptr1;
                 int x, y;
                 mb->idct_put(pixels, 8, block);
-                for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) {
+                for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->frame->linesize[j], pixels += 8) {
                     ptr1   = pixels + (1 << (log2_blocksize - 1));
-                    c_ptr1 = c_ptr + (s->picture.linesize[j] << log2_blocksize);
+                    c_ptr1 = c_ptr + (s->frame->linesize[j] << log2_blocksize);
                     for (x = 0; x < (1 << (log2_blocksize - 1)); x++) {
                         c_ptr[x]  = pixels[x];
                         c_ptr1[x] = ptr1[x];
@@ -297,8 +297,8 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
                 block += 64; mb++;
             } else {
                 y_stride = (mb_y == 134) ? (1 << log2_blocksize) :
-                                           s->picture.linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
-                linesize = s->picture.linesize[j] << is_field_mode[mb_index];
+                                           s->frame->linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
+                linesize = s->frame->linesize[j] << is_field_mode[mb_index];
                 (mb++)->    idct_put(c_ptr           , linesize, block); block += 64;
                 if (s->sys->bpm == 8) {
                     (mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64;
@@ -327,8 +327,9 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
         return -1; /* NOTE: we only accept several full frames */
     }
 
-    s->picture.key_frame = 1;
-    s->picture.pict_type = AV_PICTURE_TYPE_I;
+    s->frame            = data;
+    s->frame->key_frame = 1;
+    s->frame->pict_type = AV_PICTURE_TYPE_I;
     avctx->pix_fmt   = s->sys->pix_fmt;
     avctx->time_base = s->sys->time_base;
 
@@ -336,12 +337,12 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     if (ret < 0)
         return ret;
 
-    if (ff_get_buffer(avctx, &s->picture, 0) < 0) {
+    if (ff_get_buffer(avctx, s->frame, 0) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
-    s->picture.interlaced_frame = 1;
-    s->picture.top_field_first  = 0;
+    s->frame->interlaced_frame = 1;
+    s->frame->top_field_first  = 0;
 
     s->buf = buf;
     avctx->execute(avctx, dv_decode_video_segment, s->sys->work_chunks, NULL,
@@ -351,7 +352,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
 
     /* return image */
     *got_frame = 1;
-    av_frame_move_ref(data, &s->picture);
 
     /* Determine the codec's sample_aspect ratio from the packet */
     vsc_pack = buf + 80*5 + 48 + 5;
@@ -364,15 +364,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
     return s->sys->frame_size;
 }
 
-static int dvvideo_close(AVCodecContext *c)
-{
-    DVVideoContext *s = c->priv_data;
-
-    av_frame_unref(&s->picture);
-
-    return 0;
-}
-
 AVCodec ff_dvvideo_decoder = {
     .name           = "dvvideo",
     .long_name      = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@@ -380,7 +371,6 @@ AVCodec ff_dvvideo_decoder = {
     .id             = AV_CODEC_ID_DVVIDEO,
     .priv_data_size = sizeof(DVVideoContext),
     .init           = ff_dvvideo_init,
-    .close          = dvvideo_close,
     .decode         = dvvideo_decode_frame,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
 };
diff --git a/libavcodec/dvenc.c b/libavcodec/dvenc.c
index 1e1fccfaa5..73c07f2c0e 100644
--- a/libavcodec/dvenc.c
+++ b/libavcodec/dvenc.c
@@ -43,6 +43,10 @@ static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
         return AVERROR(EINVAL);
     }
 
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
     dv_vlc_map_tableinit();
 
     return ff_dvvideo_init(avctx);
@@ -388,12 +392,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
         if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
             (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
             (s->sys->height >= 720 && mb_y != 134)) {
-            y_stride = s->picture.linesize[0] << 3;
+            y_stride = s->frame->linesize[0] << 3;
         } else {
             y_stride = 16;
         }
-        y_ptr    = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << 3);
-        linesize = s->picture.linesize[0];
+        y_ptr    = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << 3);
+        linesize = s->frame->linesize[0];
 
         if (s->sys->video_stype == 4) { /* SD 422 */
             vs_bit_size +=
@@ -411,12 +415,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
         enc_blk += 4;
 
         /* initializing chrominance blocks */
-        c_offset = (((mb_y >>  (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] +
+        c_offset = (((mb_y >>  (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
                      (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << 3);
         for (j = 2; j; j--) {
-            uint8_t *c_ptr = s->picture.data[j] + c_offset;
-            linesize = s->picture.linesize[j];
-            y_stride = (mb_y == 134) ? 8 : (s->picture.linesize[j] << 3);
+            uint8_t *c_ptr = s->frame->data[j] + c_offset;
+            linesize = s->frame->linesize[j];
+            y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
             if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                 uint8_t* d;
                 uint8_t* b = scratch;
@@ -664,10 +668,10 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
         return ret;
     }
 
-    c->pix_fmt           = s->sys->pix_fmt;
-    s->picture           = *frame;
-    s->picture.key_frame = 1;
-    s->picture.pict_type = AV_PICTURE_TYPE_I;
+    c->pix_fmt                = s->sys->pix_fmt;
+    s->frame                  = frame;
+    c->coded_frame->key_frame = 1;
+    c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
 
     s->buf = pkt->data;
     c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,
@@ -683,6 +687,12 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
     return 0;
 }
 
+static int dvvideo_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
+
 AVCodec ff_dvvideo_encoder = {
     .name           = "dvvideo",
     .long_name      = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@@ -691,6 +701,7 @@ AVCodec ff_dvvideo_encoder = {
     .priv_data_size = sizeof(DVVideoContext),
     .init           = dvvideo_init_encoder,
     .encode2        = dvvideo_encode_frame,
+    .close          = dvvideo_encode_close,
     .capabilities   = CODEC_CAP_SLICE_THREADS,
     .pix_fmts       = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
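On the encoder side, the diff gives avctx->coded_frame its own allocation instead of pointing it at a frame embedded in the context, which is why a close callback is added to ff_dvvideo_encoder. A minimal sketch of that lifecycle, with illustrative function names; in the real dvenc.c the key_frame/pict_type flags are set on coded_frame in encode2(), and the input frame is only referenced through s->frame:

#include "libavutil/attributes.h"
#include "libavutil/frame.h"
#include "avcodec.h"

/* Hypothetical init/close pair mirroring the lifecycle the patch introduces:
 * the encoder owns avctx->coded_frame, allocating it at init and freeing it
 * at close. */
static av_cold int sketch_encode_init(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    return 0;
}

static av_cold int sketch_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);
    return 0;
}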