| author | Andreas Rheinhardt <andreas.rheinhardt@outlook.com> | 2022-02-06 14:49:23 +0100 |
|---|---|---|
| committer | Andreas Rheinhardt <andreas.rheinhardt@outlook.com> | 2022-02-09 17:22:35 +0100 |
| commit | 02220b88fc38ef9dd4f2d519f5d3e4151258b60c (patch) | |
| tree | 5ab09d5c019a822c0a839c6076bccff670986867 /libavcodec/hapdec.c | |
| parent | f025b8e110b36c1cdb4fb56c4cd57aeca1767b5b (diff) | |
| download | ffmpeg-02220b88fc38ef9dd4f2d519f5d3e4151258b60c.tar.gz | |
avcodec/thread: Don't use ThreadFrame when unnecessary
The majority of frame-threaded decoders (mainly the intra-only ones)
need exactly one part of ThreadFrame: the AVFrame. They need neither
the owners nor the progress, yet they had to use ThreadFrame because
ff_thread_(get|release)_buffer() requires it.
This commit changes this and makes these functions work with ordinary
AVFrames; the decoders that need the extra fields for progress
use ff_thread_(get|release)_ext_buffer(), which work exactly
as ff_thread_(get|release)_buffer() used to.
This also avoids some unnecessary allocations of progress AVBuffers,
namely for H.264 and HEVC film grain frames: These frames are not
used for synchronization and therefore don't need a ThreadFrame.
Also move the ThreadFrame structure as well as ff_thread_ref_frame()
to threadframe.h, the header for frame-threaded decoders with
inter-frame dependencies.
Reviewed-by: Anton Khirnov <anton@khirnov.net>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
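
For illustration, a minimal sketch of the two call patterns after this change (not part of the commit; the helpers intra_only_get_frame() and inter_coded_get_frame() are hypothetical, and the signatures follow the description above):

/* Sketch only: hypothetical helpers illustrating the new split. */
#include <limits.h>

#include "avcodec.h"
#include "thread.h"        /* ff_thread_get_buffer(): plain AVFrame           */
#include "threadframe.h"   /* ff_thread_get_ext_buffer(): ThreadFrame + sync  */

/* Intra-only decoder: no inter-frame dependencies, so an ordinary AVFrame
 * suffices (the pattern the hapdec.c diff below switches to). */
static int intra_only_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
    return ff_thread_get_buffer(avctx, frame, 0);
}

/* Decoder with inter-frame dependencies: keeps a ThreadFrame (tf->f must
 * already point at an allocated AVFrame) so other threads can wait on this
 * frame's decoding progress. */
static int inter_coded_get_frame(AVCodecContext *avctx, ThreadFrame *tf)
{
    int ret = ff_thread_get_ext_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;
    /* ...decode, then publish completion so consumer threads may proceed. */
    ff_thread_report_progress(tf, INT_MAX, 0);
    return 0;
}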
Diffstat (limited to 'libavcodec/hapdec.c')
-rw-r--r-- | libavcodec/hapdec.c | 14 |
1 file changed, 6 insertions, 8 deletions
diff --git a/libavcodec/hapdec.c b/libavcodec/hapdec.c
index 45c44ad78d..9f8dadc43d 100644
--- a/libavcodec/hapdec.c
+++ b/libavcodec/hapdec.c
@@ -305,7 +305,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
                       int *got_frame, AVPacket *avpkt)
 {
     HapContext *ctx = avctx->priv_data;
-    ThreadFrame tframe;
+    AVFrame *const frame = data;
     int ret, i, t;
     int section_size;
     enum HapSectionType section_type;
@@ -330,8 +330,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
     }

     /* Get the output frame ready to receive data */
-    tframe.f = data;
-    ret = ff_thread_get_buffer(avctx, &tframe, 0);
+    ret = ff_thread_get_buffer(avctx, frame, 0);
     if (ret < 0)
         return ret;

@@ -383,16 +382,15 @@ static int hap_decode(AVCodecContext *avctx, void *data,

         /* Use the decompress function on the texture, one block per thread */
         if (t == 0){
-            avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);
+            avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
         } else{
-            tframe.f = data;
-            avctx->execute2(avctx, decompress_texture2_thread, tframe.f, NULL, ctx->slice_count);
+            avctx->execute2(avctx, decompress_texture2_thread, frame, NULL, ctx->slice_count);
         }
     }

     /* Frame is ready to be output */
-    tframe.f->pict_type = AV_PICTURE_TYPE_I;
-    tframe.f->key_frame = 1;
+    frame->pict_type = AV_PICTURE_TYPE_I;
+    frame->key_frame = 1;
     *got_frame = 1;

     return avpkt->size;