author    | Andreas Rheinhardt <andreas.rheinhardt@outlook.com> | 2022-02-06 14:49:23 +0100
committer | Andreas Rheinhardt <andreas.rheinhardt@outlook.com> | 2022-02-09 17:22:35 +0100
commit    | 02220b88fc38ef9dd4f2d519f5d3e4151258b60c
tree      | 5ab09d5c019a822c0a839c6076bccff670986867 /libavcodec/tiff.c
parent    | f025b8e110b36c1cdb4fb56c4cd57aeca1767b5b
download  | ffmpeg-02220b88fc38ef9dd4f2d519f5d3e4151258b60c.tar.gz
avcodec/thread: Don't use ThreadFrame when unnecessary
The majority of frame-threaded decoders (mainly the intra-only ones)
need exactly one part of ThreadFrame: the AVFrame. They need neither
the owners nor the progress, yet they had to use ThreadFrame because
ff_thread_(get|release)_buffer() required it.
This commit changes that and makes these functions work with ordinary
AVFrames; the decoders that need the extra fields for progress
use ff_thread_(get|release)_ext_buffer(), which work exactly
as ff_thread_(get|release)_buffer() used to.
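A minimal sketch of the resulting call-site pattern, mirroring the tiff.c hunks in the diff below (`avctx`, `frame` and `ret` stand in for the usual decoder locals and are not taken verbatim from this commit):

```c
/* Old API: every decoder, even an intra-only one, had to wrap its frame. */
ThreadFrame tf = { .f = frame };
if ((ret = ff_thread_get_buffer(avctx, &tf, 0)) < 0)
    return ret;

/* New API: intra-only decoders pass the AVFrame directly ... */
if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
    return ret;

/* ... while decoders with inter-frame dependencies keep a ThreadFrame and
 * switch to the _ext_ variant, which behaves as the old function did. */
if ((ret = ff_thread_get_ext_buffer(avctx, &tf, 0)) < 0)
    return ret;
```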
This also avoids some unnecessary allocations of progress AVBuffers,
namely for H.264 and HEVC film grain frames: These frames are not
used for synchronization and therefore don't need a ThreadFrame.
Also move the ThreadFrame structure as well as ff_thread_ref_frame()
to threadframe.h, the header for frame-threaded decoders with
inter-frame dependencies.
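For context, the structure being moved bundles the frame with the threading state mentioned above; roughly (a sketch of the era's definition, not a verbatim copy):

```c
typedef struct ThreadFrame {
    AVFrame        *f;        /* the actual frame buffer                    */
    AVCodecContext *owner[2]; /* owning codec contexts                      */
    AVBufferRef    *progress; /* decode progress, used for inter-frame sync */
} ThreadFrame;
```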
Reviewed-by: Anton Khirnov <anton@khirnov.net>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Diffstat (limited to 'libavcodec/tiff.c')
-rw-r--r-- | libavcodec/tiff.c | 9
1 file changed, 4 insertions, 5 deletions
```diff
diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c
index fd85d104dc..923f85d07f 100644
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -1016,7 +1016,7 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
     return avpkt->size;
 }
 
-static int init_image(TiffContext *s, ThreadFrame *frame)
+static int init_image(TiffContext *s, AVFrame *frame)
 {
     int ret;
     int create_gray_palette = 0;
@@ -1177,11 +1177,11 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
         return ret;
     if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
         if (!create_gray_palette)
-            memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
+            memcpy(frame->data[1], s->palette, sizeof(s->palette));
         else {
             /* make default grayscale pal */
             int i;
-            uint32_t *pal = (uint32_t *)frame->f->data[1];
+            uint32_t *pal = (uint32_t *)frame->data[1];
             for (i = 0; i < 1<<s->bpp; i++)
                 pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
         }
@@ -1743,7 +1743,6 @@ static int decode_frame(AVCodecContext *avctx,
 {
     TiffContext *const s = avctx->priv_data;
     AVFrame *const p = data;
-    ThreadFrame frame = { .f = data };
     unsigned off, last_off;
     int le, ret, plane, planes;
     int i, j, entries, stride;
@@ -1894,7 +1893,7 @@ again:
     }
 
     /* now we have the data and may start decoding */
-    if ((ret = init_image(s, &frame)) < 0)
+    if ((ret = init_image(s, p)) < 0)
         return ret;
 
     if (!s->is_tiled || has_strip_bits) {
```