author    | Niklas Haas <git@haasn.dev> | 2021-12-14 14:43:33 +0100
committer | Lynne <dev@lynne.ee>        | 2021-12-15 23:38:21 +0100
commit    | 5317a6366f23f725007a06e695126232173417ab (patch)
tree      | e8ad56c23c6a80170735869e5649f8c06a4259f6 /libavfilter
parent    | df46d7cb49ce301d83c1d20cfc4ef47390d47691 (diff)
download  | ffmpeg-5317a6366f23f725007a06e695126232173417ab.tar.gz
vf_libplacebo: switch to newer libplacebo helpers
Support for mapping/unmapping hardware frames has been added into
libplacebo itself, so we can scrap this code in favor of using the new
functions. This has the additional benefit of being forwards-compatible
as support for more complicated frame-related state management is added
to libplacebo (e.g. mapping Dolby Vision metadata).
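
For illustration, the new flow reduces to a map → render → unmap cycle. A
minimal sketch, assuming the pl_map_avframe()/pl_unmap_avframe() signatures
used in the diff below (from <libplacebo/utils/libav.h>) together with
libplacebo's pl_render_default_params; the helper name render_one and the
gpu/rr parameters are placeholders, not code from this patch:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libplacebo/renderer.h>
    #include <libplacebo/utils/libav.h>

    /* Render `in` onto `out` with the new helpers; error handling mirrors
     * the patch: map both frames, clean up through a single path. */
    static int render_one(pl_gpu gpu, pl_renderer rr, AVFrame *out, const AVFrame *in)
    {
        struct pl_frame image, target;
        int ok;

        /* One call imports an AVFrame (e.g. an AV_PIX_FMT_VULKAN hw frame)
         * into a pl_frame, replacing the per-plane pl_vulkan_wrap logic. */
        ok  = pl_map_avframe(gpu, &image,  NULL, in);
        ok &= pl_map_avframe(gpu, &target, NULL, out);
        if (ok)
            pl_render_image(rr, &image, &target, &pl_render_default_params);

        /* Unmapping releases whatever state the mapping created; the patch
         * calls this on both frames on the failure path as well. */
        pl_unmap_avframe(gpu, &image);
        pl_unmap_avframe(gpu, &target);
        return ok ? 0 : AVERROR_EXTERNAL;
    }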
It's worth pointing out that, technically, this would also allow
`vf_libplacebo` to accept, practically unmodified, other frame types
(e.g. vaapi or drm), or even software input formats. (Although we still
need a vulkan *device* to be available.)
To keep things simple, though, retain the current restriction to vulkan
frames. It's possible we could rethink this in a future commit, but for
now I don't want to introduce any more potentially breaking changes.
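
As a rough illustration of that restriction (not code from this patch; the
helper name and error codes are placeholders), rejecting anything that is not
a Vulkan hardware frame might look like:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/pixfmt.h>

    /* Accept only AV_PIX_FMT_VULKAN hardware frames; vaapi/drm/software
     * input is rejected even though pl_map_avframe could potentially
     * handle it. */
    static int check_vulkan_input(const AVFrame *in)
    {
        const AVHWFramesContext *hwfc;

        if (!in->hw_frames_ctx)
            return AVERROR(EINVAL);      /* software frame */

        hwfc = (const AVHWFramesContext *) in->hw_frames_ctx->data;
        if (hwfc->format != AV_PIX_FMT_VULKAN)
            return AVERROR(EINVAL);      /* some other hwaccel, e.g. vaapi */

        return 0;
    }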
Diffstat (limited to 'libavfilter')
-rw-r--r-- | libavfilter/vf_libplacebo.c | 91
1 file changed, 12 insertions(+), 79 deletions(-)
diff --git a/libavfilter/vf_libplacebo.c b/libavfilter/vf_libplacebo.c
index e2749857d1..7ece9d829e 100644
--- a/libavfilter/vf_libplacebo.c
+++ b/libavfilter/vf_libplacebo.c
@@ -275,64 +275,18 @@ static void libplacebo_uninit(AVFilterContext *avctx)
     s->gpu = NULL;
 }
 
-static int wrap_vkframe(pl_gpu gpu, const AVFrame *frame, int plane, pl_tex *tex)
-{
-    AVVkFrame *vkf = (AVVkFrame *) frame->data[0];
-    const AVHWFramesContext *hwfc = (AVHWFramesContext *) frame->hw_frames_ctx->data;
-    const AVVulkanFramesContext *vkfc = hwfc->hwctx;
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(hwfc->sw_format);
-    const VkFormat *vk_fmt = av_vkfmt_from_pixfmt(hwfc->sw_format);
-    const int chroma = plane == 1 || plane == 2;
-
-    *tex = pl_vulkan_wrap(gpu, pl_vulkan_wrap_params(
-        .image = vkf->img[plane],
-        .format = vk_fmt[plane],
-        .width = AV_CEIL_RSHIFT(frame->width, chroma ? desc->log2_chroma_w : 0),
-        .height = AV_CEIL_RSHIFT(frame->height, chroma ? desc->log2_chroma_h : 0),
-        .usage = vkfc->usage,
-    ));
-
-    if (!*tex)
-        return AVERROR(ENOMEM);
-
-    pl_vulkan_release(gpu, *tex, vkf->layout[plane], (pl_vulkan_sem) {
-        .sem = vkf->sem[plane],
-        .value = vkf->sem_value[plane]
-    });
-    return 0;
-}
-
-static int unwrap_vkframe(pl_gpu gpu, AVFrame *frame, int plane, pl_tex *tex)
-{
-    AVVkFrame *vkf = (AVVkFrame *) frame->data[0];
-    int ok = pl_vulkan_hold_raw(gpu, *tex, &vkf->layout[plane],
-                                (pl_vulkan_sem) { vkf->sem[plane], vkf->sem_value[plane] + 1 });
-    vkf->access[plane] = 0;
-    vkf->sem_value[plane] += !!ok;
-    return ok ? 0 : AVERROR_EXTERNAL;
-}
-
-static void set_sample_depth(struct pl_frame *out_frame, const AVFrame *frame)
-{
-    const AVHWFramesContext *hwfc = (AVHWFramesContext *) frame->hw_frames_ctx->data;
-    pl_fmt fmt = out_frame->planes[0].texture->params.format;
-    struct pl_bit_encoding *bits = &out_frame->repr.bits;
-    bits->sample_depth = fmt->component_depth[0];
-
-    switch (hwfc->sw_format) {
-    case AV_PIX_FMT_P010: bits->bit_shift = 6; break;
-    default: break;
-    }
-}
-
 static int process_frames(AVFilterContext *avctx, AVFrame *out, AVFrame *in)
 {
-    int err = 0;
+    int err = 0, ok;
     LibplaceboContext *s = avctx->priv;
     struct pl_render_params params;
     struct pl_frame image, target;
-    pl_frame_from_avframe(&image, in);
-    pl_frame_from_avframe(&target, out);
+    ok = pl_map_avframe(s->gpu, &image, NULL, in);
+    ok &= pl_map_avframe(s->gpu, &target, NULL, out);
+    if (!ok) {
+        err = AVERROR_EXTERNAL;
+        goto fail;
+    }
 
     if (!s->apply_filmgrain)
         image.film_grain.type = PL_FILM_GRAIN_NONE;
@@ -411,38 +365,17 @@ static int process_frames(AVFilterContext *avctx, AVFrame *out, AVFrame *in)
     RET(find_scaler(avctx, &params.upscaler, s->upscaler));
     RET(find_scaler(avctx, &params.downscaler, s->downscaler));
 
-    /* Ideally, we would persistently wrap all of these AVVkFrames into pl_tex
-     * objects, but for now we'll just create and destroy a wrapper per frame.
-     * Note that doing it this way is suboptimal, since it results in the
-     * creation and destruction of a VkSampler and VkFramebuffer per frame.
-     *
-     * FIXME: Can we do better? */
-    for (int i = 0; i < image.num_planes; i++)
-        RET(wrap_vkframe(s->gpu, in, i, &image.planes[i].texture));
-    for (int i = 0; i < target.num_planes; i++)
-        RET(wrap_vkframe(s->gpu, out, i, &target.planes[i].texture));
-
-    /* Since we're mapping vkframes manually, the pl_frame helpers don't know
-     * about the mismatch between the sample format and the color depth. */
-    set_sample_depth(&image, in);
-    set_sample_depth(&target, out);
-
     pl_render_image(s->renderer, &image, &target, &params);
-
-    for (int i = 0; i < image.num_planes; i++)
-        RET(unwrap_vkframe(s->gpu, in, i, &image.planes[i].texture));
-    for (int i = 0; i < target.num_planes; i++)
-        RET(unwrap_vkframe(s->gpu, out, i, &target.planes[i].texture));
+    pl_unmap_avframe(s->gpu, &image);
+    pl_unmap_avframe(s->gpu, &target);
 
     /* Flush the command queues for performance */
     pl_gpu_flush(s->gpu);
+    return 0;
 
-    /* fall through */
 fail:
-    for (int i = 0; i < image.num_planes; i++)
-        pl_tex_destroy(s->gpu, &image.planes[i].texture);
-    for (int i = 0; i < target.num_planes; i++)
-        pl_tex_destroy(s->gpu, &target.planes[i].texture);
+    pl_unmap_avframe(s->gpu, &image);
+    pl_unmap_avframe(s->gpu, &target);
     return err;
 }