author     wm4 <nfxjfg@googlemail.com>          2017-10-28 19:53:38 +0200
committer  James Almer <jamrial@gmail.com>      2017-11-11 20:33:45 -0300
commit     7546964f96168cd6ac819ef4c3212ee586619f1a (patch)
tree       69c2613e791f6ec03087b01fd43cfae3741465fd
parent     48e4eda11d537c6ed52d1000aaa6ce5cbb641e25 (diff)
download   ffmpeg-7546964f96168cd6ac819ef4c3212ee586619f1a.tar.gz
nvdec: add frames_params support
-rw-r--r--  libavcodec/nvdec.c       | 74
-rw-r--r--  libavcodec/nvdec.h       |  5
-rw-r--r--  libavcodec/nvdec_h264.c  |  8
-rw-r--r--  libavcodec/nvdec_hevc.c  |  8
4 files changed, 57 insertions, 38 deletions
diff --git a/libavcodec/nvdec.c b/libavcodec/nvdec.c
index db338accfa..e4babad43e 100644
--- a/libavcodec/nvdec.c
+++ b/libavcodec/nvdec.c
@@ -185,7 +185,7 @@ int ff_nvdec_decode_uninit(AVCodecContext *avctx)
     return 0;
 }
 
-int ff_nvdec_decode_init(AVCodecContext *avctx, unsigned int dpb_size)
+int ff_nvdec_decode_init(AVCodecContext *avctx)
 {
     NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
 
@@ -214,37 +214,12 @@ int ff_nvdec_decode_init(AVCodecContext *avctx, unsigned int dpb_size)
         return AVERROR(ENOSYS);
     }
 
-    if (avctx->thread_type & FF_THREAD_FRAME)
-        dpb_size += avctx->thread_count;
-
-    if (!avctx->hw_frames_ctx) {
-        AVHWFramesContext *frames_ctx;
-
-        if (!avctx->hw_device_ctx) {
-            av_log(avctx, AV_LOG_ERROR, "A hardware device or frames context "
-                   "is required for CUVID decoding.\n");
-            return AVERROR(EINVAL);
-        }
-
-        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
-        if (!avctx->hw_frames_ctx)
-            return AVERROR(ENOMEM);
-        frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
-
-        frames_ctx->format            = AV_PIX_FMT_CUDA;
-        frames_ctx->width             = avctx->coded_width;
-        frames_ctx->height            = avctx->coded_height;
-        frames_ctx->sw_format         = AV_PIX_FMT_NV12;
-        frames_ctx->sw_format         = sw_desc->comp[0].depth > 8 ?
-                                        AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;
-        frames_ctx->initial_pool_size = dpb_size;
-
-        ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
-        if (ret < 0) {
-            av_log(avctx, AV_LOG_ERROR, "Error initializing internal frames context\n");
+    ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_CUDA);
+    if (ret < 0)
         return ret;
-        }
-    }
+
+    frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
 
     params.ulWidth             = avctx->coded_width;
@@ -256,7 +231,7 @@ int ff_nvdec_decode_init(AVCodecContext *avctx, unsigned int dpb_size)
                                  cudaVideoSurfaceFormat_P016 : cudaVideoSurfaceFormat_NV12;
     params.CodecType           = cuvid_codec_type;
     params.ChromaFormat        = cuvid_chroma_format;
-    params.ulNumDecodeSurfaces = dpb_size;
+    params.ulNumDecodeSurfaces = frames_ctx->initial_pool_size;
     params.ulNumOutputSurfaces = 1;
 
     ret = nvdec_decoder_create(&ctx->decoder_ref, frames_ctx->device_ref, &params, avctx);
@@ -268,7 +243,7 @@ int ff_nvdec_decode_init(AVCodecContext *avctx, unsigned int dpb_size)
         ret = AVERROR(ENOMEM);
         goto fail;
     }
-    pool->dpb_size = dpb_size;
+    pool->dpb_size = frames_ctx->initial_pool_size;
 
     ctx->decoder_pool = av_buffer_pool_init2(sizeof(int), pool,
                                              nvdec_decoder_frame_alloc, av_free);
@@ -430,3 +405,40 @@ finish:
 
     return ret;
 }
+
+int ff_nvdec_frame_params(AVCodecContext *avctx,
+                          AVBufferRef *hw_frames_ctx,
+                          int dpb_size)
+{
+    AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
+    const AVPixFmtDescriptor *sw_desc;
+    int cuvid_codec_type, cuvid_chroma_format;
+
+    sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
+    if (!sw_desc)
+        return AVERROR_BUG;
+
+    cuvid_codec_type = map_avcodec_id(avctx->codec_id);
+    if (cuvid_codec_type < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Unsupported codec ID\n");
+        return AVERROR_BUG;
+    }
+
+    cuvid_chroma_format = map_chroma_format(avctx->sw_pix_fmt);
+    if (cuvid_chroma_format < 0) {
+        av_log(avctx, AV_LOG_VERBOSE, "Unsupported chroma format\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (avctx->thread_type & FF_THREAD_FRAME)
+        dpb_size += avctx->thread_count;
+
+    frames_ctx->format            = AV_PIX_FMT_CUDA;
+    frames_ctx->width             = avctx->coded_width;
+    frames_ctx->height            = avctx->coded_height;
+    frames_ctx->sw_format         = sw_desc->comp[0].depth > 8 ?
+                                    AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;
+    frames_ctx->initial_pool_size = dpb_size;
+
+    return 0;
+}
diff --git a/libavcodec/nvdec.h b/libavcodec/nvdec.h
index 18a64cd445..14d29ee94b 100644
--- a/libavcodec/nvdec.h
+++ b/libavcodec/nvdec.h
@@ -54,9 +54,12 @@ typedef struct NVDECContext {
     unsigned int     slice_offsets_allocated;
 } NVDECContext;
 
-int ff_nvdec_decode_init(AVCodecContext *avctx, unsigned int dpb_size);
+int ff_nvdec_decode_init(AVCodecContext *avctx);
 int ff_nvdec_decode_uninit(AVCodecContext *avctx);
 int ff_nvdec_start_frame(AVCodecContext *avctx, AVFrame *frame);
 int ff_nvdec_end_frame(AVCodecContext *avctx);
+int ff_nvdec_frame_params(AVCodecContext *avctx,
+                          AVBufferRef *hw_frames_ctx,
+                          int dpb_size);
 
 #endif /* AVCODEC_NVDEC_H */
diff --git a/libavcodec/nvdec_h264.c b/libavcodec/nvdec_h264.c
index 75dd4b2eb8..b0e756c734 100644
--- a/libavcodec/nvdec_h264.c
+++ b/libavcodec/nvdec_h264.c
@@ -155,11 +155,12 @@ static int nvdec_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
     return 0;
 }
 
-static int nvdec_h264_decode_init(AVCodecContext *avctx)
+static int nvdec_h264_frame_params(AVCodecContext *avctx,
+                                   AVBufferRef *hw_frames_ctx)
 {
     const H264Context *h = avctx->priv_data;
     const SPS *sps = h->ps.sps;
-    return ff_nvdec_decode_init(avctx, sps->ref_frame_count + sps->num_reorder_frames);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames);
 }
 
 AVHWAccel ff_h264_nvdec_hwaccel = {
@@ -170,7 +171,8 @@ AVHWAccel ff_h264_nvdec_hwaccel = {
     .start_frame          = nvdec_h264_start_frame,
     .end_frame            = ff_nvdec_end_frame,
     .decode_slice         = nvdec_h264_decode_slice,
-    .init                 = nvdec_h264_decode_init,
+    .frame_params         = nvdec_h264_frame_params,
+    .init                 = ff_nvdec_decode_init,
     .uninit               = ff_nvdec_decode_uninit,
     .priv_data_size       = sizeof(NVDECContext),
 };
diff --git a/libavcodec/nvdec_hevc.c b/libavcodec/nvdec_hevc.c
index 89c1be5f7c..f02a7a15fe 100644
--- a/libavcodec/nvdec_hevc.c
+++ b/libavcodec/nvdec_hevc.c
@@ -258,11 +258,12 @@ static int nvdec_hevc_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
     return 0;
 }
 
-static int nvdec_hevc_decode_init(AVCodecContext *avctx)
+static int nvdec_hevc_frame_params(AVCodecContext *avctx,
+                                   AVBufferRef *hw_frames_ctx)
 {
     const HEVCContext *s = avctx->priv_data;
     const HEVCSPS *sps = s->ps.sps;
-    return ff_nvdec_decode_init(avctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1);
 }
 
 AVHWAccel ff_hevc_nvdec_hwaccel = {
@@ -273,7 +274,8 @@ AVHWAccel ff_hevc_nvdec_hwaccel = {
     .start_frame          = nvdec_hevc_start_frame,
    .end_frame            = ff_nvdec_end_frame,
     .decode_slice         = nvdec_hevc_decode_slice,
-    .init                 = nvdec_hevc_decode_init,
+    .frame_params         = nvdec_hevc_frame_params,
+    .init                 = ff_nvdec_decode_init,
     .uninit               = ff_nvdec_decode_uninit,
     .priv_data_size       = sizeof(NVDECContext),
 };
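For context, the frame_params callbacks added here are what let a frames context be sized and configured before the decoder is opened. The sketch below is not part of this commit; it assumes the public avcodec_get_hw_frames_parameters() entry point from the surrounding hwaccel API work, plus a caller-owned AV_HWDEVICE_TYPE_CUDA device reference named cuda_device_ref, and shows roughly how an API user could pre-create a CUDA frames context for NVDEC from a get_format callback.

```c
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

/* Hypothetical helper: cuda_device_ref is assumed to be an existing
 * AV_HWDEVICE_TYPE_CUDA device reference owned by the caller. */
static int setup_cuda_frames(AVCodecContext *avctx, AVBufferRef *cuda_device_ref)
{
    AVBufferRef *frames_ref = NULL;
    int ret;

    /* Ask the selected hwaccel (nvdec here) to fill in format, dimensions,
     * sw_format and initial_pool_size via its frame_params callback. */
    ret = avcodec_get_hw_frames_parameters(avctx, cuda_device_ref,
                                           AV_PIX_FMT_CUDA, &frames_ref);
    if (ret < 0)
        return ret;

    /* The returned frames context is not yet initialized, so the caller may
     * still enlarge initial_pool_size here before committing to it. */
    ret = av_hwframe_ctx_init(frames_ref);
    if (ret < 0) {
        av_buffer_unref(&frames_ref);
        return ret;
    }

    avctx->hw_frames_ctx = frames_ref;
    return 0;
}
```

Internally, libavcodec's default path does the same job through ff_decode_get_hw_frames_ctx(), which is exactly what ff_nvdec_decode_init() switches to in this patch.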