author    Anton Khirnov <anton@khirnov.net>    2016-10-26 08:10:19 +0200
committer Anton Khirnov <anton@khirnov.net>    2016-11-29 10:39:20 +0100
commit    328cd2b599bc2d0d38f3c12606fa2a66eeec016e (patch)
tree      9bb6a4c661f3184e8c9566eec3d11f148676ec49 /libavcodec/utils.c
parent    45d199d5b0b7f09eb9baa29929a3bd07ed46223b (diff)
download  ffmpeg-328cd2b599bc2d0d38f3c12606fa2a66eeec016e.tar.gz
lavc: move encoding-related code from utils.c to a new file
Diffstat (limited to 'libavcodec/utils.c')
-rw-r--r--  libavcodec/utils.c  331
1 file changed, 0 insertions, 331 deletions
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 329233d473..d8ba1d59c9 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -1210,244 +1210,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
goto end;
}
-int ff_alloc_packet(AVPacket *avpkt, int size)
-{
- if (size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(EINVAL);
-
- if (avpkt->data) {
- AVBufferRef *buf = avpkt->buf;
-
- if (avpkt->size < size)
- return AVERROR(EINVAL);
-
- av_init_packet(avpkt);
- avpkt->buf = buf;
- avpkt->size = size;
- return 0;
- } else {
- return av_new_packet(avpkt, size);
- }
-}
-
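For context, a minimal sketch (not part of the patch) of how an encoder's encode2() callback typically uses ff_alloc_packet(): request a worst-case sized buffer, then report the bytes actually written via avpkt->size. The toy_encode2() callback, the 16-bit interleaved size bound and the zero-filled payload are illustrative assumptions only.

#include <string.h>
#include "avcodec.h"
#include "internal.h"   /* ff_alloc_packet() */

static int toy_encode2(AVCodecContext *avctx, AVPacket *avpkt,
                       const AVFrame *frame, int *got_packet_ptr)
{
    /* hypothetical worst case: one 16-bit sample per channel per input sample */
    int max_size = avctx->frame_size * avctx->channels * 2;
    int ret;

    if ((ret = ff_alloc_packet(avpkt, max_size)) < 0)
        return ret;

    /* stand-in for real compression of 'frame' into avpkt->data */
    memset(avpkt->data, 0, max_size);
    avpkt->size = max_size;

    *got_packet_ptr = 1;
    return 0;
}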
-/**
- * Pad last frame with silence.
- */
-static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
-{
- AVFrame *frame = NULL;
- int ret;
-
- if (!(frame = av_frame_alloc()))
- return AVERROR(ENOMEM);
-
- frame->format = src->format;
- frame->channel_layout = src->channel_layout;
- frame->nb_samples = s->frame_size;
- ret = av_frame_get_buffer(frame, 32);
- if (ret < 0)
- goto fail;
-
- ret = av_frame_copy_props(frame, src);
- if (ret < 0)
- goto fail;
-
- if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
- src->nb_samples, s->channels, s->sample_fmt)) < 0)
- goto fail;
- if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
- frame->nb_samples - src->nb_samples,
- s->channels, s->sample_fmt)) < 0)
- goto fail;
-
- *dst = frame;
-
- return 0;
-
-fail:
- av_frame_free(&frame);
- return ret;
-}
-
-int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
-{
- AVFrame tmp;
- AVFrame *padded_frame = NULL;
- int ret;
- int user_packet = !!avpkt->data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- return 0;
- }
-
- /* ensure that extended_data is properly set */
- if (frame && !frame->extended_data) {
- if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
- avctx->channels > AV_NUM_DATA_POINTERS) {
- av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
- "with more than %d channels, but extended_data is not set.\n",
- AV_NUM_DATA_POINTERS);
- return AVERROR(EINVAL);
- }
- av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
-
- tmp = *frame;
- tmp.extended_data = tmp.data;
- frame = &tmp;
- }
-
- /* extract audio service type metadata */
- if (frame) {
- AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
- if (sd && sd->size >= sizeof(enum AVAudioServiceType))
- avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
- }
-
- /* check for valid frame size */
- if (frame) {
- if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
- if (frame->nb_samples > avctx->frame_size)
- return AVERROR(EINVAL);
- } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
- if (frame->nb_samples < avctx->frame_size &&
- !avctx->internal->last_audio_frame) {
- ret = pad_last_frame(avctx, &padded_frame, frame);
- if (ret < 0)
- return ret;
-
- frame = padded_frame;
- avctx->internal->last_audio_frame = 1;
- }
-
- if (frame->nb_samples != avctx->frame_size) {
- ret = AVERROR(EINVAL);
- goto end;
- }
- }
- }
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- if (!ret) {
- if (*got_packet_ptr) {
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
- if (avpkt->pts == AV_NOPTS_VALUE)
- avpkt->pts = frame->pts;
- if (!avpkt->duration)
- avpkt->duration = ff_samples_to_time_base(avctx,
- frame->nb_samples);
- }
- avpkt->dts = avpkt->pts;
- } else {
- avpkt->size = 0;
- }
-
- if (!user_packet && avpkt->size) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- goto end;
- }
-
- /* NOTE: if we add any audio encoders which output non-keyframe packets,
- * this needs to be moved to the encoders, but for now we can do it
- * here to simplify things */
- avpkt->flags |= AV_PKT_FLAG_KEY;
-
-end:
- av_frame_free(&padded_frame);
-
-#if FF_API_AUDIOENC_DELAY
- avctx->delay = avctx->initial_padding;
-#endif
-
- return ret;
-}
-
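As a reminder of the calling convention being moved, here is a sketch of driving avcodec_encode_audio2() from user code; write_packet() is a hypothetical output sink and error handling is abbreviated.

extern void write_packet(AVPacket *pkt);   /* hypothetical output sink */

static int encode_audio_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AVPacket pkt;
    int got_packet, ret;

    av_init_packet(&pkt);
    pkt.data = NULL;    /* let the encoder allocate a ref-counted buffer */
    pkt.size = 0;

    ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret;

    if (got_packet) {
        write_packet(&pkt);
        av_packet_unref(&pkt);
    }

    /* at EOF, call repeatedly with frame == NULL until got_packet stays 0,
     * so encoders with AV_CODEC_CAP_DELAY can flush their internal queue */
    return 0;
}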
-int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
-{
- int ret;
- int user_packet = !!avpkt->data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- avpkt->size = 0;
- return 0;
- }
-
- if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
- return AVERROR(EINVAL);
-
- av_assert0(avctx->codec->encode2);
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- if (!ret) {
- if (!*got_packet_ptr)
- avpkt->size = 0;
- else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- avpkt->pts = avpkt->dts = frame->pts;
-
- if (!user_packet && avpkt->size) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr)
- av_packet_unref(avpkt);
-
- emms_c();
- return ret;
-}
-
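Likewise, a sketch (with the same hypothetical write_packet() sink) of flushing a delayed video encoder through avcodec_encode_video2() by feeding NULL frames until no further packet is returned.

extern void write_packet(AVPacket *pkt);   /* hypothetical output sink */

static void flush_video_encoder(AVCodecContext *avctx)
{
    for (;;) {
        AVPacket pkt;
        int got_packet;

        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        if (avcodec_encode_video2(avctx, &pkt, NULL, &got_packet) < 0 || !got_packet)
            break;

        write_packet(&pkt);
        av_packet_unref(&pkt);
    }
}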
-int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
- const AVSubtitle *sub)
-{
- int ret;
- if (sub->start_display_time) {
- av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
- return -1;
- }
- if (sub->num_rects == 0 || !sub->rects)
- return -1;
- ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
- avctx->frame_number++;
- return ret;
-}
-
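And a brief caller-side sketch of avcodec_encode_subtitle(): the output buffer is caller-provided and the return value is the number of bytes written (negative on error). The 1 MiB bound is an arbitrary assumption.

static int encode_one_subtitle(AVCodecContext *avctx, const AVSubtitle *sub)
{
    /* arbitrary upper bound for the encoded payload, chosen by the caller */
    static uint8_t subtitle_buf[1024 * 1024];
    int sub_size = avcodec_encode_subtitle(avctx, subtitle_buf,
                                           sizeof(subtitle_buf), sub);

    if (sub_size < 0)
        av_log(avctx, AV_LOG_ERROR, "Failed to encode subtitle\n");
    return sub_size;
}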
static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
{
int size = 0, ret;
@@ -1849,99 +1611,6 @@ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *fr
return 0;
}
-static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet)
-{
- int ret;
- *got_packet = 0;
-
- av_packet_unref(avctx->internal->buffer_pkt);
- avctx->internal->buffer_pkt_valid = 0;
-
- if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt,
- frame, got_packet);
- } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt,
- frame, got_packet);
- } else {
- ret = AVERROR(EINVAL);
- }
-
- if (ret >= 0 && *got_packet) {
- // Encoders must always return ref-counted buffers.
- // Side-data only packets have no data and can be not ref-counted.
- av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf);
- avctx->internal->buffer_pkt_valid = 1;
- ret = 0;
- } else {
- av_packet_unref(avctx->internal->buffer_pkt);
- }
-
- return ret;
-}
-
-int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
-{
- if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->internal->draining)
- return AVERROR_EOF;
-
- if (!frame) {
- avctx->internal->draining = 1;
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return 0;
- }
-
- if (avctx->codec->send_frame)
- return avctx->codec->send_frame(avctx, frame);
-
- // Emulation via old API. Do it here instead of avcodec_receive_packet, because:
- // 1. if the AVFrame is not refcounted, the copying will be much more
- // expensive than copying the packet data
- // 2. assume few users use non-refcounted AVPackets, so usually no copy is
- // needed
-
- if (avctx->internal->buffer_pkt_valid)
- return AVERROR(EAGAIN);
-
- return do_encode(avctx, frame, &(int){0});
-}
-
-int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
-{
- av_packet_unref(avpkt);
-
- if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->codec->receive_packet) {
- if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return AVERROR_EOF;
- return avctx->codec->receive_packet(avctx, avpkt);
- }
-
- // Emulation via old API.
-
- if (!avctx->internal->buffer_pkt_valid) {
- int got_packet;
- int ret;
- if (!avctx->internal->draining)
- return AVERROR(EAGAIN);
- ret = do_encode(avctx, NULL, &got_packet);
- if (ret < 0)
- return ret;
- if (ret >= 0 && !got_packet)
- return AVERROR_EOF;
- }
-
- av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
- avctx->internal->buffer_pkt_valid = 0;
- return 0;
-}
-
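For comparison, a minimal sketch of the send/receive API that this emulation layer backs: push frames with avcodec_send_frame() (NULL starts draining) and pull packets with avcodec_receive_packet() until EAGAIN or EOF. write_packet() is again a hypothetical sink.

extern void write_packet(AVPacket *pkt);   /* hypothetical output sink */

static int encode_and_drain(AVCodecContext *avctx, const AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(avctx, frame);   /* frame == NULL starts draining */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(avctx, pkt)) == 0) {
        write_packet(pkt);
        av_packet_unref(pkt);
    }

    /* AVERROR(EAGAIN): feed more input; AVERROR_EOF: encoder fully drained */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}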
av_cold int avcodec_close(AVCodecContext *avctx)
{
int i;