diff options
author | Stefano Sabatini <stefasab@gmail.com> | 2012-09-07 18:09:19 +0200 |
---|---|---|
committer | Stefano Sabatini <stefasab@gmail.com> | 2012-09-11 17:48:07 +0200 |
commit | 2fe8fd394829d44fd2ac5c7ac30ff01d8f228dd0 (patch) | |
tree | b2d231961b676c8ad40b3c055c1c81bcb8aaf06e /doc | |
parent | ec7946853a24855cc4e889f788195f57ce59ccab (diff) | |
download | ffmpeg-2fe8fd394829d44fd2ac5c7ac30ff01d8f228dd0.tar.gz |
examples/demuxing: add audio decoding/demuxing
Diffstat (limited to 'doc')
-rw-r--r-- | doc/examples/demuxing.c | 304 |
1 file changed, 225 insertions, 79 deletions
diff --git a/doc/examples/demuxing.c b/doc/examples/demuxing.c index fca7febfd9..0e0015e742 100644 --- a/doc/examples/demuxing.c +++ b/doc/examples/demuxing.c @@ -25,75 +25,185 @@ * libavformat demuxing API use example. * * Show how to use the libavformat and libavcodec API to demux and - * decode video data. + * decode audio and video data. */ #include <libavutil/imgutils.h> +#include <libavutil/samplefmt.h> #include <libavutil/timestamp.h> #include <libavformat/avformat.h> static AVFormatContext *fmt_ctx = NULL; -static AVCodecContext *dec_ctx = NULL; -static AVCodec *dec = NULL; -static AVStream *stream = NULL; +static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx; +static AVStream *video_stream = NULL, *audio_stream = NULL; static const char *src_filename = NULL; -static const char *dst_filename = NULL; -static FILE *dst_file = NULL; -static uint8_t *dst_data[4] = {NULL}; -static int dst_linesize[4]; -static int dst_bufsize; -static int stream_idx; +static const char *video_dst_filename = NULL; +static const char *audio_dst_filename = NULL; +static FILE *video_dst_file = NULL; +static FILE *audio_dst_file = NULL; + +static uint8_t *video_dst_data[4] = {NULL}; +static int video_dst_linesize[4]; +static int video_dst_bufsize; + +static uint8_t **audio_dst_data = NULL; +static int audio_dst_linesize; +static int audio_dst_bufsize; + +static int video_stream_idx = -1, audio_stream_idx = -1; static AVFrame *frame = NULL; static AVPacket pkt; -static int frame_count = 0; +static int video_frame_count = 0; +static int audio_frame_count = 0; static int decode_packet(int *got_frame, int cached) { - int ret; + int ret = 0; + + if (pkt.stream_index == video_stream_idx) { + /* decode video frame */ + ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt); + if (ret < 0) { + fprintf(stderr, "Error decoding video frame\n"); + return ret; + } + + if (*got_frame) { + printf("video_frame%s n:%d coded_n:%d pts:%s\n", + cached ? 
"(cached)" : "", + video_frame_count++, frame->coded_picture_number, + av_ts2timestr(frame->pts, &video_dec_ctx->time_base)); + + /* copy decoded frame to destination buffer: + * this is required since rawvideo expects non aligned data */ + av_image_copy(video_dst_data, video_dst_linesize, + (const uint8_t **)(frame->data), frame->linesize, + video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height); + + /* write to rawvideo file */ + fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file); + } + } else if (pkt.stream_index == audio_stream_idx) { + /* decode audio frame */ + ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt); + if (ret < 0) { + fprintf(stderr, "Error decoding audio frame\n"); + return ret; + } + + if (*got_frame) { + printf("audio_frame%s n:%d nb_samples:%d pts:%s\n", + cached ? "(cached)" : "", + audio_frame_count++, frame->nb_samples, + av_ts2timestr(frame->pts, &audio_dec_ctx->time_base)); + + ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, frame->channels, + frame->nb_samples, frame->format, 1); + if (ret < 0) { + fprintf(stderr, "Could not allocate audio buffer\n"); + return AVERROR(ENOMEM); + } + + /* TODO: extend return code of the av_samples_* functions so that this call is not needed */ + audio_dst_bufsize = + av_samples_get_buffer_size(NULL, frame->channels, + frame->nb_samples, frame->format, 1); + + /* copy audio data to destination buffer: + * this is required since rawaudio expects non aligned data */ + av_samples_copy(audio_dst_data, frame->data, 0, 0, + frame->nb_samples, frame->channels, frame->format); - if (pkt.stream_index != stream_idx) - return 0; + /* write to rawaudio file */ + fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file); + av_freep(&audio_dst_data[0]); + } + } + + return ret; +} + +static int open_codec_context(int *stream_idx, + AVFormatContext *fmt_ctx, enum AVMediaType type) +{ + int ret; + AVStream *st; + AVCodecContext *dec_ctx = NULL; + AVCodec *dec = 
NULL; - /* decode video frame */ - ret = avcodec_decode_video2(dec_ctx, frame, got_frame, &pkt); + ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0); if (ret < 0) { - fprintf(stderr, "Error decoding video frame\n"); + fprintf(stderr, "Could not find %s stream in input file '%s'\n", + av_get_media_type_string(type), src_filename); return ret; + } else { + *stream_idx = ret; + st = fmt_ctx->streams[*stream_idx]; + + /* find decoder for the stream */ + dec_ctx = st->codec; + dec = avcodec_find_decoder(dec_ctx->codec_id); + if (!dec) { + fprintf(stderr, "Failed to find %s codec\n", + av_get_media_type_string(type)); + return ret; + } + + if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { + fprintf(stderr, "Failed to open %s codec\n", + av_get_media_type_string(type)); + return ret; + } } - if (*got_frame) { - printf("frame%s n:%d coded_n:%d pts:%s\n", - cached ? "(cached)" : "", - frame_count++, frame->coded_picture_number, - av_ts2timestr(frame->pts, &dec_ctx->time_base)); + return 0; +} - /* copy decoded frame to destination buffer: - * this is required since rawvideo expect non aligned data */ - av_image_copy(dst_data, dst_linesize, - (const uint8_t **)(frame->data), frame->linesize, - dec_ctx->pix_fmt, dec_ctx->width, dec_ctx->height); +static int get_format_from_sample_fmt(const char **fmt, + enum AVSampleFormat sample_fmt) +{ + int i; + struct sample_fmt_entry { + enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le; + } sample_fmt_entries[] = { + { AV_SAMPLE_FMT_U8, "u8", "u8" }, + { AV_SAMPLE_FMT_S16, "s16be", "s16le" }, + { AV_SAMPLE_FMT_S32, "s32be", "s32le" }, + { AV_SAMPLE_FMT_FLT, "f32be", "f32le" }, + { AV_SAMPLE_FMT_DBL, "f64be", "f64le" }, + }; + *fmt = NULL; - /* write to rawvideo file */ - fwrite(dst_data[0], 1, dst_bufsize, dst_file); + for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) { + struct sample_fmt_entry *entry = &sample_fmt_entries[i]; + if (sample_fmt == entry->sample_fmt) { + *fmt = AV_NE(entry->fmt_be, 
entry->fmt_le); + return 0; + } } - return ret; + fprintf(stderr, + "sample format %s is not supported as output format\n", + av_get_sample_fmt_name(sample_fmt)); + return -1; } int main (int argc, char **argv) { - int ret, got_frame; + int ret = 0, got_frame; - if (argc != 3) { - fprintf(stderr, "usage: %s input_file output_file\n" + if (argc != 4) { + fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n" "API example program to show how to read frames from an input file.\n" - "This program reads frames from a file, decode them, and write them " - "to a rawvideo file named like output_file." + "This program reads frames from a file, decodes them, and writes decoded\n" + "video frames to a rawvideo file named video_output_file, and decoded\n" + "audio frames to a rawaudio file named audio_output_file.\n" "\n", argv[0]); exit(1); } src_filename = argv[1]; - dst_filename = argv[2]; + video_dst_filename = argv[2]; + audio_dst_filename = argv[3]; /* register all formats and codecs */ av_register_all(); @@ -110,60 +220,75 @@ int main (int argc, char **argv) exit(1); } - ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); - if (ret < 0) { - fprintf(stderr, "Could not find video stream in file\n"); - goto end; - } - stream_idx = ret; - stream = fmt_ctx->streams[stream_idx]; - - /* find decoder for the stream */ - dec_ctx = stream->codec; - dec = avcodec_find_decoder(dec_ctx->codec_id); - if (!dec) { - fprintf(stderr, "Failed to find any codec\n"); - ret = 1; - goto end; - } + if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) { + video_stream = fmt_ctx->streams[video_stream_idx]; + video_dec_ctx = video_stream->codec; - if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { - fprintf(stderr, "Failed to open codec\n"); - goto end; + video_dst_file = fopen(video_dst_filename, "wb"); + if (!video_dst_file) { + fprintf(stderr, "Could not open destination file %s\n", video_dst_filename); + ret = 1; + goto end; 
+ } + + /* allocate image where the decoded image will be put */ + ret = av_image_alloc(video_dst_data, video_dst_linesize, + video_dec_ctx->width, video_dec_ctx->height, + video_dec_ctx->pix_fmt, 1); + if (ret < 0) { + fprintf(stderr, "Could not allocate raw video buffer\n"); + goto end; + } + video_dst_bufsize = ret; } /* dump input information to stderr */ av_dump_format(fmt_ctx, 0, src_filename, 0); - dst_file = fopen(dst_filename, "wb"); - if (!dst_file) { - fprintf(stderr, "Could not open destination file %s\n", dst_filename); - ret = 1; - goto end; + if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) { + int nb_planes; + + audio_stream = fmt_ctx->streams[audio_stream_idx]; + audio_dec_ctx = audio_stream->codec; + audio_dst_file = fopen(audio_dst_filename, "wb"); + if (!audio_dst_file) { + fprintf(stderr, "Could not open destination file %s\n", video_dst_filename); + ret = 1; + goto end; + } + + nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ? 
+ audio_dec_ctx->channels : 1; + audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes); + if (!audio_dst_data) { + fprintf(stderr, "Could not allocate audio data buffers\n"); + ret = AVERROR(ENOMEM); + goto end; + } } - frame = avcodec_alloc_frame(); - if (!frame) { - fprintf(stderr, "Could not allocate video frame\n"); + if (!audio_stream && !video_stream) { + fprintf(stderr, "Could not find audio or video stream in the input, aborting\n"); ret = 1; goto end; } - /* allocate image where the decoded image will be put */ - ret = av_image_alloc(dst_data, dst_linesize, - dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, 1); - if (ret < 0) { - fprintf(stderr, "Could not alloc raw video buffer\n"); + frame = avcodec_alloc_frame(); + if (!frame) { + fprintf(stderr, "Could not allocate frame\n"); + ret = AVERROR(ENOMEM); goto end; } - dst_bufsize = ret; /* initialize packet, set data to NULL, let the demuxer fill it */ av_init_packet(&pkt); - pkt.size = 0; pkt.data = NULL; + pkt.size = 0; - printf("Demuxing file '%s' to '%s'\n", src_filename, dst_filename); + if (video_stream) + printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename); + if (audio_stream) + printf("Demuxing video from file '%s' into '%s'\n", src_filename, audio_dst_filename); /* read frames from the file */ while (av_read_frame(fmt_ctx, &pkt) >= 0) @@ -176,18 +301,39 @@ int main (int argc, char **argv) decode_packet(&got_frame, 1); } while (got_frame); - printf("Demuxing succeeded. 
Play the output file with the command:\n" - "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", - av_get_pix_fmt_name(dec_ctx->pix_fmt), dec_ctx->width, dec_ctx->height, - dst_filename); + printf("Demuxing succeeded.\n"); + + if (video_stream) { + printf("Play the output video file with the command:\n" + "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", + av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height, + video_dst_filename); + } + + if (audio_stream) { + const char *fmt; + + if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt) < 0)) + goto end; + printf("Play the output audio file with the command:\n" + "ffplay -f %s -ac %d -ar %d %s\n", + fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate, + audio_dst_filename); + } end: - avcodec_close(dec_ctx); + if (video_dec_ctx) + avcodec_close(video_dec_ctx); + if (audio_dec_ctx) + avcodec_close(audio_dec_ctx); avformat_close_input(&fmt_ctx); - if (dst_file) - fclose(dst_file); + if (video_dst_file) + fclose(video_dst_file); + if (audio_dst_file) + fclose(audio_dst_file); av_free(frame); - av_free(dst_data[0]); + av_free(video_dst_data[0]); + av_free(audio_dst_data); return ret < 0; } |