author     Muhammad Faiz <mfcc64@gmail.com>    2017-04-22 15:58:03 +0700
committer  Muhammad Faiz <mfcc64@gmail.com>    2017-04-23 14:43:31 +0700
commit     8893c943a9bdbe8ed2c0f99ca986d6f662d58185 (patch)
tree       291ed775e2bd976c81c01139e8a9a91ef2be29d4
parent     6af050d7d0c3c73f3d62115152db82ebd2dc5d57 (diff)
download   ffmpeg-8893c943a9bdbe8ed2c0f99ca986d6f662d58185.tar.gz
ff*: do not use AVFrame accessor
Reviewed-by: wm4 <nfxjfg@googlemail.com>
Signed-off-by: Muhammad Faiz <mfcc64@gmail.com>
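
The accessors dropped here (av_frame_get_channels(), av_frame_get_pkt_duration(), av_frame_get_best_effort_timestamp(), and friends) are, at this point in the tree, thin wrappers that simply return the corresponding public AVFrame field, so each replacement below is a one-for-one, behaviour-preserving change. A minimal sketch of the pattern, for illustration only (the helper names are hypothetical and not part of this patch):

    /* Illustration only -- not part of the patch.  Shows the accessor-to-field
     * rewrite applied throughout ffmpeg.c, ffmpeg_filter.c, ffplay.c and
     * ffprobe.c.  Assumes the libavutil of this era, where
     * av_frame_get_pkt_duration() still exists and reads frame->pkt_duration. */
    #include <libavutil/frame.h>

    static int64_t frame_pkt_duration_before(const AVFrame *frame)
    {
        return av_frame_get_pkt_duration(frame);   /* accessor call (old style) */
    }

    static int64_t frame_pkt_duration_after(const AVFrame *frame)
    {
        return frame->pkt_duration;                /* direct field access (new style) */
    }

The same substitution covers frame->channels, frame->channel_layout, frame->pkt_pos, frame->pkt_size, frame->decode_error_flags, frame->best_effort_timestamp and frame->metadata in the hunks that follow.
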
-rw-r--r--  ffmpeg.c         10
-rw-r--r--  ffmpeg_filter.c   2
-rw-r--r--  ffplay.c         24
-rw-r--r--  ffprobe.c        22
4 files changed, 29 insertions, 29 deletions
diff --git a/ffmpeg.c b/ffmpeg.c
index 75f5e592a9..70431e8808 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -1062,8 +1062,8 @@ static void do_video_out(OutputFile *of,
!ost->filters &&
next_picture &&
ist &&
- lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
- duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
+ lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
+ duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
}
if (!next_picture) {
@@ -1506,7 +1506,7 @@ static int reap_filters(int flush)
break;
case AVMEDIA_TYPE_AUDIO:
if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
- enc->channels != av_frame_get_channels(filtered_frame)) {
+ enc->channels != filtered_frame->channels) {
av_log(NULL, AV_LOG_ERROR,
"Audio filter graph output is not normalized and encoder does not support parameter changes\n");
break;
@@ -2126,7 +2126,7 @@ static void check_decode_result(InputStream *ist, int *got_output, int ret)
exit_program(1);
if (exit_on_error && *got_output && ist) {
- if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
+ if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
exit_program(1);
}
@@ -2455,7 +2455,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eo
}
ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
- best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
+ best_effort_timestamp= decoded_frame->best_effort_timestamp;
if (ist->framerate.num)
best_effort_timestamp = ist->cfr_next_pts++;
diff --git a/ffmpeg_filter.c b/ffmpeg_filter.c
index 219e473f69..896161a869 100644
--- a/ffmpeg_filter.c
+++ b/ffmpeg_filter.c
@@ -1162,7 +1162,7 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
ifilter->sample_rate = frame->sample_rate;
- ifilter->channels = av_frame_get_channels(frame);
+ ifilter->channels = frame->channels;
ifilter->channel_layout = frame->channel_layout;
if (frame->hw_frames_ctx) {
diff --git a/ffplay.c b/ffplay.c
index 763fd9e645..139da9f872 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -569,7 +569,7 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
ret = avcodec_receive_frame(d->avctx, frame);
if (ret >= 0) {
if (decoder_reorder_pts == -1) {
- frame->pts = av_frame_get_best_effort_timestamp(frame);
+ frame->pts = frame->best_effort_timestamp;
} else if (!decoder_reorder_pts) {
frame->pts = frame->pkt_dts;
}
@@ -1981,11 +1981,11 @@ static int audio_thread(void *arg)
tb = (AVRational){1, frame->sample_rate};
#if CONFIG_AVFILTER
- dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
+ dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
reconfigure =
cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
- frame->format, av_frame_get_channels(frame)) ||
+ frame->format, frame->channels) ||
is->audio_filter_src.channel_layout != dec_channel_layout ||
is->audio_filter_src.freq != frame->sample_rate ||
is->auddec.pkt_serial != last_serial;
@@ -1997,10 +1997,10 @@ static int audio_thread(void *arg)
av_log(NULL, AV_LOG_DEBUG,
"Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
- frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
+ frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
is->audio_filter_src.fmt = frame->format;
- is->audio_filter_src.channels = av_frame_get_channels(frame);
+ is->audio_filter_src.channels = frame->channels;
is->audio_filter_src.channel_layout = dec_channel_layout;
is->audio_filter_src.freq = frame->sample_rate;
last_serial = is->auddec.pkt_serial;
@@ -2019,7 +2019,7 @@ static int audio_thread(void *arg)
goto the_end;
af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
- af->pos = av_frame_get_pkt_pos(frame);
+ af->pos = frame->pkt_pos;
af->serial = is->auddec.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
@@ -2146,7 +2146,7 @@ static int video_thread(void *arg)
#endif
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
- ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
+ ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
av_frame_unref(frame);
#if CONFIG_AVFILTER
}
@@ -2290,13 +2290,13 @@ static int audio_decode_frame(VideoState *is)
frame_queue_next(&is->sampq);
} while (af->serial != is->audioq.serial);
- data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
+ data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
af->frame->nb_samples,
af->frame->format, 1);
dec_channel_layout =
- (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
- af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
+ (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
+ af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
if (af->frame->format != is->audio_src.fmt ||
@@ -2311,13 +2311,13 @@ static int audio_decode_frame(VideoState *is)
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
av_log(NULL, AV_LOG_ERROR,
"Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
- af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
+ af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
swr_free(&is->swr_ctx);
return -1;
}
is->audio_src.channel_layout = dec_channel_layout;
- is->audio_src.channels = av_frame_get_channels(af->frame);
+ is->audio_src.channels = af->frame->channels;
is->audio_src.freq = af->frame->sample_rate;
is->audio_src.fmt = af->frame->format;
}
diff --git a/ffprobe.c b/ffprobe.c
index 72f5ed7182..ce964e0093 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -2049,13 +2049,13 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_time("pkt_pts_time", frame->pts, &stream->time_base);
print_ts ("pkt_dts", frame->pkt_dts);
print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
- print_ts ("best_effort_timestamp", av_frame_get_best_effort_timestamp(frame));
- print_time("best_effort_timestamp_time", av_frame_get_best_effort_timestamp(frame), &stream->time_base);
- print_duration_ts ("pkt_duration", av_frame_get_pkt_duration(frame));
- print_duration_time("pkt_duration_time", av_frame_get_pkt_duration(frame), &stream->time_base);
- if (av_frame_get_pkt_pos (frame) != -1) print_fmt ("pkt_pos", "%"PRId64, av_frame_get_pkt_pos(frame));
+ print_ts ("best_effort_timestamp", frame->best_effort_timestamp);
+ print_time("best_effort_timestamp_time", frame->best_effort_timestamp, &stream->time_base);
+ print_duration_ts ("pkt_duration", frame->pkt_duration);
+ print_duration_time("pkt_duration_time", frame->pkt_duration, &stream->time_base);
+ if (frame->pkt_pos != -1) print_fmt ("pkt_pos", "%"PRId64, frame->pkt_pos);
else print_str_opt("pkt_pos", "N/A");
- if (av_frame_get_pkt_size(frame) != -1) print_val ("pkt_size", av_frame_get_pkt_size(frame), unit_byte_str);
+ if (frame->pkt_size != -1) print_val ("pkt_size", frame->pkt_size, unit_byte_str);
else print_str_opt("pkt_size", "N/A");
switch (stream->codecpar->codec_type) {
@@ -2086,18 +2086,18 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
if (s) print_str ("sample_fmt", s);
else print_str_opt("sample_fmt", "unknown");
print_int("nb_samples", frame->nb_samples);
- print_int("channels", av_frame_get_channels(frame));
- if (av_frame_get_channel_layout(frame)) {
+ print_int("channels", frame->channels);
+ if (frame->channel_layout) {
av_bprint_clear(&pbuf);
- av_bprint_channel_layout(&pbuf, av_frame_get_channels(frame),
- av_frame_get_channel_layout(frame));
+ av_bprint_channel_layout(&pbuf, frame->channels,
+ frame->channel_layout);
print_str ("channel_layout", pbuf.str);
} else
print_str_opt("channel_layout", "unknown");
break;
}
if (do_show_frame_tags)
- show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
+ show_tags(w, frame->metadata, SECTION_ID_FRAME_TAGS);
if (do_show_log)
show_log(w, SECTION_ID_FRAME_LOGS, SECTION_ID_FRAME_LOG, do_show_log);
if (frame->nb_side_data) {