author      Michael Niedermayer <michaelni@gmx.at>    2012-06-13 21:04:06 +0200
committer   Michael Niedermayer <michaelni@gmx.at>    2012-06-13 22:43:57 +0200
commit      c7b9eab2be7099b0d4f2fed4feaf69a7dda379f0 (patch)
tree        019d5b0a7eaa5e15782ec67d61100d9a3f91e916
parent      4a6d790a6fc0de15112a7bbfe8b2b58ef058a48d (diff)
parent      8517e9c476e8cf92d9ed25b6486bb43d3dc2c49d (diff)
download    ffmpeg-c7b9eab2be7099b0d4f2fed4feaf69a7dda379f0.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master:
rtmp: Add a new option 'rtmp_buffer', for setting the client buffer time
rtmp: Set the client buffer time to 3s instead of 0.26s
rtmp: Handle server bandwidth packets
rtmp: Display a verbose message when an unknown packet type is received
lavfi/audio: use av_samples_copy() instead of custom code.
configure: add all filters hardcoded into avconv to avconv_deps
avfiltergraph: remove a redundant call to avfilter_get_by_name().
lavfi: allow building without swscale.
build: Do not delete tests/vsynth2 directory, which is no longer created.
lavfi: replace AVFilterContext.input/output_count with nb_inputs/outputs
lavfi: make AVFilterPad opaque after two major bumps.
lavfi: add avfilter_pad_get_type() and avfilter_pad_get_name().
lavfi: make avfilter_get_video_buffer() private on next bump.
jack: update to new latency range API as the old one has been deprecated
rtmp: Tokenize the AMF connection parameters manually instead of using strtok_r
ppc: Rename H.264 optimization template file for consistency.
lavfi: add channelsplit audio filter.
golomb: check remaining bits during unary decoding in get_ur_golomb_jpegls()
sws: fix planar RGB input conversions for 9/10/16 bpp.
Conflicts:
Changelog
configure
doc/APIchanges
ffmpeg.c
libavcodec/golomb.h
libavcodec/v210dec.h
libavfilter/Makefile
libavfilter/allfilters.c
libavfilter/asrc_anullsrc.c
libavfilter/audio.c
libavfilter/avfilter.c
libavfilter/avfilter.h
libavfilter/avfiltergraph.c
libavfilter/buffersrc.c
libavfilter/formats.c
libavfilter/version.h
libavfilter/vf_frei0r.c
libavfilter/vf_pad.c
libavfilter/vf_scale.c
libavfilter/video.h
libavfilter/vsrc_color.c
libavformat/rtmpproto.c
libswscale/input.c
tests/Makefile
Merged-by: Michael Niedermayer <michaelni@gmx.at>
68 files changed, 757 insertions, 220 deletions
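For reference, the new rtmp_buffer option merged here is documented in the doc/protocols.texi hunk below as the client buffer time in milliseconds, with a default of 3000. The following is a minimal sketch, not part of the commit: the URL and the 10000 ms value are placeholders, and it assumes the usual route of passing protocol options through the avformat_open_input() options dictionary.

/* Hedged sketch: set the new "rtmp_buffer" protocol option (milliseconds)
 * via an options dictionary. Stream URL and buffer value are placeholders. */
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_rtmp_with_buffer(AVFormatContext **fmt_ctx)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "rtmp_buffer", "10000", 0);  /* 10 s instead of the 3 s default */
    ret = avformat_open_input(fmt_ctx, "rtmp://example.com/app/stream", NULL, &opts);
    av_dict_free(&opts);                            /* frees any options not consumed */
    return ret;
}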
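Much of this merge replaces direct AVFilterPad dereferences and the deprecated input_count/output_count fields with the new avfilter_pad_get_name()/avfilter_pad_get_type() accessors and nb_inputs/nb_outputs, as seen in the ffmpeg.c hunks below. A minimal sketch of the migrated calling pattern, assuming an already-initialised filter context:

/* Sketch of the new calling convention: no direct AVFilterPad access,
 * pad counts read from nb_inputs/nb_outputs (the old *_count fields are deprecated). */
#include <libavfilter/avfilter.h>
#include <stdio.h>

static void dump_input_pads(AVFilterContext *ctx)
{
    unsigned i;

    for (i = 0; i < ctx->nb_inputs; i++)   /* previously ctx->input_count */
        printf("input pad %u: name=%s type=%d\n", i,
               avfilter_pad_get_name(ctx->input_pads, i),
               (int)avfilter_pad_get_type(ctx->input_pads, i));
}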
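The jack change switches libavdevice to the newer JACK latency-range API when available. A sketch of the fallback pattern used in the libavdevice/jack_audio.c hunk, assuming HAVE_JACK_PORT_GET_LATENCY_RANGE comes from the new configure check and that the client and port handles are already opened:

/* Sketch of the latency query fallback: prefer jack_port_get_latency_range(),
 * fall back to the deprecated jack_port_get_total_latency() on older JACK. */
#include <jack/jack.h>

static jack_nframes_t capture_latency(jack_client_t *client, jack_port_t *port)
{
#if HAVE_JACK_PORT_GET_LATENCY_RANGE
    jack_latency_range_t range;
    jack_port_get_latency_range(port, JackCaptureLatency, &range);
    return range.max;
#else
    return jack_port_get_total_latency(client, port);
#endif
}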
diff --git a/.gitignore b/.gitignore index 138f076e40..e8b021d11b 100644 --- a/.gitignore +++ b/.gitignore @@ -50,7 +50,6 @@ tests/rotozoom tests/tiny_psnr tests/videogen tests/vsynth1 -tests/vsynth2 tools/aviocat tools/cws2fws tools/ffeval @@ -5,6 +5,7 @@ version next: - INI and flat output in ffprobe - Scene detection in libavfilter - Indeo Audio decoder +- channelsplit audio filter version 0.11: @@ -1210,6 +1210,7 @@ HAVE_LIST=" inet_aton inline_asm isatty + jack_port_get_latency_range kbhit ldbrx libdc1394_1 @@ -1730,7 +1731,6 @@ yadif_filter_deps="gpl" # libraries avdevice_deps="avcodec avformat" -avfilter_deps="swscale" avformat_deps="avcodec" postproc_deps="gpl" @@ -3354,7 +3354,8 @@ check_header soundcard.h enabled_any alsa_indev alsa_outdev && check_lib2 alsa/asoundlib.h snd_pcm_htimestamp -lasound -enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_func sem_timedwait +enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_func sem_timedwait && + check_func jack_port_get_latency_range -ljack enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio diff --git a/doc/APIchanges b/doc/APIchanges index 877e1184f3..4ced20f511 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -38,6 +38,15 @@ API changes, most recent first: 2012-03-26 - a67d9cf - lavfi 2.66.100 Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions. +2012-xx-xx - xxxxxxx - lavfi 2.23.0 - avfilter.h + Add AVFilterContext.nb_inputs/outputs. Deprecate + AVFilterContext.input/output_count. + +2012-xx-xx - xxxxxxx - lavfi 2.22.0 - avfilter.h + Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those + should now be used instead of accessing AVFilterPad members + directly. + 2012-xx-xx - xxxxxxx - lavu 51.32.0 - audioconvert.h Add av_get_channel_layout_channel_index(), av_get_channel_name() and av_channel_layout_extract_channel(). diff --git a/doc/filters.texi b/doc/filters.texi index ac79c4c4aa..f69970c149 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -576,6 +576,31 @@ Maximum compensation in samples per second. @end table +@section channelsplit +Split each channel in input audio stream into a separate output stream. + +This filter accepts the following named parameters: +@table @option +@item channel_layout +Channel layout of the input stream. Default is "stereo". +@end table + +For example, assuming a stereo input MP3 file +@example +ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv +@end example +will create an output Matroska file with two audio streams, one containing only +the left channel and the other the right channel. + +To split a 5.1 WAV file into per-channel files +@example +ffmpeg -i in.wav -filter_complex +'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]' +-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]' +front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]' +side_right.wav +@end example + @section resample Convert the audio sample format, sample rate and channel layout. This filter is not meant to be used directly. diff --git a/doc/protocols.texi b/doc/protocols.texi index 598b477dc8..2dd4762a33 100644 --- a/doc/protocols.texi +++ b/doc/protocols.texi @@ -228,6 +228,9 @@ Additionally, the following parameters can be set via command line options Name of application to connect on the RTMP server. This option overrides the parameter specified in the URI. +@item rtmp_buffer +Set the client buffer time in milliseconds. The default is 3000. 
+ @item rtmp_conn Extra arbitrary AMF connection parameters, parsed from a string, e.g. like @code{B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0}. @@ -708,7 +708,7 @@ static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost) static void init_input_filter(FilterGraph *fg, AVFilterInOut *in) { InputStream *ist = NULL; - enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type; + enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx); int i; // TODO: support other filter types @@ -978,7 +978,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, \ avio_printf(pb, "%s", ctx->filter->name); \ if (nb_pads > 1) \ - avio_printf(pb, ":%s", pads[inout->pad_idx].name); \ + avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\ avio_w8(pb, 0); \ avio_close_dyn_buf(pb, &f->name); \ } @@ -988,7 +988,7 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFil av_freep(&ofilter->name); DESCRIBE_FILTER_LINK(ofilter, out, 0); - switch (out->filter_ctx->output_pads[out->pad_idx].type) { + switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) { case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out); case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out); default: av_assert0(0); @@ -1132,7 +1132,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, av_freep(&ifilter->name); DESCRIBE_FILTER_LINK(ifilter, in, 1); - switch (in->filter_ctx->input_pads[in->pad_idx].type) { + switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) { case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in); case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in); default: av_assert0(0); @@ -4899,7 +4899,8 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o, { OutputStream *ost; - switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) { + switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads, + ofilter->out_tmp->pad_idx)) { case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break; case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break; default: @@ -4961,7 +4962,8 @@ static void opt_output_file(void *optctx, const char *filename) if (!ofilter->out_tmp || ofilter->out_tmp->name) continue; - switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) { + switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads, + ofilter->out_tmp->pad_idx)) { case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break; case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break; case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break; diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c index 3e7edc74df..7c89b852a0 100644 --- a/libavcodec/ppc/h264_altivec.c +++ b/libavcodec/ppc/h264_altivec.c @@ -39,7 +39,7 @@ #define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num #define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec #define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num -#include "h264_template_altivec.c" +#include "h264_altivec_template.c" #undef OP_U8_ALTIVEC #undef PREFIX_h264_chroma_mc8_altivec #undef PREFIX_h264_chroma_mc8_num @@ -59,7 +59,7 @@ #define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num #define 
PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec #define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num -#include "h264_template_altivec.c" +#include "h264_altivec_template.c" #undef OP_U8_ALTIVEC #undef PREFIX_h264_chroma_mc8_altivec #undef PREFIX_h264_chroma_mc8_num diff --git a/libavcodec/ppc/h264_template_altivec.c b/libavcodec/ppc/h264_altivec_template.c index 2573e9c6f7..2573e9c6f7 100644 --- a/libavcodec/ppc/h264_template_altivec.c +++ b/libavcodec/ppc/h264_altivec_template.c diff --git a/libavcodec/ppc/vc1dsp_altivec.c b/libavcodec/ppc/vc1dsp_altivec.c index bf76adb359..33e87afc3a 100644 --- a/libavcodec/ppc/vc1dsp_altivec.c +++ b/libavcodec/ppc/vc1dsp_altivec.c @@ -325,13 +325,13 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block) #define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC #define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec -#include "h264_template_altivec.c" +#include "h264_altivec_template.c" #undef OP_U8_ALTIVEC #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec #define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC #define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec -#include "h264_template_altivec.c" +#include "h264_altivec_template.c" #undef OP_U8_ALTIVEC #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec diff --git a/libavcodec/v210dec.h b/libavcodec/v210dec.h index 1f06f9eac9..e1e3d32ccc 100644 --- a/libavcodec/v210dec.h +++ b/libavcodec/v210dec.h @@ -22,6 +22,7 @@ #include "libavutil/log.h" #include "libavutil/opt.h" + typedef struct { AVClass *av_class; int custom_stride; diff --git a/libavdevice/jack_audio.c b/libavdevice/jack_audio.c index 1fa4f86724..33ee19ce73 100644 --- a/libavdevice/jack_audio.c +++ b/libavdevice/jack_audio.c @@ -92,7 +92,13 @@ static int process_callback(jack_nframes_t nframes, void *arg) /* Copy and interleave audio data from the JACK buffer into the packet */ for (i = 0; i < self->nports; i++) { + #if HAVE_JACK_PORT_GET_LATENCY_RANGE + jack_latency_range_t range; + jack_port_get_latency_range(self->ports[i], JackCaptureLatency, &range); + latency += range.max; + #else latency += jack_port_get_total_latency(self->client, self->ports[i]); + #endif buffer = jack_port_get_buffer(self->ports[i], self->buffer_size); for (j = 0; j < self->buffer_size; j++) pkt_data[j * self->nports + i] = buffer[j]; diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 29345fc15e..95126a8b1d 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -1,9 +1,10 @@ include $(SUBDIR)../config.mak NAME = avfilter -FFLIBS = avutil swscale +FFLIBS = avutil FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample +FFLIBS-$(CONFIG_SCALE_FILTER) += swscale FFLIBS-$(CONFIG_ACONVERT_FILTER) += swresample FFLIBS-$(CONFIG_AMOVIE_FILTER) += avformat avcodec @@ -54,6 +55,7 @@ OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o OBJS-$(CONFIG_ASPLIT_FILTER) += split.o OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o +OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o OBJS-$(CONFIG_PAN_FILTER) += af_pan.o OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o @@ -102,6 +104,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o +OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o 
OBJS-$(CONFIG_SELECT_FILTER) += vf_select.o OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o diff --git a/libavfilter/af_amerge.c b/libavfilter/af_amerge.c index 3183fdcac9..95dd1ae479 100644 --- a/libavfilter/af_amerge.c +++ b/libavfilter/af_amerge.c @@ -118,7 +118,7 @@ static int query_formats(AVFilterContext *ctx) if ((inlayout[i] >> c) & 1) *(route[i]++) = out_ch_number++; } - formats = avfilter_make_format_list(ff_packed_sample_fmts); + formats = avfilter_make_format_list(ff_packed_sample_fmts_array); avfilter_set_common_sample_formats(ctx, formats); for (i = 0; i < am->nb_inputs; i++) { layouts = NULL; diff --git a/libavfilter/af_amix.c b/libavfilter/af_amix.c index 3fc2e8499a..003a8e8e62 100644 --- a/libavfilter/af_amix.c +++ b/libavfilter/af_amix.c @@ -454,10 +454,10 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) AVFilterLink *outlink = ctx->outputs[0]; int i; - for (i = 0; i < ctx->input_count; i++) + for (i = 0; i < ctx->nb_inputs; i++) if (ctx->inputs[i] == inlink) break; - if (i >= ctx->input_count) { + if (i >= ctx->nb_inputs) { av_log(ctx, AV_LOG_ERROR, "unknown input link\n"); return; } @@ -518,7 +518,7 @@ static void uninit(AVFilterContext *ctx) av_freep(&s->input_state); av_freep(&s->input_scale); - for (i = 0; i < ctx->input_count; i++) + for (i = 0; i < ctx->nb_inputs; i++) av_freep(&ctx->input_pads[i].name); } diff --git a/libavfilter/af_anull.c b/libavfilter/af_anull.c index c23c9d8005..81d1bf8a25 100644 --- a/libavfilter/af_anull.c +++ b/libavfilter/af_anull.c @@ -24,6 +24,7 @@ #include "audio.h" #include "avfilter.h" +#include "internal.h" AVFilter avfilter_af_anull = { .name = "anull", diff --git a/libavfilter/af_channelsplit.c b/libavfilter/af_channelsplit.c new file mode 100644 index 0000000000..c9b31fa791 --- /dev/null +++ b/libavfilter/af_channelsplit.c @@ -0,0 +1,146 @@ +/* + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Channel split filter + * + * Split an audio stream into per-channel streams. 
+ */ + +#include "libavutil/audioconvert.h" +#include "libavutil/opt.h" + +#include "audio.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" + +typedef struct ChannelSplitContext { + const AVClass *class; + + uint64_t channel_layout; + char *channel_layout_str; +} ChannelSplitContext; + +#define OFFSET(x) offsetof(ChannelSplitContext, x) +#define A AV_OPT_FLAG_AUDIO_PARAM +static const AVOption options[] = { + { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A }, + { NULL }, +}; + +static const AVClass channelsplit_class = { + .class_name = "channelsplit filter", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +static int init(AVFilterContext *ctx, const char *arg, void *opaque) +{ + ChannelSplitContext *s = ctx->priv; + int nb_channels; + int ret = 0, i; + + s->class = &channelsplit_class; + av_opt_set_defaults(s); + if ((ret = av_set_options_string(s, arg, "=", ":")) < 0) { + av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", arg); + return ret; + } + if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) { + av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n", + s->channel_layout_str); + ret = AVERROR(EINVAL); + goto fail; + } + + nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); + for (i = 0; i < nb_channels; i++) { + uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i); + AVFilterPad pad = { 0 }; + + pad.type = AVMEDIA_TYPE_AUDIO; + pad.name = av_get_channel_name(channel); + + ff_insert_outpad(ctx, i, &pad); + } + +fail: + av_opt_free(s); + return ret; +} + +static int query_formats(AVFilterContext *ctx) +{ + ChannelSplitContext *s = ctx->priv; + AVFilterChannelLayouts *in_layouts = NULL; + int i; + + ff_set_common_formats (ctx, ff_planar_sample_fmts()); + ff_set_common_samplerates(ctx, ff_all_samplerates()); + + ff_add_channel_layout(&in_layouts, s->channel_layout); + ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts); + + for (i = 0; i < ctx->nb_outputs; i++) { + AVFilterChannelLayouts *out_layouts = NULL; + uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i); + + ff_add_channel_layout(&out_layouts, channel); + ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts); + } + + return 0; +} + +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) +{ + AVFilterContext *ctx = inlink->dst; + int i; + + for (i = 0; i < ctx->nb_outputs; i++) { + AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE); + + if (!buf_out) + return; + + buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i]; + buf_out->audio->channel_layout = + av_channel_layout_extract_channel(buf->audio->channel_layout, i); + + ff_filter_samples(ctx->outputs[i], buf_out); + } + avfilter_unref_buffer(buf); +} + +AVFilter avfilter_af_channelsplit = { + .name = "channelsplit", + .description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams"), + .priv_size = sizeof(ChannelSplitContext), + + .init = init, + .query_formats = query_formats, + + .inputs = (const AVFilterPad[]){{ .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + .filter_samples = filter_samples, }, + { NULL }}, + .outputs = (const AVFilterPad[]){{ NULL }}, +}; diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index b9d44f2fdf..f8d6b389b4 100644 --- a/libavfilter/allfilters.c +++ 
b/libavfilter/allfilters.c @@ -44,6 +44,7 @@ void avfilter_register_all(void) REGISTER_FILTER (ASPLIT, asplit, af); REGISTER_FILTER (ASTREAMSYNC, astreamsync, af); REGISTER_FILTER (ASYNCTS, asyncts, af); + REGISTER_FILTER (CHANNELSPLIT,channelsplit,af); REGISTER_FILTER (EARWAX, earwax, af); REGISTER_FILTER (PAN, pan, af); REGISTER_FILTER (SILENCEDETECT, silencedetect, af); @@ -92,6 +93,7 @@ void avfilter_register_all(void) REGISTER_FILTER (PAD, pad, vf); REGISTER_FILTER (PIXDESCTEST, pixdesctest, vf); REGISTER_FILTER (REMOVELOGO, removelogo, vf); + REGISTER_FILTER (SCALE, scale, vf); REGISTER_FILTER (SELECT, select, vf); REGISTER_FILTER (SETDAR, setdar, vf); REGISTER_FILTER (SETFIELD, setfield, vf); @@ -143,8 +145,4 @@ void avfilter_register_all(void) extern AVFilter avfilter_asink_abuffer; avfilter_register(&avfilter_asink_abuffer); } - { - extern AVFilter avfilter_vf_scale; - avfilter_register(&avfilter_vf_scale); - } } diff --git a/libavfilter/asink_anullsink.c b/libavfilter/asink_anullsink.c index 6314840348..4349544b62 100644 --- a/libavfilter/asink_anullsink.c +++ b/libavfilter/asink_anullsink.c @@ -19,6 +19,7 @@ */ #include "avfilter.h" +#include "internal.h" static void null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { } diff --git a/libavfilter/asrc_anullsrc.c b/libavfilter/asrc_anullsrc.c index 73932bd538..473c6b60e5 100644 --- a/libavfilter/asrc_anullsrc.c +++ b/libavfilter/asrc_anullsrc.c @@ -24,6 +24,7 @@ * null audio source */ +#include "internal.h" #include "libavutil/audioconvert.h" #include "libavutil/opt.h" diff --git a/libavfilter/audio.c b/libavfilter/audio.c index 1334c2b7bc..d473b99393 100644 --- a/libavfilter/audio.c +++ b/libavfilter/audio.c @@ -160,7 +160,7 @@ void ff_default_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesr { AVFilterLink *outlink = NULL; - if (inlink->dst->output_count) + if (inlink->dst->nb_outputs) outlink = inlink->dst->outputs[0]; if (outlink) { @@ -190,10 +190,7 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) /* prepare to copy the samples if the buffer has insufficient permissions */ if ((dst->min_perms & samplesref->perms) != dst->min_perms || dst->rej_perms & samplesref->perms) { - int i, size, planar = av_sample_fmt_is_planar(samplesref->format); - int planes = !planar ? 1: - av_get_channel_layout_nb_channels(samplesref->audio->channel_layout); - + int size; av_log(link->dst, AV_LOG_DEBUG, "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n", samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms); @@ -204,13 +201,10 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) link->cur_buf->audio->sample_rate = samplesref->audio->sample_rate; /* Copy actual data into new samples buffer */ - /* src can be larger than dst if it was allocated larger than necessary. - dst can be slightly larger due to extra alignment padding. 
*/ - size = FFMIN(samplesref->linesize[0], link->cur_buf->linesize[0]); - for (i = 0; samplesref->data[i] && i < 8; i++) - memcpy(link->cur_buf->data[i], samplesref->data[i], size); - for (i = 0; i < planes; i++) - memcpy(link->cur_buf->extended_data[i], samplesref->extended_data[i], size); + av_samples_copy(link->cur_buf->extended_data, samplesref->extended_data, + 0, 0, samplesref->audio->nb_samples, + av_get_channel_layout_nb_channels(link->channel_layout), + link->format); avfilter_unref_buffer(samplesref); } else diff --git a/libavfilter/audio.h b/libavfilter/audio.h index b63f85f45c..e361edc5f8 100644 --- a/libavfilter/audio.h +++ b/libavfilter/audio.h @@ -24,7 +24,7 @@ #include "avfilter.h" -static const enum AVSampleFormat ff_packed_sample_fmts[] = { +static const enum AVSampleFormat ff_packed_sample_fmts_array[] = { AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, @@ -33,7 +33,7 @@ static const enum AVSampleFormat ff_packed_sample_fmts[] = { AV_SAMPLE_FMT_NONE }; -static const enum AVSampleFormat ff_planar_sample_fmts[] = { +static const enum AVSampleFormat ff_planar_sample_fmts_array[] = { AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P, diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c index f79c7129df..6c2aaa3549 100644 --- a/libavfilter/avfilter.c +++ b/libavfilter/avfilter.c @@ -120,8 +120,8 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad, { AVFilterLink *link; - if (src->output_count <= srcpad || dst->input_count <= dstpad || - src->outputs[srcpad] || dst->inputs[dstpad]) + if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad || + src->outputs[srcpad] || dst->inputs[dstpad]) return -1; if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) { @@ -200,9 +200,9 @@ int avfilter_config_links(AVFilterContext *filter) unsigned i; int ret; - for (i = 0; i < filter->input_count; i ++) { + for (i = 0; i < filter->nb_inputs; i ++) { AVFilterLink *link = filter->inputs[i]; - AVFilterLink *inlink = link->src->input_count ? + AVFilterLink *inlink = link->src->nb_inputs ? 
link->src->inputs[0] : NULL; if (!link) continue; @@ -222,7 +222,7 @@ int avfilter_config_links(AVFilterContext *filter) return ret; if (!(config_link = link->srcpad->config_props)) { - if (link->src->input_count != 1) { + if (link->src->nb_inputs != 1) { av_log(link->src, AV_LOG_ERROR, "Source filters and filters " "with more than one input " "must set config_props() " @@ -335,7 +335,7 @@ int ff_poll_frame(AVFilterLink *link) if (link->srcpad->poll_frame) return link->srcpad->poll_frame(link); - for (i = 0; i < link->src->input_count; i++) { + for (i = 0; i < link->src->nb_inputs; i++) { int val; if (!link->src->inputs[i]) return -1; @@ -450,27 +450,31 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in goto err; } - ret->input_count = pad_count(filter->inputs); - if (ret->input_count) { - ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->input_count); + ret->nb_inputs = pad_count(filter->inputs); + if (ret->nb_inputs ) { + ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs); if (!ret->input_pads) goto err; - memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->input_count); - ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->input_count); + memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs); + ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs); if (!ret->inputs) goto err; } - ret->output_count = pad_count(filter->outputs); - if (ret->output_count) { - ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->output_count); + ret->nb_outputs = pad_count(filter->outputs); + if (ret->nb_outputs) { + ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs); if (!ret->output_pads) goto err; - memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->output_count); - ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->output_count); + memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs); + ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs); if (!ret->outputs) goto err; } +#if FF_API_FOO_COUNT + ret->output_count = ret->nb_outputs; + ret->input_count = ret->nb_inputs; +#endif *filter_ctx = ret; return 0; @@ -478,10 +482,10 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in err: av_freep(&ret->inputs); av_freep(&ret->input_pads); - ret->input_count = 0; + ret->nb_inputs = 0; av_freep(&ret->outputs); av_freep(&ret->output_pads); - ret->output_count = 0; + ret->nb_outputs = 0; av_freep(&ret->priv); av_free(ret); return AVERROR(ENOMEM); @@ -498,7 +502,7 @@ void avfilter_free(AVFilterContext *filter) if (filter->filter->uninit) filter->filter->uninit(filter); - for (i = 0; i < filter->input_count; i++) { + for (i = 0; i < filter->nb_inputs; i++) { if ((link = filter->inputs[i])) { if (link->src) link->src->outputs[link->srcpad - link->src->output_pads] = NULL; @@ -511,7 +515,7 @@ void avfilter_free(AVFilterContext *filter) } avfilter_link_free(&link); } - for (i = 0; i < filter->output_count; i++) { + for (i = 0; i < filter->nb_outputs; i++) { if ((link = filter->outputs[i])) { if (link->dst) link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL; @@ -546,6 +550,16 @@ int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque return ret; } +const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx) +{ + return pads[pad_idx].name; +} + +enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx) +{ + return pads[pad_idx].type; +} + #if 
FF_API_DEFAULT_CONFIG_OUTPUT_LINK void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, AVFilterPad **pads, AVFilterLink ***links, @@ -556,14 +570,20 @@ void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, void avfilter_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p) { - ff_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad), + ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad), &f->input_pads, &f->inputs, p); +#if FF_API_FOO_COUNT + f->input_count = f->nb_inputs; +#endif } void avfilter_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p) { - ff_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad), + ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad), &f->output_pads, &f->outputs, p); +#if FF_API_FOO_COUNT + f->output_count = f->nb_outputs; +#endif } int avfilter_poll_frame(AVFilterLink *link) { diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h index 955550eb1e..3c7af2d83d 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -369,10 +369,16 @@ void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats * */ #endif +#if FF_API_AVFILTERPAD_PUBLIC /** * A filter pad used for either input or output. * * See doc/filter_design.txt for details on how to implement the methods. + * + * @warning this struct might be removed from public API. + * users should call avfilter_pad_get_name() and avfilter_pad_get_type() + * to access the name and type fields; there should be no need to access + * any other fields from outside of libavfilter. */ struct AVFilterPad { /** @@ -499,6 +505,29 @@ struct AVFilterPad { */ int (*config_props)(AVFilterLink *link); }; +#endif + +/** + * Get the name of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx); #if FF_API_FILTERS_PUBLIC /** default handler for start_frame() for video inputs */ @@ -608,16 +637,23 @@ struct AVFilterContext { char *name; ///< name of this filter instance - unsigned input_count; ///< number of input pads +#if FF_API_FOO_COUNT + unsigned input_count; ///< @deprecated use nb_inputs +#endif AVFilterPad *input_pads; ///< array of input pads AVFilterLink **inputs; ///< array of pointers to input links - unsigned output_count; ///< number of output pads +#if FF_API_FOO_COUNT + unsigned output_count; ///< @deprecated use nb_outputs +#endif AVFilterPad *output_pads; ///< array of output pads AVFilterLink **outputs; ///< array of pointers to output links void *priv; ///< private data for use by the filter + unsigned nb_inputs; ///< number of input pads + unsigned nb_outputs; ///< number of output pads + struct AVFilterCommand *command_queue; }; @@ -777,19 +813,11 @@ void avfilter_link_free(AVFilterLink **link); */ int avfilter_config_links(AVFilterContext *filter); -/** - * Request a picture buffer with a specific set of permissions. 
- * - * @param link the output link to the filter from which the buffer will - * be requested - * @param perms the required access permissions - * @param w the minimum width of the buffer to allocate - * @param h the minimum height of the buffer to allocate - * @return A reference to the buffer. This must be unreferenced with - * avfilter_unref_buffer when you are finished with it. - */ +#if FF_API_FILTERS_PUBLIC +attribute_deprecated AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h); +#endif /** * Create a buffer reference wrapped around an already allocated image diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c index 09bbe106cf..0f363b28e9 100644 --- a/libavfilter/avfiltergraph.c +++ b/libavfilter/avfiltergraph.c @@ -118,7 +118,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx) for (i = 0; i < graph->filter_count; i++) { filt = graph->filters[i]; - for (j = 0; j < filt->input_count; j++) { + for (j = 0; j < filt->nb_inputs; j++) { if (!filt->inputs[j] || !filt->inputs[j]->src) { av_log(log_ctx, AV_LOG_ERROR, "Input pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any source\n", @@ -127,7 +127,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx) } } - for (j = 0; j < filt->output_count; j++) { + for (j = 0; j < filt->nb_outputs; j++) { if (!filt->outputs[j] || !filt->outputs[j]->dst) { av_log(log_ctx, AV_LOG_ERROR, "Output pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any destination\n", @@ -153,7 +153,7 @@ static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx) for (i=0; i < graph->filter_count; i++) { filt = graph->filters[i]; - if (!filt->output_count) { + if (!filt->nb_outputs) { if ((ret = avfilter_config_links(filt))) return ret; } @@ -271,7 +271,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) /* Call query_formats on sources first. This is a temporary workaround for amerge, until format renegociation is implemented. */ - if (!graph->filters[i]->input_count == j) + if (!graph->filters[i]->nb_inputs == j) continue; if (graph->filters[i]->filter->query_formats) ret = filter_query_formats(graph->filters[i]); @@ -286,7 +286,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) for (i = 0; i < graph->filter_count; i++) { AVFilterContext *filter = graph->filters[i]; - for (j = 0; j < filter->input_count; j++) { + for (j = 0; j < filter->nb_inputs; j++) { AVFilterLink *link = filter->inputs[j]; #if 0 if (!link) continue; @@ -348,11 +348,16 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) /* couldn't merge format lists. 
auto-insert conversion filter */ switch (link->type) { case AVMEDIA_TYPE_VIDEO: + if (!(filter = avfilter_get_by_name("scale"))) { + av_log(log_ctx, AV_LOG_ERROR, "'scale' filter " + "not present, cannot convert pixel formats.\n"); + return AVERROR(EINVAL); + } + snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d", scaler_count++); snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts); - if ((ret = avfilter_graph_create_filter(&convert, - avfilter_get_by_name("scale"), + if ((ret = avfilter_graph_create_filter(&convert, filter, inst_name, scale_args, NULL, graph)) < 0) return ret; @@ -366,8 +371,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d", resampler_count++); - if ((ret = avfilter_graph_create_filter(&convert, - avfilter_get_by_name("aresample"), + if ((ret = avfilter_graph_create_filter(&convert, filter, inst_name, NULL, NULL, graph)) < 0) return ret; break; @@ -464,7 +468,7 @@ static int pick_format(AVFilterLink *link, AVFilterLink *ref) #define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \ do { \ - for (i = 0; i < filter->input_count; i++) { \ + for (i = 0; i < filter->nb_inputs; i++) { \ AVFilterLink *link = filter->inputs[i]; \ fmt_type fmt; \ \ @@ -472,7 +476,7 @@ do { \ continue; \ fmt = link->out_ ## list->var[0]; \ \ - for (j = 0; j < filter->output_count; j++) { \ + for (j = 0; j < filter->nb_outputs; j++) { \ AVFilterLink *out_link = filter->outputs[j]; \ list_type *fmts; \ \ @@ -529,19 +533,19 @@ static void swap_samplerates_on_filter(AVFilterContext *filter) int sample_rate; int i, j; - for (i = 0; i < filter->input_count; i++) { + for (i = 0; i < filter->nb_inputs; i++) { link = filter->inputs[i]; if (link->type == AVMEDIA_TYPE_AUDIO && link->out_samplerates->format_count == 1) break; } - if (i == filter->input_count) + if (i == filter->nb_inputs) return; sample_rate = link->out_samplerates->formats[0]; - for (i = 0; i < filter->output_count; i++) { + for (i = 0; i < filter->nb_outputs; i++) { AVFilterLink *outlink = filter->outputs[i]; int best_idx, best_diff = INT_MAX; @@ -576,19 +580,19 @@ static void swap_channel_layouts_on_filter(AVFilterContext *filter) uint64_t chlayout; int i, j; - for (i = 0; i < filter->input_count; i++) { + for (i = 0; i < filter->nb_inputs; i++) { link = filter->inputs[i]; if (link->type == AVMEDIA_TYPE_AUDIO && link->out_channel_layouts->nb_channel_layouts == 1) break; } - if (i == filter->input_count) + if (i == filter->nb_inputs) return; chlayout = link->out_channel_layouts->channel_layouts[0]; - for (i = 0; i < filter->output_count; i++) { + for (i = 0; i < filter->nb_outputs; i++) { AVFilterLink *outlink = filter->outputs[i]; int best_idx, best_score = INT_MIN; @@ -629,20 +633,20 @@ static void swap_sample_fmts_on_filter(AVFilterContext *filter) int format, bps; int i, j; - for (i = 0; i < filter->input_count; i++) { + for (i = 0; i < filter->nb_inputs; i++) { link = filter->inputs[i]; if (link->type == AVMEDIA_TYPE_AUDIO && link->out_formats->format_count == 1) break; } - if (i == filter->input_count) + if (i == filter->nb_inputs) return; format = link->out_formats->formats[0]; bps = av_get_bytes_per_sample(format); - for (i = 0; i < filter->output_count; i++) { + for (i = 0; i < filter->nb_outputs; i++) { AVFilterLink *outlink = filter->outputs[i]; int best_idx, best_score = INT_MIN; @@ -700,24 +704,24 @@ static int pick_formats(AVFilterGraph *graph) change = 0; for (i = 0; i < 
graph->filter_count; i++) { AVFilterContext *filter = graph->filters[i]; - if (filter->input_count){ - for (j = 0; j < filter->input_count; j++){ + if (filter->nb_inputs){ + for (j = 0; j < filter->nb_inputs; j++){ if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->format_count == 1) { pick_format(filter->inputs[j], NULL); change = 1; } } } - if (filter->output_count){ - for (j = 0; j < filter->output_count; j++){ + if (filter->nb_outputs){ + for (j = 0; j < filter->nb_outputs; j++){ if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->format_count == 1) { pick_format(filter->outputs[j], NULL); change = 1; } } } - if (filter->input_count && filter->output_count && filter->inputs[0]->format>=0) { - for (j = 0; j < filter->output_count; j++) { + if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format>=0) { + for (j = 0; j < filter->nb_outputs; j++) { if(filter->outputs[j]->format<0) { pick_format(filter->outputs[j], filter->inputs[0]); change = 1; @@ -730,10 +734,10 @@ static int pick_formats(AVFilterGraph *graph) for (i = 0; i < graph->filter_count; i++) { AVFilterContext *filter = graph->filters[i]; - for (j = 0; j < filter->input_count; j++) + for (j = 0; j < filter->nb_inputs; j++) if ((ret = pick_format(filter->inputs[j], NULL)) < 0) return ret; - for (j = 0; j < filter->output_count; j++) + for (j = 0; j < filter->nb_outputs; j++) if ((ret = pick_format(filter->outputs[j], NULL)) < 0) return ret; } @@ -778,18 +782,18 @@ static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph, for (i = 0; i < graph->filter_count; i++) { f = graph->filters[i]; - for (j = 0; j < f->input_count; j++) { + for (j = 0; j < f->nb_inputs; j++) { f->inputs[j]->graph = graph; f->inputs[j]->age_index = -1; } - for (j = 0; j < f->output_count; j++) { + for (j = 0; j < f->nb_outputs; j++) { f->outputs[j]->graph = graph; f->outputs[j]->age_index= -1; } - if (!f->output_count) { - if (f->input_count > INT_MAX - sink_links_count) + if (!f->nb_outputs) { + if (f->nb_inputs > INT_MAX - sink_links_count) return AVERROR(EINVAL); - sink_links_count += f->input_count; + sink_links_count += f->nb_inputs; } } sinks = av_calloc(sink_links_count, sizeof(*sinks)); @@ -797,8 +801,8 @@ static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph, return AVERROR(ENOMEM); for (i = 0; i < graph->filter_count; i++) { f = graph->filters[i]; - if (!f->output_count) { - for (j = 0; j < f->input_count; j++) { + if (!f->nb_outputs) { + for (j = 0; j < f->nb_inputs; j++) { sinks[n] = f->inputs[j]; f->inputs[j]->age_index = n++; } diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c index b339dd79f5..b50a5e1715 100644 --- a/libavfilter/buffersrc.c +++ b/libavfilter/buffersrc.c @@ -84,7 +84,7 @@ static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx, switch (outlink->type) { case AVMEDIA_TYPE_VIDEO: - buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, + buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, ref->video->w, ref->video->h); if(!buf) return NULL; diff --git a/libavfilter/formats.c b/libavfilter/formats.c index 930ed16f5a..0b1ef30d22 100644 --- a/libavfilter/formats.c +++ b/libavfilter/formats.c @@ -303,6 +303,18 @@ AVFilterFormats *avfilter_make_all_packing_formats(void) } #endif +AVFilterFormats *ff_planar_sample_fmts(void) +{ + AVFilterFormats *ret = NULL; + int fmt; + + for (fmt = 0; fmt < AV_SAMPLE_FMT_NB; fmt++) + if (av_sample_fmt_is_planar(fmt)) + ff_add_format(&ret, fmt); + + return ret; +} + AVFilterFormats *ff_all_samplerates(void) { 
AVFilterFormats *ret = av_mallocz(sizeof(*ret)); @@ -401,13 +413,13 @@ void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref) { \ int count = 0, i; \ \ - for (i = 0; i < ctx->input_count; i++) { \ + for (i = 0; i < ctx->nb_inputs; i++) { \ if (ctx->inputs[i] && !ctx->inputs[i]->out_fmts) { \ ref(fmts, &ctx->inputs[i]->out_fmts); \ count++; \ } \ } \ - for (i = 0; i < ctx->output_count; i++) { \ + for (i = 0; i < ctx->nb_outputs; i++) { \ if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \ ref(fmts, &ctx->outputs[i]->in_fmts); \ count++; \ diff --git a/libavfilter/formats.h b/libavfilter/formats.h index 7e16733b74..4cbfb74074 100644 --- a/libavfilter/formats.h +++ b/libavfilter/formats.h @@ -163,6 +163,11 @@ int ff_add_format(AVFilterFormats **avff, int64_t fmt); AVFilterFormats *ff_all_formats(enum AVMediaType type); /** + * Construct a formats list containing all planar sample formats. + */ +AVFilterFormats *ff_planar_sample_fmts(void); + +/** * Return a format list which contains the intersection of the formats of * a and b. Also, all the references of a, all the references of b, and * a and b themselves will be deallocated. diff --git a/libavfilter/graphparser.c b/libavfilter/graphparser.c index 3f23977474..ba5d9b05a2 100644 --- a/libavfilter/graphparser.c +++ b/libavfilter/graphparser.c @@ -226,7 +226,7 @@ static int link_filter_inouts(AVFilterContext *filt_ctx, { int pad, ret; - for (pad = 0; pad < filt_ctx->input_count; pad++) { + for (pad = 0; pad < filt_ctx->nb_inputs; pad++) { AVFilterInOut *p = *curr_inputs; if (p) { @@ -254,7 +254,7 @@ static int link_filter_inouts(AVFilterContext *filt_ctx, return AVERROR(EINVAL); } - pad = filt_ctx->output_count; + pad = filt_ctx->nb_outputs; while (pad--) { AVFilterInOut *currlinkn = av_mallocz(sizeof(AVFilterInOut)); if (!currlinkn) diff --git a/libavfilter/internal.h b/libavfilter/internal.h index 997b85f3c4..7e5cc1ac5e 100644 --- a/libavfilter/internal.h +++ b/libavfilter/internal.h @@ -50,6 +50,132 @@ typedef struct AVFilterCommand { */ void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link); +#if !FF_API_AVFILTERPAD_PUBLIC +/** + * A filter pad used for either input or output. + */ +struct AVFilterPad { + /** + * Pad name. The name is unique among inputs and among outputs, but an + * input may have the same name as an output. This may be NULL if this + * pad has no need to ever be referenced by name. + */ + const char *name; + + /** + * AVFilterPad type. + */ + enum AVMediaType type; + + /** + * Minimum required permissions on incoming buffers. Any buffer with + * insufficient permissions will be automatically copied by the filter + * system to a new buffer which provides the needed access permissions. + * + * Input pads only. + */ + int min_perms; + + /** + * Permissions which are not accepted on incoming buffers. Any buffer + * which has any of these permissions set will be automatically copied + * by the filter system to a new buffer which does not have those + * permissions. This can be used to easily disallow buffers with + * AV_PERM_REUSE. + * + * Input pads only. + */ + int rej_perms; + + /** + * Callback called before passing the first slice of a new frame. If + * NULL, the filter layer will default to storing a reference to the + * picture inside the link structure. + * + * Input video pads only. + */ + void (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); + + /** + * Callback function to get a video buffer. 
If NULL, the filter system will + * use avfilter_default_get_video_buffer(). + * + * Input video pads only. + */ + AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h); + + /** + * Callback function to get an audio buffer. If NULL, the filter system will + * use avfilter_default_get_audio_buffer(). + * + * Input audio pads only. + */ + AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms, + int nb_samples); + + /** + * Callback called after the slices of a frame are completely sent. If + * NULL, the filter layer will default to releasing the reference stored + * in the link structure during start_frame(). + * + * Input video pads only. + */ + void (*end_frame)(AVFilterLink *link); + + /** + * Slice drawing callback. This is where a filter receives video data + * and should do its processing. + * + * Input video pads only. + */ + void (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); + + /** + * Samples filtering callback. This is where a filter receives audio data + * and should do its processing. + * + * Input audio pads only. + */ + void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref); + + /** + * Frame poll callback. This returns the number of immediately available + * samples. It should return a positive value if the next request_frame() + * is guaranteed to return one frame (with no delay). + * + * Defaults to just calling the source poll_frame() method. + * + * Output pads only. + */ + int (*poll_frame)(AVFilterLink *link); + + /** + * Frame request callback. A call to this should result in at least one + * frame being output over the given link. This should return zero on + * success, and another value on error. + * + * Output pads only. + */ + int (*request_frame)(AVFilterLink *link); + + /** + * Link configuration callback. + * + * For output pads, this should set the link properties such as + * width/height. This should NOT set the format property - that is + * negotiated between filters by the filter system using the + * query_formats() callback before this function is called. + * + * For input pads, this should check the properties of the link, and update + * the filter's internal state as necessary. + * + * For both input and output filters, this should return zero on success, + * and another value on error. + */ + int (*config_props)(AVFilterLink *link); +}; +#endif + /** default handler for freeing audio/video buffer when there are no references left */ void ff_avfilter_default_free_buffer(AVFilterBuffer *buf); @@ -165,16 +291,22 @@ void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off, static inline void ff_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p) { - ff_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad), + ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad), &f->input_pads, &f->inputs, p); +#if FF_API_FOO_COUNT + f->input_count = f->nb_inputs; +#endif } /** Insert a new output pad for the filter. 
*/ static inline void ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p) { - ff_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad), + ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad), &f->output_pads, &f->outputs, p); +#if FF_API_FOO_COUNT + f->output_count = f->nb_outputs; +#endif } /** diff --git a/libavfilter/split.c b/libavfilter/split.c index b7d8b87e68..899fe3ea7b 100644 --- a/libavfilter/split.c +++ b/libavfilter/split.c @@ -59,7 +59,7 @@ static void split_uninit(AVFilterContext *ctx) { int i; - for (i = 0; i < ctx->output_count; i++) + for (i = 0; i < ctx->nb_outputs; i++) av_freep(&ctx->output_pads[i].name); } @@ -68,7 +68,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) AVFilterContext *ctx = inlink->dst; int i; - for (i = 0; i < ctx->output_count; i++) + for (i = 0; i < ctx->nb_outputs; i++) ff_start_frame(ctx->outputs[i], avfilter_ref_buffer(picref, ~AV_PERM_WRITE)); } @@ -78,7 +78,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) AVFilterContext *ctx = inlink->dst; int i; - for (i = 0; i < ctx->output_count; i++) + for (i = 0; i < ctx->nb_outputs; i++) ff_draw_slice(ctx->outputs[i], y, h, slice_dir); } @@ -87,7 +87,7 @@ static void end_frame(AVFilterLink *inlink) AVFilterContext *ctx = inlink->dst; int i; - for (i = 0; i < ctx->output_count; i++) + for (i = 0; i < ctx->nb_outputs; i++) ff_end_frame(ctx->outputs[i]); avfilter_unref_buffer(inlink->cur_buf); @@ -115,7 +115,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref) AVFilterContext *ctx = inlink->dst; int i; - for (i = 0; i < ctx->output_count; i++) + for (i = 0; i < ctx->nb_outputs; i++) ff_filter_samples(inlink->dst->outputs[i], avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE)); } diff --git a/libavfilter/src_movie.c b/libavfilter/src_movie.c index 354d67c941..eea2db3e8c 100644 --- a/libavfilter/src_movie.c +++ b/libavfilter/src_movie.c @@ -39,6 +39,7 @@ #include "avcodec.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { @@ -272,8 +273,8 @@ static int movie_get_frame(AVFilterLink *outlink) if (frame_decoded) { /* FIXME: avoid the memcpy */ - movie->picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_PRESERVE | - AV_PERM_REUSE2, outlink->w, outlink->h); + movie->picref = ff_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_PRESERVE | + AV_PERM_REUSE2, outlink->w, outlink->h); av_image_copy(movie->picref->data, movie->picref->linesize, (void*)movie->frame->data, movie->frame->linesize, movie->picref->format, outlink->w, outlink->h); diff --git a/libavfilter/version.h b/libavfilter/version.h index c90b4ad43a..3ebea25e07 100644 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@ -29,8 +29,8 @@ #include "libavutil/avutil.h" #define LIBAVFILTER_VERSION_MAJOR 2 -#define LIBAVFILTER_VERSION_MINOR 78 -#define LIBAVFILTER_VERSION_MICRO 101 +#define LIBAVFILTER_VERSION_MINOR 79 +#define LIBAVFILTER_VERSION_MICRO 100 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ LIBAVFILTER_VERSION_MINOR, \ @@ -62,5 +62,11 @@ #ifndef FF_API_FILTERS_PUBLIC #define FF_API_FILTERS_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 3) #endif +#ifndef FF_API_AVFILTERPAD_PUBLIC +#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FOO_COUNT +#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4) +#endif #endif // AVFILTER_VERSION_H diff --git a/libavfilter/vf_aspect.c 
b/libavfilter/vf_aspect.c index 179ef292b7..e7f431f746 100644 --- a/libavfilter/vf_aspect.c +++ b/libavfilter/vf_aspect.c @@ -26,6 +26,7 @@ #include "libavutil/mathematics.h" #include "libavutil/parseutils.h" #include "avfilter.h" +#include "internal.h" #include "video.h" typedef struct { diff --git a/libavfilter/vf_blackframe.c b/libavfilter/vf_blackframe.c index 4a415b0e38..b37bc035c7 100644 --- a/libavfilter/vf_blackframe.c +++ b/libavfilter/vf_blackframe.c @@ -30,6 +30,7 @@ #include "avfilter.h" #include "internal.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { diff --git a/libavfilter/vf_boxblur.c b/libavfilter/vf_boxblur.c index d8aa764d9a..7bab9bd67a 100644 --- a/libavfilter/vf_boxblur.c +++ b/libavfilter/vf_boxblur.c @@ -30,6 +30,7 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" static const char *const var_names[] = { diff --git a/libavfilter/vf_copy.c b/libavfilter/vf_copy.c index 8f128efc5b..111d315de6 100644 --- a/libavfilter/vf_copy.c +++ b/libavfilter/vf_copy.c @@ -22,6 +22,7 @@ */ #include "avfilter.h" +#include "internal.h" #include "video.h" AVFilter avfilter_vf_copy = { diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c index 48af9c8127..db18b2b842 100644 --- a/libavfilter/vf_crop.c +++ b/libavfilter/vf_crop.c @@ -27,6 +27,7 @@ #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" #include "libavutil/eval.h" #include "libavutil/avstring.h" diff --git a/libavfilter/vf_cropdetect.c b/libavfilter/vf_cropdetect.c index ed5d04eea8..fdb99e9719 100644 --- a/libavfilter/vf_cropdetect.c +++ b/libavfilter/vf_cropdetect.c @@ -26,6 +26,7 @@ #include "libavutil/imgutils.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { diff --git a/libavfilter/vf_delogo.c b/libavfilter/vf_delogo.c index 2949d7e931..f52b896a9c 100644 --- a/libavfilter/vf_delogo.c +++ b/libavfilter/vf_delogo.c @@ -30,6 +30,7 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" /** @@ -218,8 +219,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) AVFilterBufferRef *outpicref; if (inpicref->perms & AV_PERM_PRESERVE) { - outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, - outlink->w, outlink->h); + outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, + outlink->w, outlink->h); avfilter_copy_buffer_ref_props(outpicref, inpicref); outpicref->video->w = outlink->w; outpicref->video->h = outlink->h; diff --git a/libavfilter/vf_drawbox.c b/libavfilter/vf_drawbox.c index c792faf5b4..42ee07514a 100644 --- a/libavfilter/vf_drawbox.c +++ b/libavfilter/vf_drawbox.c @@ -29,6 +29,7 @@ #include "libavutil/parseutils.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" enum { Y, U, V, A }; diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c index ce290edb69..1754b2a48c 100644 --- a/libavfilter/vf_drawtext.c +++ b/libavfilter/vf_drawtext.c @@ -42,6 +42,7 @@ #include "avfilter.h" #include "drawutils.h" #include "formats.h" +#include "internal.h" #include "video.h" #undef time diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c index 8ff5af9f3b..ed27a5310f 100644 --- a/libavfilter/vf_fade.c +++ b/libavfilter/vf_fade.c @@ -33,6 +33,7 @@ #include "drawutils.h" #include "internal.h" #include "formats.h" +#include "internal.h" #include "video.h" #define R 0 diff --git 
a/libavfilter/vf_fieldorder.c b/libavfilter/vf_fieldorder.c index 24c23e219e..0f8df15b1a 100644 --- a/libavfilter/vf_fieldorder.c +++ b/libavfilter/vf_fieldorder.c @@ -29,6 +29,7 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct @@ -112,7 +113,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; - return avfilter_get_video_buffer(outlink, perms, w, h); + return ff_get_video_buffer(outlink, perms, w, h); } static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) diff --git a/libavfilter/vf_format.c b/libavfilter/vf_format.c index 665ec2f423..cc0e142971 100644 --- a/libavfilter/vf_format.c +++ b/libavfilter/vf_format.c @@ -27,6 +27,7 @@ #include "avfilter.h" #include "internal.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c index e3dbf06e45..17ee54c19d 100644 --- a/libavfilter/vf_frei0r.c +++ b/libavfilter/vf_frei0r.c @@ -32,6 +32,7 @@ #include "libavutil/parseutils.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef f0r_instance_t (*f0r_construct_f)(unsigned int width, unsigned int height); @@ -433,7 +434,7 @@ static int source_config_props(AVFilterLink *outlink) static int source_request_frame(AVFilterLink *outlink) { Frei0rContext *frei0r = outlink->src->priv; - AVFilterBufferRef *picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); picref->video->sample_aspect_ratio = (AVRational) {1, 1}; picref->pts = frei0r->pts++; picref->pos = -1; diff --git a/libavfilter/vf_gradfun.c b/libavfilter/vf_gradfun.c index 1b3a32933d..e863d32ed2 100644 --- a/libavfilter/vf_gradfun.c +++ b/libavfilter/vf_gradfun.c @@ -38,6 +38,7 @@ #include "avfilter.h" #include "formats.h" #include "gradfun.h" +#include "internal.h" #include "video.h" DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = { @@ -190,7 +191,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) AVFilterBufferRef *outpicref; if (inpicref->perms & AV_PERM_PRESERVE) { - outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); avfilter_copy_buffer_ref_props(outpicref, inpicref); outpicref->video->w = outlink->w; outpicref->video->h = outlink->h; diff --git a/libavfilter/vf_hflip.c b/libavfilter/vf_hflip.c index b32992f34c..bfc83f2fda 100644 --- a/libavfilter/vf_hflip.c +++ b/libavfilter/vf_hflip.c @@ -26,6 +26,7 @@ #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" #include "libavutil/pixdesc.h" #include "libavutil/intreadwrite.h" diff --git a/libavfilter/vf_hqdn3d.c b/libavfilter/vf_hqdn3d.c index 1d6ca78415..17d0b115a5 100644 --- a/libavfilter/vf_hqdn3d.c +++ b/libavfilter/vf_hqdn3d.c @@ -28,6 +28,7 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { diff --git a/libavfilter/vf_null.c b/libavfilter/vf_null.c index b98f3efc64..935c92a1d0 100644 --- a/libavfilter/vf_null.c +++ b/libavfilter/vf_null.c @@ -22,6 +22,7 @@ */ #include "avfilter.h" +#include "internal.h" #include "video.h" AVFilter avfilter_vf_null = { 
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c index 5bcd7afb2c..2c2275e718 100644 --- a/libavfilter/vf_overlay.c +++ b/libavfilter/vf_overlay.c @@ -303,7 +303,7 @@ static int config_output(AVFilterLink *outlink) static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w, int h) { - return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h); + return ff_get_video_buffer(link->dst->outputs[0], perms, w, h); } // divide by 255 and round to nearest diff --git a/libavfilter/vf_pad.c b/libavfilter/vf_pad.c index 31702b7318..413fde1f6c 100644 --- a/libavfilter/vf_pad.c +++ b/libavfilter/vf_pad.c @@ -26,6 +26,7 @@ #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" #include "libavutil/avstring.h" #include "libavutil/eval.h" @@ -220,9 +221,9 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int PadContext *pad = inlink->dst->priv; int align = (perms&AV_PERM_ALIGN) ? AVFILTER_ALIGN : 1; - AVFilterBufferRef *picref = avfilter_get_video_buffer(inlink->dst->outputs[0], perms, - w + (pad->w - pad->in_w) + 4*align, - h + (pad->h - pad->in_h)); + AVFilterBufferRef *picref = ff_get_video_buffer(inlink->dst->outputs[0], perms, + w + (pad->w - pad->in_w) + 4*align, + h + (pad->h - pad->in_h)); int plane; picref->video->w = w; @@ -287,9 +288,9 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref) if(pad->needs_copy){ av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n"); avfilter_unref_buffer(outpicref); - outpicref = avfilter_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES, - FFMAX(inlink->w, pad->w), - FFMAX(inlink->h, pad->h)); + outpicref = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES, + FFMAX(inlink->w, pad->w), + FFMAX(inlink->h, pad->h)); avfilter_copy_buffer_ref_props(outpicref, inpicref); } diff --git a/libavfilter/vf_pixdesctest.c b/libavfilter/vf_pixdesctest.c index 9b7b373ecd..73ae255bca 100644 --- a/libavfilter/vf_pixdesctest.c +++ b/libavfilter/vf_pixdesctest.c @@ -25,6 +25,7 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" +#include "internal.h" #include "video.h" typedef struct { @@ -57,8 +58,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) AVFilterBufferRef *outpicref; int i; - outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, - outlink->w, outlink->h); + outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, + outlink->w, outlink->h); outpicref = outlink->out_buf; avfilter_copy_buffer_ref_props(outpicref, picref); diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c index fab0568d9a..184de0fe15 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c @@ -25,6 +25,7 @@ #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" #include "libavutil/avstring.h" #include "libavutil/eval.h" @@ -302,7 +303,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref) scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w; scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h; - outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); + outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); avfilter_copy_buffer_ref_props(outpicref, picref); outpicref->video->w = outlink->w; outpicref->video->h = outlink->h; diff --git a/libavfilter/vf_setpts.c 
b/libavfilter/vf_setpts.c index e1f773483b..d73e77b57d 100644 --- a/libavfilter/vf_setpts.c +++ b/libavfilter/vf_setpts.c @@ -29,6 +29,7 @@ #include "libavutil/eval.h" #include "libavutil/mathematics.h" #include "avfilter.h" +#include "internal.h" #include "video.h" static const char *const var_names[] = { diff --git a/libavfilter/vf_slicify.c b/libavfilter/vf_slicify.c index 76d2b237bd..1af3239dbe 100644 --- a/libavfilter/vf_slicify.c +++ b/libavfilter/vf_slicify.c @@ -24,6 +24,7 @@ */ #include "avfilter.h" +#include "internal.h" #include "video.h" #include "libavutil/pixdesc.h" diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c index 3ac07f5870..512a5300d8 100644 --- a/libavfilter/vf_transpose.c +++ b/libavfilter/vf_transpose.c @@ -30,6 +30,7 @@ #include "libavutil/imgutils.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { @@ -117,8 +118,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) { AVFilterLink *outlink = inlink->dst->outputs[0]; - outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, - outlink->w, outlink->h); + outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, + outlink->w, outlink->h); outlink->out_buf->pts = picref->pts; if (picref->video->sample_aspect_ratio.num == 0) { diff --git a/libavfilter/vf_unsharp.c b/libavfilter/vf_unsharp.c index a280a0df7f..13f4157c3e 100644 --- a/libavfilter/vf_unsharp.c +++ b/libavfilter/vf_unsharp.c @@ -38,6 +38,7 @@ #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" #include "libavutil/common.h" #include "libavutil/mem.h" diff --git a/libavfilter/vf_vflip.c b/libavfilter/vf_vflip.c index 2c8436ff20..6fd5d863a3 100644 --- a/libavfilter/vf_vflip.c +++ b/libavfilter/vf_vflip.c @@ -25,6 +25,7 @@ #include "libavutil/pixdesc.h" #include "avfilter.h" +#include "internal.h" #include "video.h" typedef struct { @@ -50,7 +51,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, if (!(perms & AV_PERM_NEG_LINESIZES)) return ff_default_get_video_buffer(link, perms, w, h); - picref = avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h); + picref = ff_get_video_buffer(link->dst->outputs[0], perms, w, h); for (i = 0; i < 4; i ++) { int vsub = i == 1 || i == 2 ? 
flip->vsub : 0; diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c index a0a5e05526..336f18def3 100644 --- a/libavfilter/vf_yadif.c +++ b/libavfilter/vf_yadif.c @@ -207,8 +207,8 @@ static void return_frame(AVFilterContext *ctx, int is_second) } if (is_second) { - yadif->out = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE | - AV_PERM_REUSE, link->w, link->h); + yadif->out = ff_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE | + AV_PERM_REUSE, link->w, link->h); avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); yadif->out->video->interlaced = 0; } @@ -269,8 +269,8 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref) if (!yadif->prev) yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ); - yadif->out = avfilter_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE | - AV_PERM_REUSE, link->w, link->h); + yadif->out = ff_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE | + AV_PERM_REUSE, link->w, link->h); avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); yadif->out->video->interlaced = 0; diff --git a/libavfilter/video.c b/libavfilter/video.c index da1ae54b47..dfcfbb7387 100644 --- a/libavfilter/video.c +++ b/libavfilter/video.c @@ -28,7 +28,7 @@ AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h) { - return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h); + return ff_get_video_buffer(link->dst->outputs[0], perms, w, h); } AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h) @@ -127,7 +127,7 @@ fail: return NULL; } -AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h) +AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int h) { AVFilterBufferRef *ret = NULL; @@ -158,11 +158,11 @@ static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref) { AVFilterLink *outlink = NULL; - if (inlink->dst->output_count) + if (inlink->dst->nb_outputs) outlink = inlink->dst->outputs[0]; if (outlink) { - outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); + outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); avfilter_copy_buffer_ref_props(outlink->out_buf, picref); ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0)); } @@ -191,7 +191,7 @@ void ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) picref->perms, link->dstpad->min_perms, link->dstpad->rej_perms); - link->cur_buf = avfilter_get_video_buffer(link, dst->min_perms, link->w, link->h); + link->cur_buf = ff_get_video_buffer(link, dst->min_perms, link->w, link->h); link->src_buf = picref; avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf); @@ -230,7 +230,7 @@ static void default_end_frame(AVFilterLink *inlink) { AVFilterLink *outlink = NULL; - if (inlink->dst->output_count) + if (inlink->dst->nb_outputs) outlink = inlink->dst->outputs[0]; avfilter_unref_buffer(inlink->cur_buf); @@ -271,7 +271,7 @@ static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir { AVFilterLink *outlink = NULL; - if (inlink->dst->output_count) + if (inlink->dst->nb_outputs) outlink = inlink->dst->outputs[0]; if (outlink) @@ -364,4 +364,8 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { ff_draw_slice(link, y, h, slice_dir); } +AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h) +{ + return 
ff_get_video_buffer(link, perms, w, h); +} #endif diff --git a/libavfilter/video.h b/libavfilter/video.h index b6886a6e8d..28835b9bf9 100644 --- a/libavfilter/video.h +++ b/libavfilter/video.h @@ -28,6 +28,19 @@ AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h); AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h); +/** + * Request a picture buffer with a specific set of permissions. + * + * @param link the output link to the filter from which the buffer will + * be requested + * @param perms the required access permissions + * @param w the minimum width of the buffer to allocate + * @param h the minimum height of the buffer to allocate + * @return A reference to the buffer. This must be unreferenced with + * avfilter_unref_buffer when you are finished with it. + */ +AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, + int w, int h); void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref); void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir); diff --git a/libavfilter/vsink_nullsink.c b/libavfilter/vsink_nullsink.c index 50fd728bfc..82d2d32b1e 100644 --- a/libavfilter/vsink_nullsink.c +++ b/libavfilter/vsink_nullsink.c @@ -17,6 +17,7 @@ */ #include "avfilter.h" +#include "internal.h" static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c index cde971450c..112c27c4eb 100644 --- a/libavfilter/vsrc_color.c +++ b/libavfilter/vsrc_color.c @@ -25,6 +25,7 @@ #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" #include "libavutil/pixdesc.h" #include "libavutil/colorspace.h" @@ -105,7 +106,7 @@ static int color_config_props(AVFilterLink *inlink) static int color_request_frame(AVFilterLink *link) { ColorContext *color = link->src->priv; - AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h); + AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h); picref->video->sample_aspect_ratio = (AVRational) {1, 1}; picref->pts = color->pts++; picref->pos = -1; diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c index 5de7abf51f..8334f3dcd9 100644 --- a/libavfilter/vsrc_testsrc.c +++ b/libavfilter/vsrc_testsrc.c @@ -37,6 +37,7 @@ #include "libavutil/parseutils.h" #include "avfilter.h" #include "formats.h" +#include "internal.h" #include "video.h" typedef struct { @@ -137,8 +138,7 @@ static int request_frame(AVFilterLink *outlink) if (test->max_pts >= 0 && test->pts >= test->max_pts) return AVERROR_EOF; - picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, - test->w, test->h); + picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h); picref->pts = test->pts++; picref->pos = -1; picref->video->key_frame = 1; diff --git a/libavformat/rtmpproto.c b/libavformat/rtmpproto.c index 5ac6151e4a..5fcec0733e 100644 --- a/libavformat/rtmpproto.c +++ b/libavformat/rtmpproto.c @@ -88,6 +88,8 @@ typedef struct RTMPContext { char* tcurl; ///< url of the target stream char* flashver; ///< version of the flash plugin char* swfurl; ///< url of the swf player + int server_bw; ///< server bandwidth + int client_buffer_time; ///< client buffer time in ms } RTMPContext; #define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing @@ -115,7 +117,7 @@ static const uint8_t rtmp_server_key[] = { static int rtmp_write_amf_data(URLContext *s, 
char *param, uint8_t **p) { - char *field, *value, *saveptr; + char *field, *value; char type; /* The type must be B for Boolean, N for number, S for string, O for @@ -130,8 +132,12 @@ static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p) value = param + 2; } else if (param[0] == 'N' && param[1] && param[2] == ':') { type = param[1]; - field = av_strtok(param + 3, ":", &saveptr); - value = av_strtok(NULL, ":", &saveptr); + field = param + 3; + value = strchr(field, ':'); + if (!value) + goto fail; + *value = '\0'; + value++; if (!field || !value) goto fail; @@ -226,18 +232,27 @@ static int gen_connect(URLContext *s, RTMPContext *rt) ff_amf_write_object_end(&p); if (rt->conn) { - char *param, *saveptr; + char *param = rt->conn; // Write arbitrary AMF data to the Connect message. - param = av_strtok(rt->conn, " ", &saveptr); while (param != NULL) { + char *sep; + param += strspn(param, " "); + if (!*param) + break; + sep = strchr(param, ' '); + if (sep) + *sep = '\0'; if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) { // Invalid AMF parameter. ff_rtmp_packet_destroy(&pkt); return ret; } - param = av_strtok(NULL, " ", &saveptr); + if (sep) + param = sep + 1; + else + break; } } @@ -394,6 +409,31 @@ static int gen_delete_stream(URLContext *s, RTMPContext *rt) } /** + * Generate client buffer time and send it to the server. + */ +static int gen_buffer_time(URLContext *s, RTMPContext *rt) +{ + RTMPPacket pkt; + uint8_t *p; + int ret; + + if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, + 1, 10)) < 0) + return ret; + + p = pkt.data; + bytestream_put_be16(&p, 3); + bytestream_put_be32(&p, rt->main_channel_id); + bytestream_put_be32(&p, rt->client_buffer_time); + + ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, + rt->prev_pkt[1]); + ff_rtmp_packet_destroy(&pkt); + + return ret; +} + +/** * Generate 'play' call and send it to the server, then ping the server * to start actual playing. */ @@ -422,23 +462,6 @@ static int gen_play(URLContext *s, RTMPContext *rt) rt->prev_pkt[1]); ff_rtmp_packet_destroy(&pkt); - if (ret < 0) - return ret; - - // set client buffer time disguised in ping packet - if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, - 1, 10)) < 0) - return ret; - - p = pkt.data; - bytestream_put_be16(&p, 3); - bytestream_put_be32(&p, 1); - bytestream_put_be32(&p, 256); //TODO: what is a good value here? - - ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, - rt->prev_pkt[1]); - ff_rtmp_packet_destroy(&pkt); - return ret; } @@ -510,7 +533,7 @@ static int gen_server_bw(URLContext *s, RTMPContext *rt) return ret; p = pkt.data; - bytestream_put_be32(&p, 2500000); + bytestream_put_be32(&p, rt->server_bw); ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]); ff_rtmp_packet_destroy(&pkt); @@ -838,6 +861,14 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt) av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", AV_RB32(pkt->data)); rt->client_report_size = AV_RB32(pkt->data) >> 1; break; + case RTMP_PT_SERVER_BW: + rt->server_bw = AV_RB32(pkt->data); + if (rt->server_bw <= 0) { + av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n", rt->server_bw); + return AVERROR(EINVAL); + } + av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw); + break; case RTMP_PT_INVOKE: //TODO: check for the messages sent for wrong state? 
if (!memcmp(pkt->data, "\002\000\006_error", 9)) { @@ -888,6 +919,8 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt) if (rt->is_input) { if ((ret = gen_play(s, rt)) < 0) return ret; + if ((ret = gen_buffer_time(s, rt)) < 0) + return ret; } else { if ((ret = gen_publish(s, rt)) < 0) return ret; @@ -924,6 +957,9 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt) return ret; } break; + default: + av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type); + break; } return 0; } @@ -1182,6 +1218,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags) rt->client_report_size = 1048576; rt->bytes_read = 0; rt->last_bytes_read = 0; + rt->server_bw = 2500000; av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n", proto, path, rt->app, rt->playpath); @@ -1328,6 +1365,7 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size) static const AVOption rtmp_options[] = { {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC}, + {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {3000}, 0, INT_MAX, DEC|ENC}, {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC}, {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC}, {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"}, diff --git a/libswscale/input.c b/libswscale/input.c index 018cd30adb..c9c91d0bca 100644 --- a/libswscale/input.c +++ b/libswscale/input.c @@ -677,80 +677,120 @@ static void planar_rgb_to_y(uint16_t *dst, const uint8_t *src[4], int width) } } -static void planar_rgb16le_to_y(uint8_t *_dst, const uint8_t *_src[4], int width) +static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[4], int width) { int i; - const uint16_t **src = (const uint16_t **)_src; - uint16_t *dst = (uint16_t *)_dst; for (i = 0; i < width; i++) { - int g = AV_RL16(src[0] + i); - int b = AV_RL16(src[1] + i); - int r = AV_RL16(src[2] + i); + int g = src[0][i]; + int b = src[1][i]; + int r = src[2][i]; - dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + dstU[i] = (RU*r + GU*g + BU*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); + dstV[i] = (RV*r + GV*g + BV*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); } } -static void planar_rgb16be_to_y(uint8_t *_dst, const uint8_t *_src[4], int width) +#define rdpx(src) \ + is_be ? 
AV_RB16(src) : AV_RL16(src) +static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4], + int width, int bpc, int is_be) { int i; const uint16_t **src = (const uint16_t **)_src; uint16_t *dst = (uint16_t *)_dst; for (i = 0; i < width; i++) { - int g = AV_RB16(src[0] + i); - int b = AV_RB16(src[1] + i); - int r = AV_RB16(src[2] + i); + int g = rdpx(src[0] + i); + int b = rdpx(src[1] + i); + int r = rdpx(src[2] + i); - dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT); } } -static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[4], int width) +static void planar_rgb9le_to_y(uint8_t *dst, const uint8_t *src[4], int w) { - int i; - for (i = 0; i < width; i++) { - int g = src[0][i]; - int b = src[1][i]; - int r = src[2][i]; + planar_rgb16_to_y(dst, src, w, 9, 0); +} - dstU[i] = (RU*r + GU*g + BU*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); - dstV[i] = (RV*r + GV*g + BV*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); - } +static void planar_rgb9be_to_y(uint8_t *dst, const uint8_t *src[4], int w) +{ + planar_rgb16_to_y(dst, src, w, 9, 1); } -static void planar_rgb16le_to_uv(uint8_t *_dstU, uint8_t *_dstV, - const uint8_t *_src[4], int width) +static void planar_rgb10le_to_y(uint8_t *dst, const uint8_t *src[4], int w) { - int i; - const uint16_t **src = (const uint16_t **)_src; - uint16_t *dstU = (uint16_t *)_dstU; - uint16_t *dstV = (uint16_t *)_dstV; - for (i = 0; i < width; i++) { - int g = AV_RL16(src[0] + i); - int b = AV_RL16(src[1] + i); - int r = AV_RL16(src[2] + i); + planar_rgb16_to_y(dst, src, w, 10, 0); +} - dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - } +static void planar_rgb10be_to_y(uint8_t *dst, const uint8_t *src[4], int w) +{ + planar_rgb16_to_y(dst, src, w, 10, 1); +} + +static void planar_rgb16le_to_y(uint8_t *dst, const uint8_t *src[4], int w) +{ + planar_rgb16_to_y(dst, src, w, 16, 0); } -static void planar_rgb16be_to_uv(uint8_t *_dstU, uint8_t *_dstV, - const uint8_t *_src[4], int width) +static void planar_rgb16be_to_y(uint8_t *dst, const uint8_t *src[4], int w) +{ + planar_rgb16_to_y(dst, src, w, 16, 1); +} + +static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV, + const uint8_t *_src[4], int width, + int bpc, int is_be) { int i; const uint16_t **src = (const uint16_t **)_src; uint16_t *dstU = (uint16_t *)_dstU; uint16_t *dstV = (uint16_t *)_dstV; for (i = 0; i < width; i++) { - int g = AV_RB16(src[0] + i); - int b = AV_RB16(src[1] + i); - int r = AV_RB16(src[2] + i); + int g = rdpx(src[0] + i); + int b = rdpx(src[1] + i); + int r = rdpx(src[2] + i); - dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); + dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT; + dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT; } } +#undef rdpx + +static void planar_rgb9le_to_uv(uint8_t *dstU, uint8_t *dstV, + const uint8_t *src[4], int w) +{ + planar_rgb16_to_uv(dstU, dstV, src, w, 9, 0); +} + +static void planar_rgb9be_to_uv(uint8_t *dstU, uint8_t *dstV, + const uint8_t *src[4], int w) +{ + planar_rgb16_to_uv(dstU, dstV, src, w, 9, 1); +} + 
+static void planar_rgb10le_to_uv(uint8_t *dstU, uint8_t *dstV, + const uint8_t *src[4], int w) +{ + planar_rgb16_to_uv(dstU, dstV, src, w, 10, 0); +} + +static void planar_rgb10be_to_uv(uint8_t *dstU, uint8_t *dstV, + const uint8_t *src[4], int w) +{ + planar_rgb16_to_uv(dstU, dstV, src, w, 10, 1); +} + +static void planar_rgb16le_to_uv(uint8_t *dstU, uint8_t *dstV, + const uint8_t *src[4], int w) +{ + planar_rgb16_to_uv(dstU, dstV, src, w, 16, 0); +} + +static void planar_rgb16be_to_uv(uint8_t *dstU, uint8_t *dstV, + const uint8_t *src[4], int w) +{ + planar_rgb16_to_uv(dstU, dstV, src, w, 16, 1); +} av_cold void ff_sws_init_input_funcs(SwsContext *c) { @@ -778,12 +818,20 @@ av_cold void ff_sws_init_input_funcs(SwsContext *c) c->chrToYV12 = palToUV_c; break; case PIX_FMT_GBRP9LE: + c->readChrPlanar = planar_rgb9le_to_uv; + break; case PIX_FMT_GBRP10LE: + c->readChrPlanar = planar_rgb10le_to_uv; + break; case PIX_FMT_GBRP16LE: c->readChrPlanar = planar_rgb16le_to_uv; break; case PIX_FMT_GBRP9BE: + c->readChrPlanar = planar_rgb9be_to_uv; + break; case PIX_FMT_GBRP10BE: + c->readChrPlanar = planar_rgb10be_to_uv; + break; case PIX_FMT_GBRP16BE: c->readChrPlanar = planar_rgb16be_to_uv; break; @@ -975,12 +1023,20 @@ av_cold void ff_sws_init_input_funcs(SwsContext *c) c->alpToYV12 = NULL; switch (srcFormat) { case PIX_FMT_GBRP9LE: + c->readLumPlanar = planar_rgb9le_to_y; + break; case PIX_FMT_GBRP10LE: + c->readLumPlanar = planar_rgb10le_to_y; + break; case PIX_FMT_GBRP16LE: c->readLumPlanar = planar_rgb16le_to_y; break; case PIX_FMT_GBRP9BE: + c->readLumPlanar = planar_rgb9be_to_y; + break; case PIX_FMT_GBRP10BE: + c->readLumPlanar = planar_rgb10be_to_y; + break; case PIX_FMT_GBRP16BE: c->readLumPlanar = planar_rgb16be_to_y; break; diff --git a/tests/Makefile b/tests/Makefile index 53c181131c..d91c07950f 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -148,7 +148,7 @@ fate-list: clean:: testclean testclean: - $(RM) -r tests/vsynth1 tests/vsynth2 tests/data tools/lavfi-showfiltfmts$(EXESUF) + $(RM) -r tests/vsynth1 tests/data tools/lavfi-showfiltfmts$(EXESUF) $(RM) $(CLEANSUFFIXES:%=tests/%) $(RM) $(TESTTOOLS:%=tests/%$(HOSTEXESUF)) |
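The "Set Buffer Length" message that the new gen_buffer_time() in the rtmpproto.c hunk sends is an RTMP user-control (ping) packet with a fixed 10-byte payload: a big-endian 16-bit event type of 3, then the 32-bit stream id, then the 32-bit buffer length in milliseconds. A minimal standalone sketch of that payload follows; the stream id of 1 and the 3000 ms value are illustrative stand-ins for rt->main_channel_id and the new rtmp_buffer option, and the hand-rolled writers stand in for bytestream_put_be16()/bytestream_put_be32():

    /* Minimal sketch: serialize the 10-byte "Set Buffer Length" ping
     * payload that gen_buffer_time() builds.  Stream id 1 and 3000 ms
     * are illustrative values only. */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t *put_be16(uint8_t *p, uint16_t v)
    {
        *p++ = v >> 8;
        *p++ = v & 0xff;
        return p;
    }

    static uint8_t *put_be32(uint8_t *p, uint32_t v)
    {
        *p++ = v >> 24;
        *p++ = v >> 16;
        *p++ = v >> 8;
        *p++ = v & 0xff;
        return p;
    }

    int main(void)
    {
        uint8_t buf[10], *p = buf;

        p = put_be16(p, 3);      /* user-control event: Set Buffer Length */
        p = put_be32(p, 1);      /* stream id (main channel)              */
        p = put_be32(p, 3000);   /* buffer length in milliseconds         */

        for (int i = 0; i < 10; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }

Running this prints 00 03 00 00 00 01 00 00 0b b8, i.e. the same byte layout the patch writes before handing the packet to ff_rtmp_packet_write().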
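The libswscale hunk above replaces the 16-bit-only readers with a single av_always_inline template (planar_rgb16_to_y()/planar_rgb16_to_uv()) that takes the bit depth and endianness as compile-time constants, so each thin per-format wrapper (9/10/16 bpp, LE/BE) specializes into straight-line code and the constant offset term scales with the input depth. The standalone sketch below mirrors that pattern for the luma path only; SHIFT and the RY/GY/BY coefficients are assumed, illustrative values, not the constants libswscale defines:

    /* Sketch of the bit-depth-generic template pattern: per-format
     * wrappers pass bpc/is_be as constants so the compiler specializes
     * the loop.  Coefficients below are illustrative stand-ins. */
    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT 15
    #define RY  9798   /* assumed BT.601-style luma weights, scaled by 2^SHIFT */
    #define GY 19235
    #define BY  3735

    static inline int rd16(const uint8_t *p, int is_be)
    {
        return is_be ? (p[0] << 8) | p[1] : (p[1] << 8) | p[0];
    }

    /* av_always_inline in the real code; bpc and is_be are constants in
     * every caller, so the endianness branch folds away */
    static inline void rgb_planes_to_y(uint16_t *dst, const uint8_t *src[3],
                                       int width, int bpc, int is_be)
    {
        for (int i = 0; i < width; i++) {
            int g = rd16(src[0] + 2 * i, is_be);
            int b = rd16(src[1] + 2 * i, is_be);
            int r = rd16(src[2] + 2 * i, is_be);
            /* the constant term scales with the input bit depth, which is
             * the essence of the 9/10/16 bpp fix */
            dst[i] = (RY * r + GY * g + BY * b +
                      (33 << (SHIFT + bpc - 9))) >> SHIFT;
        }
    }

    /* thin per-format wrapper, analogous to planar_rgb10le_to_y() */
    static void rgb10le_to_y(uint16_t *dst, const uint8_t *src[3], int w)
    {
        rgb_planes_to_y(dst, src, w, 10, 0);
    }

    int main(void)
    {
        /* one 10-bit little-endian pixel per plane: G=512, B=256, R=768 */
        uint8_t gp[2] = { 0x00, 0x02 }, bp[2] = { 0x00, 0x01 }, rp[2] = { 0x00, 0x03 };
        const uint8_t *planes[3] = { gp, bp, rp };
        uint16_t y;

        rgb10le_to_y(&y, planes, 1);
        printf("Y = %d\n", y);   /* result stays within the 10-bit range of the input */
        return 0;
    }

The ff_sws_init_input_funcs() changes in the hunk then simply select one such wrapper per pixel format instead of sharing the old 16-bit reader across 9-, 10- and 16-bit GBRP variants.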