path: root/libavfilter
author     Stefano Sabatini <stefano.sabatini-lala@poste.it>   2011-07-04 11:35:39 +0300
committer  Stefano Sabatini <stefasab@gmail.com>               2011-09-16 20:44:27 +0200
commit     37cc443c83589b172a0cbc807e05059e3ce4582b (patch)
tree       d32466e2882c7b6e466ea0a61bbc4ddd646adcf3 /libavfilter
parent     553c5e9f234d062d921a4150b64ffd9b19c05135 (diff)
download   ffmpeg-37cc443c83589b172a0cbc807e05059e3ce4582b.tar.gz
lavfi: add audio convert filter
Add the aconvert filter, which performs sample format, channel layout, and packing format conversion. The aconvert code relies on the audio conversion code in libavcodec, so the filter adds a dependency on libavcodec. Based on previous work by S.N. Hemanth Meenakshisundaram and Mina Nagy Zaki, carried out during GSoC 2010 and 2011.
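For reference, the argument string accepted by the new filter follows the sample_fmt:channel_layout:packing_fmt order parsed by init() below, and each field also accepts "auto". A minimal usage sketch (not part of this patch), assuming avfilter_register_all() has been called and a graph with source and sink filters is built elsewhere; the helper name and the option values are illustrative:

    #include <libavfilter/avfilter.h>
    #include <libavfilter/avfiltergraph.h>

    /* Create an aconvert instance converting to signed 16-bit, stereo,
     * packed samples; linking it into the graph is left to the caller. */
    static int create_aconvert(AVFilterGraph *graph, AVFilterContext **ctx)
    {
        return avfilter_graph_create_filter(ctx, avfilter_get_by_name("aconvert"),
                                            "aconvert", "s16:stereo:packed",
                                            NULL, graph);
    }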
Diffstat (limited to 'libavfilter')
-rw-r--r--   libavfilter/Makefile                 |   3
-rw-r--r--   libavfilter/af_aconvert.c            | 417
-rw-r--r--   libavfilter/af_aconvert_rematrix.c   | 172
-rw-r--r--   libavfilter/allfilters.c             |   1
-rw-r--r--   libavfilter/avfilter.h               |   2
5 files changed, 594 insertions(+), 1 deletion(-)
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 9ea5c4c23d..f1f04068bb 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -2,6 +2,8 @@ include $(SUBDIR)../config.mak
NAME = avfilter
FFLIBS = avutil
+
+FFLIBS-$(CONFIG_ACONVERT_FILTER) += avcodec
FFLIBS-$(CONFIG_AMOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_ARESAMPLE_FILTER) += avcodec
FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
@@ -20,6 +22,7 @@ OBJS = allfilters.o \
OBJS-$(CONFIG_AVCODEC) += avcodec.o
+OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
diff --git a/libavfilter/af_aconvert.c b/libavfilter/af_aconvert.c
new file mode 100644
index 0000000000..d794c23576
--- /dev/null
+++ b/libavfilter/af_aconvert.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2011 Mina Nagy Zaki
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * sample format and channel layout conversion audio filter
+ * based on code in libavcodec/resample.c by Fabrice Bellard and
+ * libavcodec/audioconvert.c by Michael Niedermayer
+ */
+
+#include "libavutil/audioconvert.h"
+#include "libavcodec/audioconvert.h"
+#include "avfilter.h"
+#include "internal.h"
+
+typedef struct {
+ enum AVSampleFormat out_sample_fmt, in_sample_fmt; ///< in/out sample formats
+ int64_t out_chlayout, in_chlayout; ///< in/out channel layout
+ int out_nb_channels, in_nb_channels; ///< number of in/out channels
+ enum AVFilterPacking out_packing_fmt, in_packing_fmt; ///< in/out packing formats
+
+ int max_nb_samples; ///< maximum number of buffered samples
+ AVFilterBufferRef *mix_samplesref; ///< rematrixed buffer
+ AVFilterBufferRef *out_samplesref; ///< output buffer after required conversions
+
+ uint8_t *in_mix[8], *out_mix[8]; ///< input/output for rematrixing functions
+ uint8_t *packed_data[8]; ///< pointers for packing conversion
+ int out_strides[8], in_strides[8]; ///< input/output strides for av_audio_convert
+ uint8_t **in_conv, **out_conv; ///< input/output for av_audio_convert
+
+ AVAudioConvert *audioconvert_ctx; ///< context for conversion to output sample format
+
+ void (*convert_chlayout)(); ///< function to do the requested rematrixing
+} AConvertContext;
+
+#define REMATRIX_FUNC_SIG(NAME) static void REMATRIX_FUNC_NAME(NAME) \
+ (FMT_TYPE *outp[], FMT_TYPE *inp[], int nb_samples, AConvertContext *aconvert)
+
+#define FMT_TYPE uint8_t
+#define REMATRIX_FUNC_NAME(NAME) NAME ## _u8
+#include "af_aconvert_rematrix.c"
+
+#define FMT_TYPE int16_t
+#define REMATRIX_FUNC_NAME(NAME) NAME ## _s16
+#include "af_aconvert_rematrix.c"
+
+#define FMT_TYPE int32_t
+#define REMATRIX_FUNC_NAME(NAME) NAME ## _s32
+#include "af_aconvert_rematrix.c"
+
+#define FLOATING
+
+#define FMT_TYPE float
+#define REMATRIX_FUNC_NAME(NAME) NAME ## _flt
+#include "af_aconvert_rematrix.c"
+
+#define FMT_TYPE double
+#define REMATRIX_FUNC_NAME(NAME) NAME ## _dbl
+#include "af_aconvert_rematrix.c"
+
+#define FMT_TYPE uint8_t
+#define REMATRIX_FUNC_NAME(NAME) NAME
+REMATRIX_FUNC_SIG(stereo_remix_planar)
+{
+ int size = av_get_bytes_per_sample(aconvert->in_sample_fmt) * nb_samples;
+
+ memcpy(outp[0], inp[0], size);
+ memcpy(outp[1], inp[aconvert->in_nb_channels == 1 ? 0 : 1], size);
+}
+
+#define REGISTER_FUNC_PACKING(INCHLAYOUT, OUTCHLAYOUT, FUNC, PACKING) \
+ {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_U8, FUNC##_u8}, \
+ {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_S16, FUNC##_s16}, \
+ {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_S32, FUNC##_s32}, \
+ {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_FLT, FUNC##_flt}, \
+ {INCHLAYOUT, OUTCHLAYOUT, PACKING, AV_SAMPLE_FMT_DBL, FUNC##_dbl},
+
+#define REGISTER_FUNC(INCHLAYOUT, OUTCHLAYOUT, FUNC) \
+ REGISTER_FUNC_PACKING(INCHLAYOUT, OUTCHLAYOUT, FUNC##_packed, AVFILTER_PACKED) \
+ REGISTER_FUNC_PACKING(INCHLAYOUT, OUTCHLAYOUT, FUNC##_planar, AVFILTER_PLANAR)
+
+static struct RematrixFunctionInfo {
+ int64_t in_chlayout, out_chlayout;
+ int planar, sfmt;
+ void (*func)();
+} rematrix_funcs[] = {
+ REGISTER_FUNC (AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_5POINT1, stereo_to_surround_5p1)
+ REGISTER_FUNC (AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO, surround_5p1_to_stereo)
+ REGISTER_FUNC_PACKING(AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_MONO, stereo_to_mono_packed, AVFILTER_PACKED)
+ REGISTER_FUNC_PACKING(AV_CH_LAYOUT_MONO, AV_CH_LAYOUT_STEREO, mono_to_stereo_packed, AVFILTER_PACKED)
+ REGISTER_FUNC (0, AV_CH_LAYOUT_MONO, mono_downmix)
+ REGISTER_FUNC_PACKING(0, AV_CH_LAYOUT_STEREO, stereo_downmix_packed, AVFILTER_PACKED)
+
+ // This function works for all sample formats
+ {0, AV_CH_LAYOUT_STEREO, AVFILTER_PLANAR, -1, stereo_remix_planar}
+};
+
+static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
+{
+ AConvertContext *aconvert = ctx->priv;
+ char *arg, *ptr = NULL;
+ int ret = 0;
+ char *args = av_strdup(args0);
+
+ aconvert->out_sample_fmt = AV_SAMPLE_FMT_NONE;
+ aconvert->out_chlayout = 0;
+ aconvert->out_packing_fmt = -1;
+
+ if ((arg = strtok_r(args, ":", &ptr)) && strcmp(arg, "auto")) {
+ if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0)
+ goto end;
+ }
+ if ((arg = strtok_r(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
+ if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0)
+ goto end;
+ }
+ if ((arg = strtok_r(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
+ if ((ret = ff_parse_packing_format((int *)&aconvert->out_packing_fmt, arg, ctx)) < 0)
+ goto end;
+ }
+
+end:
+ av_freep(&args);
+ return ret;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ AConvertContext *aconvert = ctx->priv;
+ avfilter_unref_buffer(aconvert->mix_samplesref);
+ avfilter_unref_buffer(aconvert->out_samplesref);
+ if (aconvert->audioconvert_ctx)
+ av_audio_convert_free(aconvert->audioconvert_ctx);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ AVFilterFormats *formats = NULL;
+ AConvertContext *aconvert = ctx->priv;
+ AVFilterLink *inlink = ctx->inputs[0];
+ AVFilterLink *outlink = ctx->outputs[0];
+
+ avfilter_formats_ref(avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO),
+ &inlink->out_formats);
+ if (aconvert->out_sample_fmt != AV_SAMPLE_FMT_NONE) {
+ formats = NULL;
+ avfilter_add_format(&formats, aconvert->out_sample_fmt);
+ avfilter_formats_ref(formats, &outlink->in_formats);
+ } else
+ avfilter_formats_ref(avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO),
+ &outlink->in_formats);
+
+ avfilter_formats_ref(avfilter_make_all_channel_layouts(),
+ &inlink->out_chlayouts);
+ if (aconvert->out_chlayout != 0) {
+ formats = NULL;
+ avfilter_add_format(&formats, aconvert->out_chlayout);
+ avfilter_formats_ref(formats, &outlink->in_chlayouts);
+ } else
+ avfilter_formats_ref(avfilter_make_all_channel_layouts(),
+ &outlink->in_chlayouts);
+
+ avfilter_formats_ref(avfilter_make_all_packing_formats(),
+ &inlink->out_packing);
+ if (aconvert->out_packing_fmt != -1) {
+ formats = NULL;
+ avfilter_add_format(&formats, aconvert->out_packing_fmt);
+ avfilter_formats_ref(formats, &outlink->in_packing);
+ } else
+ avfilter_formats_ref(avfilter_make_all_packing_formats(),
+ &outlink->in_packing);
+
+ return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ AConvertContext *aconvert = outlink->src->priv;
+ char buf1[64], buf2[64];
+
+ aconvert->in_sample_fmt = inlink->format;
+ aconvert->in_packing_fmt = inlink->planar;
+ if (aconvert->out_packing_fmt == -1)
+ aconvert->out_packing_fmt = outlink->planar;
+ aconvert->in_chlayout = inlink->channel_layout;
+ aconvert->in_nb_channels =
+ av_get_channel_layout_nb_channels(inlink->channel_layout);
+
+ /* if not specified in args, use the format and layout of the output */
+ if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE)
+ aconvert->out_sample_fmt = outlink->format;
+ if (aconvert->out_chlayout == 0)
+ aconvert->out_chlayout = outlink->channel_layout;
+ aconvert->out_nb_channels =
+ av_get_channel_layout_nb_channels(outlink->channel_layout);
+
+ av_get_channel_layout_string(buf1, sizeof(buf1),
+ -1, inlink ->channel_layout);
+ av_get_channel_layout_string(buf2, sizeof(buf2),
+ -1, outlink->channel_layout);
+ av_log(outlink->src, AV_LOG_INFO,
+ "fmt:%s cl:%s planar:%i -> fmt:%s cl:%s planar:%i\n",
+ av_get_sample_fmt_name(inlink ->format), buf1, inlink ->planar,
+ av_get_sample_fmt_name(outlink->format), buf2, outlink->planar);
+
+ /* compute which channel layout conversion to use */
+ if (inlink->channel_layout != outlink->channel_layout) {
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(rematrix_funcs); i++) {
+ const struct RematrixFunctionInfo *f = &rematrix_funcs[i];
+ if ((f->in_chlayout == 0 || f->in_chlayout == inlink ->channel_layout) &&
+ (f->out_chlayout == 0 || f->out_chlayout == outlink->channel_layout) &&
+ (f->planar == -1 || f->planar == inlink->planar) &&
+ (f->sfmt == -1 || f->sfmt == inlink->format)
+ ) {
+ aconvert->convert_chlayout = f->func;
+ break;
+ }
+ }
+ if (!aconvert->convert_chlayout) {
+ av_log(outlink->src, AV_LOG_ERROR,
+ "Unsupported channel layout conversion '%s -> %s' requested!\n",
+ buf1, buf2);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int init_buffers(AVFilterLink *inlink, int nb_samples)
+{
+ AConvertContext *aconvert = inlink->dst->priv;
+ AVFilterLink * const outlink = inlink->dst->outputs[0];
+ int i, packed_stride = 0;
+ const unsigned
+ packing_conv = inlink->planar != outlink->planar &&
+ aconvert->out_nb_channels != 1,
+ format_conv = inlink->format != outlink->format;
+ int nb_channels = aconvert->out_nb_channels;
+
+ uninit(inlink->dst);
+ aconvert->max_nb_samples = nb_samples;
+
+ if (aconvert->convert_chlayout) {
+ /* allocate buffer for storing intermediary mixing samplesref */
+ uint8_t *data[8];
+ int linesize[8];
+ int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
+
+ if (av_samples_alloc(data, linesize, nb_channels, nb_samples,
+ inlink->format, inlink->planar, 16) < 0)
+ goto fail_no_mem;
+ aconvert->mix_samplesref =
+ avfilter_get_audio_buffer_ref_from_arrays(data, linesize, AV_PERM_WRITE,
+ nb_samples, inlink->format,
+ outlink->channel_layout,
+ inlink->planar);
+ if (!aconvert->mix_samplesref)
+ goto fail_no_mem;
+ }
+
+ // if there's a format/packing conversion we need an audio_convert context
+ if (format_conv || packing_conv) {
+ aconvert->out_samplesref =
+ avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
+ if (!aconvert->out_samplesref)
+ goto fail_no_mem;
+
+ aconvert->in_strides [0] = av_get_bytes_per_sample(inlink ->format);
+ aconvert->out_strides[0] = av_get_bytes_per_sample(outlink->format);
+
+ aconvert->out_conv = aconvert->out_samplesref->data;
+ if (aconvert->mix_samplesref)
+ aconvert->in_conv = aconvert->mix_samplesref->data;
+
+ if (packing_conv) {
+ // packed -> planar
+ if (outlink->planar == AVFILTER_PLANAR) {
+ if (aconvert->mix_samplesref)
+ aconvert->packed_data[0] = aconvert->mix_samplesref->data[0];
+ aconvert->in_conv = aconvert->packed_data;
+ packed_stride = aconvert->in_strides[0];
+ aconvert->in_strides[0] *= nb_channels;
+ // planar -> packed
+ } else {
+ aconvert->packed_data[0] = aconvert->out_samplesref->data[0];
+ aconvert->out_conv = aconvert->packed_data;
+ packed_stride = aconvert->out_strides[0];
+ aconvert->out_strides[0] *= nb_channels;
+ }
+ } else if (outlink->planar == AVFILTER_PACKED) {
+ /* If there's no packing conversion and the stream is packed,
+ * then we treat the entire stream as one big channel
+ */
+ nb_channels = 1;
+ }
+
+ for (i = 1; i < nb_channels; i++) {
+ aconvert->packed_data[i] = aconvert->packed_data[i-1] + packed_stride;
+ aconvert->in_strides[i] = aconvert->in_strides[0];
+ aconvert->out_strides[i] = aconvert->out_strides[0];
+ }
+
+ aconvert->audioconvert_ctx =
+ av_audio_convert_alloc(outlink->format, nb_channels,
+ inlink->format, nb_channels, NULL, 0);
+ if (!aconvert->audioconvert_ctx)
+ goto fail_no_mem;
+ }
+
+ return 0;
+
+fail_no_mem:
+ av_log(inlink->dst, AV_LOG_ERROR, "Could not allocate memory.\n");
+ return AVERROR(ENOMEM);
+}
+
+static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
+{
+ AConvertContext *aconvert = inlink->dst->priv;
+ AVFilterBufferRef *curbuf = insamplesref;
+ AVFilterLink * const outlink = inlink->dst->outputs[0];
+ int chan_mult;
+
+ /* init/reinit the internal buffers if this is the first buffer
+ * provided or a bigger one is needed */
+ if (!aconvert->max_nb_samples ||
+ (curbuf->audio->nb_samples > aconvert->max_nb_samples))
+ if (init_buffers(inlink, curbuf->audio->nb_samples) < 0) {
+ av_log(inlink->dst, AV_LOG_ERROR, "Could not initialize buffers.\n");
+ return;
+ }
+
+ /* if channel mixing is required */
+ if (aconvert->mix_samplesref) {
+ memcpy(aconvert->in_mix, curbuf->data, sizeof(aconvert->in_mix));
+ memcpy(aconvert->out_mix, aconvert->mix_samplesref->data, sizeof(aconvert->out_mix));
+ aconvert->convert_chlayout(aconvert->out_mix,
+ aconvert->in_mix,
+ curbuf->audio->nb_samples,
+ aconvert);
+ curbuf = aconvert->mix_samplesref;
+ }
+
+ if (aconvert->audioconvert_ctx) {
+ if (!aconvert->mix_samplesref) {
+ if (aconvert->in_conv == aconvert->packed_data) {
+ int i, packed_stride = av_get_bytes_per_sample(inlink->format);
+ aconvert->packed_data[0] = curbuf->data[0];
+ for (i = 1; i < aconvert->out_nb_channels; i++)
+ aconvert->packed_data[i] = aconvert->packed_data[i-1] + packed_stride;
+ } else {
+ aconvert->in_conv = curbuf->data;
+ }
+ }
+
+ chan_mult = inlink->planar == outlink->planar && inlink->planar == 0 ?
+ aconvert->out_nb_channels : 1;
+
+ av_audio_convert(aconvert->audioconvert_ctx,
+ (void * const *) aconvert->out_conv,
+ aconvert->out_strides,
+ (const void * const *) aconvert->in_conv,
+ aconvert->in_strides,
+ curbuf->audio->nb_samples * chan_mult);
+
+ curbuf = aconvert->out_samplesref;
+ }
+
+ avfilter_copy_buffer_ref_props(curbuf, insamplesref);
+ curbuf->audio->channel_layout = outlink->channel_layout;
+ curbuf->audio->planar = outlink->planar;
+
+ avfilter_filter_samples(inlink->dst->outputs[0],
+ avfilter_ref_buffer(curbuf, ~0));
+ avfilter_unref_buffer(insamplesref);
+}
+
+AVFilter avfilter_af_aconvert = {
+ .name = "aconvert",
+ .description = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout:packed_fmt."),
+ .priv_size = sizeof(AConvertContext),
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .filter_samples = filter_samples,
+ .min_perms = AV_PERM_READ, },
+ { .name = NULL}},
+ .outputs = (AVFilterPad[]) {{ .name = "default",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .config_props = config_output, },
+ { .name = NULL}},
+};
diff --git a/libavfilter/af_aconvert_rematrix.c b/libavfilter/af_aconvert_rematrix.c
new file mode 100644
index 0000000000..d75ca5aa40
--- /dev/null
+++ b/libavfilter/af_aconvert_rematrix.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2011 Mina Nagy Zaki
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * audio rematrixing functions, based on functions from libavcodec/resample.c
+ */
+
+#if defined(FLOATING)
+# define DIV2 /2
+#else
+# define DIV2 >>1
+#endif
+
+REMATRIX_FUNC_SIG(stereo_to_mono_packed)
+{
+ while (nb_samples >= 4) {
+ outp[0][0] = (inp[0][0] + inp[0][1]) DIV2;
+ outp[0][1] = (inp[0][2] + inp[0][3]) DIV2;
+ outp[0][2] = (inp[0][4] + inp[0][5]) DIV2;
+ outp[0][3] = (inp[0][6] + inp[0][7]) DIV2;
+ outp[0] += 4;
+ inp[0] += 8;
+ nb_samples -= 4;
+ }
+ while (nb_samples--) {
+ outp[0][0] = (inp[0][0] + inp[0][1]) DIV2;
+ outp[0]++;
+ inp[0] += 2;
+ }
+}
+
+REMATRIX_FUNC_SIG(stereo_downmix_packed)
+{
+ while (nb_samples--) {
+ *outp[0]++ = inp[0][0];
+ *outp[0]++ = inp[0][1];
+ inp[0] += aconvert->in_nb_channels;
+ }
+}
+
+REMATRIX_FUNC_SIG(mono_to_stereo_packed)
+{
+ while (nb_samples >= 4) {
+ outp[0][0] = outp[0][1] = inp[0][0];
+ outp[0][2] = outp[0][3] = inp[0][1];
+ outp[0][4] = outp[0][5] = inp[0][2];
+ outp[0][6] = outp[0][7] = inp[0][3];
+ outp[0] += 8;
+ inp[0] += 4;
+ nb_samples -= 4;
+ }
+ while (nb_samples--) {
+ outp[0][0] = outp[0][1] = inp[0][0];
+ outp[0] += 2;
+ inp[0] += 1;
+ }
+}
+
+/**
+ * This is used when we have more than 2 input channels, need to downmix to
+ * mono, and do not have a dedicated conversion formula. We simply mix the
+ * first two input channels (left and right). This is a placeholder until
+ * more conversion functions are written.
+ */
+REMATRIX_FUNC_SIG(mono_downmix_packed)
+{
+ while (nb_samples--) {
+ outp[0][0] = (inp[0][0] + inp[0][1]) DIV2;
+ inp[0] += aconvert->in_nb_channels;
+ outp[0]++;
+ }
+}
+
+REMATRIX_FUNC_SIG(mono_downmix_planar)
+{
+ FMT_TYPE *out = outp[0];
+
+ while (nb_samples >= 4) {
+ out[0] = (inp[0][0] + inp[1][0]) DIV2;
+ out[1] = (inp[0][1] + inp[1][1]) DIV2;
+ out[2] = (inp[0][2] + inp[1][2]) DIV2;
+ out[3] = (inp[0][3] + inp[1][3]) DIV2;
+ out += 4;
+ inp[0] += 4;
+ inp[1] += 4;
+ nb_samples -= 4;
+ }
+ while (nb_samples--) {
+ out[0] = (inp[0][0] + inp[1][0]) DIV2;
+ out++;
+ inp[0]++;
+ inp[1]++;
+ }
+}
+
+/* Stereo to 5.1 output */
+REMATRIX_FUNC_SIG(stereo_to_surround_5p1_packed)
+{
+ while (nb_samples--) {
+ outp[0][0] = inp[0][0]; /* left */
+ outp[0][1] = inp[0][1]; /* right */
+ outp[0][2] = (inp[0][0] + inp[0][1]) DIV2; /* center */
+ outp[0][3] = 0; /* low freq */
+ outp[0][4] = 0; /* FIXME: left surround: -3dB or -6dB or -9dB of stereo left */
+ outp[0][5] = 0; /* FIXME: right surround: -3dB or -6dB or -9dB of stereo right */
+ inp[0] += 2;
+ outp[0] += 6;
+ }
+}
+
+REMATRIX_FUNC_SIG(stereo_to_surround_5p1_planar)
+{
+ while (nb_samples--) {
+ *outp[0]++ = *inp[0]; /* left */
+ *outp[1]++ = *inp[1]; /* right */
+ *outp[2]++ = (*inp[0] + *inp[1]) DIV2; /* center */
+ *outp[3]++ = 0; /* low freq */
+ *outp[4]++ = 0; /* FIXME: left surround: -3dB or -6dB or -9dB of stereo left */
+ *outp[5]++ = 0; /* FIXME: right surround: -3dB or -6dB or -9dB of stereo right */
+ inp[0]++; inp[1]++;
+ }
+}
+
+
+/*
+5.1 to stereo downmix; input channel order: [fl, fr, c, lfe, rl, rr]
+- Left = front_left + rear_gain * rear_left + center_gain * center
+- Right = front_right + rear_gain * rear_right + center_gain * center
+Where rear_gain is usually around 0.5-1.0 and
+ center_gain is almost always 0.7 (-3 dB)
+*/
+REMATRIX_FUNC_SIG(surround_5p1_to_stereo_packed)
+{
+ while (nb_samples--) {
+ *outp[0]++ = inp[0][0] + (0.5 * inp[0][4]) + (0.7 * inp[0][2]); //FIXME CLIPPING!
+ *outp[0]++ = inp[0][1] + (0.5 * inp[0][5]) + (0.7 * inp[0][2]); //FIXME CLIPPING!
+
+ inp[0] += 6;
+ }
+}
+
+REMATRIX_FUNC_SIG(surround_5p1_to_stereo_planar)
+{
+ while (nb_samples--) {
+ *outp[0]++ = *inp[0] + (0.5 * *inp[4]) + (0.7 * *inp[2]); //FIXME CLIPPING!
+ *outp[1]++ = *inp[1] + (0.5 * *inp[5]) + (0.7 * *inp[2]); //FIXME CLIPPING!
+
+ inp[0]++; inp[1]++; inp[2]++; inp[3]++; inp[4]++; inp[5]++;
+ }
+}
+
+#undef DIV2
+#undef REMATRIX_FUNC_NAME
+#undef FMT_TYPE
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index bff23713a6..dcabb68da0 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -34,6 +34,7 @@ void avfilter_register_all(void)
return;
initialized = 1;
+ REGISTER_FILTER (ACONVERT, aconvert, af);
REGISTER_FILTER (AFORMAT, aformat, af);
REGISTER_FILTER (ANULL, anull, af);
REGISTER_FILTER (ARESAMPLE, aresample, af);
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index f1ebd09735..19eaf4e584 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -29,7 +29,7 @@
#include "libavutil/rational.h"
#define LIBAVFILTER_VERSION_MAJOR 2
-#define LIBAVFILTER_VERSION_MINOR 42
+#define LIBAVFILTER_VERSION_MINOR 43
#define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \