author     Michael Niedermayer <michaelni@gmx.at>  2011-05-15 19:18:02 +0200
committer  Michael Niedermayer <michaelni@gmx.at>  2011-05-15 19:18:02 +0200
commit     d46aada5c2c71b9b0a259e62699cab25837053b2 (patch)
tree       ab474244a6fda04d8a10d25201620cdaee11c3c6 /libavfilter
parent     66b1f210c024a08ba00e4a730e64940d248b8717 (diff)
parent     5a153604c930792aa7f00c55cbf3c470f582dfb7 (diff)
Merge branch 'master' into oldabi
* master: (403 commits)
  Initial caf muxer.
  Support decoding of amr_nb and gsm in caf.
  Fix decoding of msrle samples with 1bpp.
  udp: remove resource.h inclusion, it breaks mingw compilation.
  ffmpeg: Allow seting and cycling through debug modes.
  Fix FSF address copy paste error in some license headers.
  Add an aac sample which uses LTP to fate-aac.
  ffmpeg: Help for interactive keys.
  UDP: dont use thread_t as truth value.
  swscale: fix compile on mingw32
  [PATCH] Update pixdesc_be fate refs after adding 9/10bit YUV420P formats.
  arm: properly mark external symbol call
  ffmpeg: Interactivity support. Try pressing +-hs.
  swscale: 10l forgot git add this change from ronald.
  AVFrame: only set parameters from AVCodecContext in decode_video*() when no frame reordering is used.
  avcodec_default_get_buffer: init picture parameters.
  swscale: properly inline bits/endianness in yuv2yuvX16inC().
  swscale: fix clipping of 9/10bit YUV420P.
  Add av_clip_uintp2() function
  Support more QT 1bpp rawvideo files.
  ...

Conflicts:
    libavcodec/flacenc.c
    libavcodec/h261dec.c
    libavcodec/h263dec.c
    libavcodec/mpeg12.c
    libavcodec/msrle.c
    libavcodec/options.c
    libavcodec/qpeg.c
    libavcodec/rv34.c
    libavcodec/svq1dec.c
    libavcodec/svq3.c
    libavcodec/vc1dec.c
    libavcodec/version.h
    libavfilter/avfilter.h
    libavformat/file.c
    libavformat/options.c
    libavformat/rtpproto.c
    libavformat/udp.c
    libavutil/avutil.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavfilter')
-rw-r--r--  libavfilter/Makefile                    4
-rw-r--r--  libavfilter/avcodec.c                  42
-rw-r--r--  libavfilter/avcodec.h                  40
-rw-r--r--  libavfilter/avfilter.c                 69
-rw-r--r--  libavfilter/avfilter.h                  6
-rw-r--r--  libavfilter/defaults.c                 27
-rw-r--r--  libavfilter/internal.h                  6
-rw-r--r--  libavfilter/libmpcodecs/mp_image.h      1
-rw-r--r--  libavfilter/vf_aspect.c                 2
-rw-r--r--  libavfilter/vf_drawtext.c             174
-rw-r--r--  libavfilter/vf_frei0r.c                 2
-rw-r--r--  libavfilter/vf_scale.c                 12
-rw-r--r--  libavfilter/vf_showinfo.c               2
-rw-r--r--  libavfilter/vf_transpose.c              8
-rw-r--r--  libavfilter/vsrc_buffer.c              52
-rw-r--r--  libavfilter/vsrc_buffer.h               5
-rw-r--r--  libavfilter/vsrc_color.c                2
-rw-r--r--  libavfilter/vsrc_movie.c               28
18 files changed, 333 insertions, 149 deletions
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index bf28f9aa54..de34089468 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -6,7 +6,7 @@ FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_SCALE_FILTER) += swscale
FFLIBS-$(CONFIG_MP_FILTER) += avcodec
-HEADERS = avfilter.h avfiltergraph.h
+HEADERS = avcodec.h avfilter.h avfiltergraph.h
OBJS = allfilters.o \
avfilter.o \
@@ -16,6 +16,8 @@ OBJS = allfilters.o \
formats.o \
graphparser.o \
+OBJS-$(CONFIG_AVCODEC) += avcodec.o
+
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
diff --git a/libavfilter/avcodec.c b/libavfilter/avcodec.c
new file mode 100644
index 0000000000..c2f8651106
--- /dev/null
+++ b/libavfilter/avcodec.c
@@ -0,0 +1,42 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ */
+
+#include "avcodec.h"
+
+void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
+{
+ dst->pts = src->pts;
+ dst->pos = src->pkt_pos;
+ dst->format = src->format;
+
+ switch (dst->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ dst->video->w = src->width;
+ dst->video->h = src->height;
+ dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
+ dst->video->interlaced = src->interlaced_frame;
+ dst->video->top_field_first = src->top_field_first;
+ dst->video->key_frame = src->key_frame;
+ dst->video->pict_type = src->pict_type;
+ }
+}
diff --git a/libavfilter/avcodec.h b/libavfilter/avcodec.h
new file mode 100644
index 0000000000..f438860d0b
--- /dev/null
+++ b/libavfilter/avcodec.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AVCODEC_H
+#define AVFILTER_AVCODEC_H
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ *
+ * This should be included in an application ONLY if the installed
+ * libavfilter has been compiled with libavcodec support, otherwise
+ * symbols defined below will not be available.
+ */
+
+#include "libavcodec/avcodec.h" // AVFrame
+#include "avfilter.h"
+
+/**
+ * Copy the frame properties of src to dst, without copying the actual
+ * image data.
+ */
+void avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
+
+#endif /* AVFILTER_AVCODEC_H */
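Note (not part of the patch): an application that decodes with libavcodec and feeds libavfilter would use the new glue function roughly as in the sketch below; picref and frame are assumed to already exist with matching geometry.

/* Usage sketch only, assuming an AVFilterBufferRef *picref and a decoded
 * AVFrame *frame are already set up elsewhere. */
#include "libavfilter/avcodec.h"

static void forward_props(AVFilterBufferRef *picref, const AVFrame *frame)
{
    /* copies pts, pkt_pos, format and the video properties (w/h, SAR,
     * interlacing flags, key_frame, pict_type); pixel data is untouched */
    avfilter_copy_frame_props(picref, frame);
}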
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 443562b2f4..72e0a87f8e 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -25,6 +25,7 @@
#include "libavutil/rational.h"
#include "libavutil/audioconvert.h"
#include "libavutil/imgutils.h"
+#include "libavutil/avassert.h"
#include "avfilter.h"
#include "internal.h"
@@ -69,14 +70,47 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
return ret;
}
+static void store_in_pool(AVFilterBufferRef *ref)
+{
+ int i;
+ AVFilterPool *pool= ref->buf->priv;
+
+ av_assert0(ref->buf->data[0]);
+
+ if(pool->count == POOL_SIZE){
+ AVFilterBufferRef *ref1= pool->pic[0];
+ av_freep(&ref1->video);
+ av_freep(&ref1->audio);
+ av_freep(&ref1->buf->data[0]);
+ av_freep(&ref1->buf);
+ av_free(ref1);
+ memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1));
+ pool->count--;
+ pool->pic[POOL_SIZE-1] = NULL;
+ }
+
+ for(i=0; i<POOL_SIZE; i++){
+ if(!pool->pic[i]){
+ pool->pic[i]= ref;
+ pool->count++;
+ break;
+ }
+ }
+}
+
void avfilter_unref_buffer(AVFilterBufferRef *ref)
{
if (!ref)
return;
- if (!(--ref->buf->refcount))
+ if (!(--ref->buf->refcount)){
+ if(!ref->buf->free){
+ store_in_pool(ref);
+ return;
+ }
ref->buf->free(ref->buf);
- av_free(ref->video);
- av_free(ref->audio);
+ }
+ av_freep(&ref->video);
+ av_freep(&ref->audio);
av_free(ref);
}
@@ -238,7 +272,7 @@ static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
if (ref->video) {
av_dlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
- ref->video->pixel_aspect.num, ref->video->pixel_aspect.den,
+ ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den,
ref->video->w, ref->video->h,
!ref->video->interlaced ? 'P' : /* Progressive */
ref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
@@ -585,28 +619,53 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
return AVERROR(EINVAL);
ret = av_mallocz(sizeof(AVFilterContext));
+ if (!ret)
+ return AVERROR(ENOMEM);
ret->av_class = &avfilter_class;
ret->filter = filter;
ret->name = inst_name ? av_strdup(inst_name) : NULL;
- ret->priv = av_mallocz(filter->priv_size);
+ if (filter->priv_size) {
+ ret->priv = av_mallocz(filter->priv_size);
+ if (!ret->priv)
+ goto err;
+ }
ret->input_count = pad_count(filter->inputs);
if (ret->input_count) {
ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->input_count);
+ if (!ret->input_pads)
+ goto err;
memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->input_count);
ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->input_count);
+ if (!ret->inputs)
+ goto err;
}
ret->output_count = pad_count(filter->outputs);
if (ret->output_count) {
ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->output_count);
+ if (!ret->output_pads)
+ goto err;
memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->output_count);
ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->output_count);
+ if (!ret->outputs)
+ goto err;
}
*filter_ctx = ret;
return 0;
+
+err:
+ av_freep(&ret->inputs);
+ av_freep(&ret->input_pads);
+ ret->input_count = 0;
+ av_freep(&ret->outputs);
+ av_freep(&ret->output_pads);
+ ret->output_count = 0;
+ av_freep(&ret->priv);
+ av_free(ret);
+ return AVERROR(ENOMEM);
}
void avfilter_free(AVFilterContext *filter)
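Note (not part of the patch): avfilter_open() now cleans up any partially allocated context and reports allocation failures instead of dereferencing NULL, so callers should check its return value. A hedged caller sketch; the instance name is an arbitrary example:

/* *filter_ctx is only written on success; AVERROR(ENOMEM) is now possible. */
AVFilterContext *scale_ctx = NULL;
int err = avfilter_open(&scale_ctx, avfilter_get_by_name("scale"), "out_scaler");
if (err < 0)
    return err;   /* nothing to free here, avfilter_open() already cleaned up */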
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index d65d48d901..8251f2bf0e 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -26,7 +26,7 @@
#include "libavutil/samplefmt.h"
#define LIBAVFILTER_VERSION_MAJOR 1
-#define LIBAVFILTER_VERSION_MINOR 77
+#define LIBAVFILTER_VERSION_MINOR 78
#define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
@@ -112,7 +112,7 @@ typedef struct AVFilterBufferRefAudioProps {
typedef struct AVFilterBufferRefVideoProps {
int w; ///< image width
int h; ///< image height
- AVRational pixel_aspect; ///< pixel aspect ratio
+ AVRational sample_aspect_ratio; ///< sample aspect ratio
int interlaced; ///< is frame interlaced
int top_field_first; ///< field order
enum AVPictureType pict_type; ///< picture type of the frame
@@ -619,6 +619,8 @@ struct AVFilterLink {
* input link is assumed to be an unchangeable property.
*/
AVRational time_base;
+
+ struct AVFilterPool *pool;
};
/**
diff --git a/libavfilter/defaults.c b/libavfilter/defaults.c
index 1da2630471..9ee23e57b7 100644
--- a/libavfilter/defaults.c
+++ b/libavfilter/defaults.c
@@ -25,7 +25,6 @@
#include "avfilter.h"
#include "internal.h"
-/* TODO: buffer pool. see comment for avfilter_default_get_video_buffer() */
void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
{
av_free(ptr->data[0]);
@@ -39,10 +38,30 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
{
int linesize[4];
uint8_t *data[4];
+ int i;
AVFilterBufferRef *picref = NULL;
+ AVFilterPool *pool= link->pool;
+
+ if(pool) for(i=0; i<POOL_SIZE; i++){
+ picref= pool->pic[i];
+ if(picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h){
+ AVFilterBuffer *pic= picref->buf;
+ pool->pic[i]= NULL;
+ pool->count--;
+ picref->video->w = w;
+ picref->video->h = h;
+ picref->perms = perms | AV_PERM_READ;
+ picref->format= link->format;
+ pic->refcount = 1;
+ memcpy(picref->data, pic->data, sizeof(picref->data));
+ memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
+ return picref;
+ }
+ }else
+ pool = link->pool = av_mallocz(sizeof(AVFilterPool));
// +2 is needed for swscaler, +16 to be SIMD-friendly
- if (av_image_alloc(data, linesize, w, h, link->format, 16) < 0)
+ if ((i=av_image_alloc(data, linesize, w, h, link->format, 16)) < 0)
return NULL;
picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
@@ -51,6 +70,10 @@ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int per
av_free(data[0]);
return NULL;
}
+ memset(data[0], 128, i);
+
+ picref->buf->priv= pool;
+ picref->buf->free= NULL;
return picref;
}
diff --git a/libavfilter/internal.h b/libavfilter/internal.h
index 188da87099..159e979168 100644
--- a/libavfilter/internal.h
+++ b/libavfilter/internal.h
@@ -27,6 +27,12 @@
#include "avfilter.h"
#include "avfiltergraph.h"
+#define POOL_SIZE 32
+typedef struct AVFilterPool {
+ AVFilterBufferRef *pic[POOL_SIZE];
+ int count;
+}AVFilterPool;
+
/**
* Check for the validity of graph.
*
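Note (not part of the patch): together with the defaults.c and avfilter.c hunks above, this pool is transparent to filters. avfilter_default_get_video_buffer() hands back a cached buffer of matching format and size when one is available, and avfilter_unref_buffer() stores poolable buffers (those with a NULL free callback) instead of releasing them. The usual lifecycle in a source filter is unchanged; sketch below, with outlink assumed to be the filter's output link:

/* Sketch only; the pool in outlink->pool is an implementation detail. */
AVFilterBufferRef *picref =
    avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
if (!picref)
    return AVERROR(ENOMEM);
/* ... write into picref->data ... */
avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
avfilter_draw_slice(outlink, 0, outlink->h, 1);
avfilter_end_frame(outlink);
avfilter_unref_buffer(picref);  /* last unref recycles the buffer into the pool */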
diff --git a/libavfilter/libmpcodecs/mp_image.h b/libavfilter/libmpcodecs/mp_image.h
index 3d566af693..50d3fa19a6 100644
--- a/libavfilter/libmpcodecs/mp_image.h
+++ b/libavfilter/libmpcodecs/mp_image.h
@@ -33,6 +33,7 @@
#undef rand
#undef srand
#undef printf
+#undef strncpy
#define ASMALIGN(ZEROBITS) ".p2align " #ZEROBITS "\n\t"
diff --git a/libavfilter/vf_aspect.c b/libavfilter/vf_aspect.c
index 95900d15a9..3b4a57cf58 100644
--- a/libavfilter/vf_aspect.c
+++ b/libavfilter/vf_aspect.c
@@ -65,7 +65,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
AspectContext *aspect = link->dst->priv;
- picref->video->pixel_aspect = aspect->aspect;
+ picref->video->sample_aspect_ratio = aspect->aspect;
avfilter_start_frame(link->dst->outputs[0], picref);
}
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index 99045b7b4b..b26029bb8f 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -45,17 +45,13 @@
#include FT_FREETYPE_H
#include FT_GLYPH_H
-#define MAX_EXPANDED_TEXT_SIZE 2048
-
typedef struct {
const AVClass *class;
- char *fontfile; ///< font to be used
- char *text; ///< text to be drawn
+ uint8_t *fontfile; ///< font to be used
+ uint8_t *text; ///< text to be drawn
+ uint8_t *text_priv; ///< used to detect whether text changed
int ft_load_flags; ///< flags used for loading fonts, see FT_LOAD_*
- /** buffer containing the text expanded by strftime */
- char expanded_text[MAX_EXPANDED_TEXT_SIZE];
- /** positions for each element in the text */
- FT_Vector positions[MAX_EXPANDED_TEXT_SIZE];
+ FT_Vector *positions; ///< positions for each element in the text
char *textfile; ///< file with text to be drawn
unsigned int x; ///< x position to start drawing text
unsigned int y; ///< y position to start drawing text
@@ -157,9 +153,10 @@ typedef struct {
int bitmap_top;
} Glyph;
-static int glyph_cmp(const Glyph *a, const Glyph *b)
+static int glyph_cmp(void *key, const void *b)
{
- int64_t diff = (int64_t)a->code - (int64_t)b->code;
+ const Glyph *a = key, *bb = b;
+ int64_t diff = (int64_t)a->code - (int64_t)bb->code;
return diff > 0 ? 1 : diff < 0 ? -1 : 0;
}
@@ -169,21 +166,26 @@ static int glyph_cmp(const Glyph *a, const Glyph *b)
static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
{
DrawTextContext *dtext = ctx->priv;
- Glyph *glyph = av_mallocz(sizeof(Glyph));
+ Glyph *glyph;
struct AVTreeNode *node = NULL;
int ret;
/* load glyph into dtext->face->glyph */
- ret = FT_Load_Char(dtext->face, code, dtext->ft_load_flags);
- if (ret)
+ if (FT_Load_Char(dtext->face, code, dtext->ft_load_flags))
return AVERROR(EINVAL);
/* save glyph */
+ if (!(glyph = av_mallocz(sizeof(*glyph))) ||
+ !(glyph->glyph = av_mallocz(sizeof(*glyph->glyph)))) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
glyph->code = code;
- glyph->glyph = av_mallocz(sizeof(FT_Glyph));
- ret = FT_Get_Glyph(dtext->face->glyph, glyph->glyph);
- if (ret)
- return AVERROR(EINVAL);
+
+ if (FT_Get_Glyph(dtext->face->glyph, glyph->glyph)) {
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
glyph->bitmap = dtext->face->glyph->bitmap;
glyph->bitmap_left = dtext->face->glyph->bitmap_left;
@@ -194,19 +196,29 @@ static int load_glyph(AVFilterContext *ctx, Glyph **glyph_ptr, uint32_t code)
FT_Glyph_Get_CBox(*glyph->glyph, ft_glyph_bbox_pixels, &glyph->bbox);
/* cache the newly created glyph */
- if (!node)
- node = av_mallocz(av_tree_node_size);
- av_tree_insert(&dtext->glyphs, glyph, (void *)glyph_cmp, &node);
+ if (!(node = av_mallocz(av_tree_node_size))) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ av_tree_insert(&dtext->glyphs, glyph, glyph_cmp, &node);
if (glyph_ptr)
*glyph_ptr = glyph;
return 0;
+
+error:
+ if (glyph)
+ av_freep(&glyph->glyph);
+ av_freep(&glyph);
+ av_freep(&node);
+ return ret;
}
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
int err;
DrawTextContext *dtext = ctx->priv;
+ Glyph *glyph;
dtext->class = &drawtext_class;
av_opt_set_defaults2(dtext, 0, 0);
@@ -294,14 +306,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
/* load the fallback glyph with code 0 */
load_glyph(ctx, NULL, 0);
+ /* set the tabsize in pixels */
+ if ((err = load_glyph(ctx, &glyph, ' ') < 0)) {
+ av_log(ctx, AV_LOG_ERROR, "Could not set tabsize.\n");
+ return err;
+ }
+ dtext->tabsize *= glyph->advance;
+
#if !HAVE_LOCALTIME_R
av_log(ctx, AV_LOG_WARNING, "strftime() expansion unavailable!\n");
-#else
- if (strlen(dtext->text) >= MAX_EXPANDED_TEXT_SIZE) {
- av_log(ctx, AV_LOG_ERROR,
- "Impossible to print text, string is too big\n");
- return AVERROR(EINVAL);
- }
#endif
return 0;
@@ -338,6 +351,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_freep(&dtext->text);
av_freep(&dtext->fontcolor_string);
av_freep(&dtext->boxcolor_string);
+ av_freep(&dtext->positions);
av_freep(&dtext->shadowcolor_string);
av_tree_enumerate(dtext->glyphs, NULL, NULL, glyph_enu_free);
av_tree_destroy(dtext->glyphs);
@@ -393,7 +407,7 @@ static int config_input(AVFilterLink *inlink)
luma_pos = ((x) ) + ((y) ) * picref->linesize[0]; \
alpha = yuva_color[3] * (val) * 129; \
picref->data[0][luma_pos] = (alpha * yuva_color[0] + (255*255*129 - alpha) * picref->data[0][luma_pos] ) >> 23; \
- if(((x) & ((1<<(hsub))-1))==0 && ((y) & ((1<<(vsub))-1))==0){\
+ if (((x) & ((1<<(hsub)) - 1)) == 0 && ((y) & ((1<<(vsub)) - 1)) == 0) {\
chroma_pos1 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[1]; \
chroma_pos2 = ((x) >> (hsub)) + ((y) >> (vsub)) * picref->linesize[2]; \
picref->data[1][chroma_pos1] = (alpha * yuva_color[1] + (255*255*129 - alpha) * picref->data[1][chroma_pos1]) >> 23; \
@@ -403,7 +417,7 @@ static int config_input(AVFilterLink *inlink)
static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, unsigned int x,
unsigned int y, unsigned int width, unsigned int height,
- unsigned char yuva_color[4], int hsub, int vsub)
+ const uint8_t yuva_color[4], int hsub, int vsub)
{
int r, c, alpha;
unsigned int luma_pos, chroma_pos1, chroma_pos2;
@@ -439,7 +453,7 @@ static inline int draw_glyph_yuv(AVFilterBufferRef *picref, FT_Bitmap *bitmap, u
static inline int draw_glyph_rgb(AVFilterBufferRef *picref, FT_Bitmap *bitmap,
unsigned int x, unsigned int y,
unsigned int width, unsigned int height, int pixel_step,
- unsigned char rgba_color[4], uint8_t rgba_map[4])
+ const uint8_t rgba_color[4], const uint8_t rgba_map[4])
{
int r, c, alpha;
uint8_t *p;
@@ -495,10 +509,15 @@ static inline void drawbox(AVFilterBufferRef *picref, unsigned int x, unsigned i
}
}
+static inline int is_newline(uint32_t c)
+{
+ return (c == '\n' || c == '\r' || c == '\f' || c == '\v');
+}
+
static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
int width, int height, const uint8_t rgbcolor[4], const uint8_t yuvcolor[4], int x, int y)
{
- char *text = HAVE_LOCALTIME_R ? dtext->expanded_text : dtext->text;
+ char *text = dtext->text;
uint32_t code = 0;
int i;
uint8_t *p;
@@ -537,44 +556,53 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
int width, int height)
{
DrawTextContext *dtext = ctx->priv;
- char *text = dtext->text;
uint32_t code = 0, prev_code = 0;
int x = 0, y = 0, i = 0, ret;
int text_height, baseline;
uint8_t *p;
- int str_w, str_w_max;
+ int str_w = 0;
int y_min = 32000, y_max = -32000;
FT_Vector delta;
Glyph *glyph = NULL, *prev_glyph = NULL;
Glyph dummy = { 0 };
+ if (dtext->text != dtext->text_priv) {
#if HAVE_LOCALTIME_R
- time_t now = time(0);
- struct tm ltime;
- size_t expanded_text_len;
-
- dtext->expanded_text[0] = '\1';
- expanded_text_len = strftime(dtext->expanded_text, MAX_EXPANDED_TEXT_SIZE,
- text, localtime_r(&now, &ltime));
- text = dtext->expanded_text;
- if (expanded_text_len == 0 && dtext->expanded_text[0] != '\0') {
- av_log(ctx, AV_LOG_ERROR,
- "Impossible to print text, string is too big\n");
- return AVERROR(EINVAL);
- }
+ time_t now = time(0);
+ struct tm ltime;
+ uint8_t *buf = NULL;
+ int buflen = 2*strlen(dtext->text) + 1, len;
+
+ localtime_r(&now, &ltime);
+
+ while ((buf = av_realloc(buf, buflen))) {
+ *buf = 1;
+ if ((len = strftime(buf, buflen, dtext->text, &ltime)) != 0 || *buf == 0)
+ break;
+ buflen *= 2;
+ }
+ if (!buf)
+ return AVERROR(ENOMEM);
+ av_freep(&dtext->text);
+ dtext->text = dtext->text_priv = buf;
+#else
+ dtext->text_priv = dtext->text;
#endif
+ if (!(dtext->positions = av_realloc(dtext->positions,
+ strlen(dtext->text)*sizeof(*dtext->positions))))
+ return AVERROR(ENOMEM);
+ }
- str_w = str_w_max = 0;
x = dtext->x;
y = dtext->y;
/* load and cache glyphs */
- for (i = 0, p = text; *p; i++) {
+ for (i = 0, p = dtext->text; *p; i++) {
GET_UTF8(code, *p++, continue;);
/* get glyph */
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL);
if (!glyph)
load_glyph(ctx, &glyph, code);
@@ -586,17 +614,25 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
/* compute and save position for each glyph */
glyph = NULL;
- for (i = 0, p = text; *p; i++) {
+ for (i = 0, p = dtext->text; *p; i++) {
GET_UTF8(code, *p++, continue;);
/* skip the \n in the sequence \r\n */
if (prev_code == '\r' && code == '\n')
continue;
+ prev_code = code;
+ if (is_newline(code)) {
+ str_w = FFMAX(str_w, x - dtext->x);
+ y += text_height;
+ x = dtext->x;
+ continue;
+ }
+
/* get glyph */
prev_glyph = glyph;
dummy.code = code;
- glyph = av_tree_find(dtext->glyphs, &dummy, (void *)glyph_cmp, NULL);
+ glyph = av_tree_find(dtext->glyphs, &dummy, glyph_cmp, NULL);
/* kerning */
if (dtext->use_kerning && prev_glyph && glyph->code) {
@@ -605,9 +641,8 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
x += delta.x >> 6;
}
- if (x + glyph->advance >= width || code == '\r' || code == '\n') {
- if (x + glyph->advance >= width)
- str_w_max = width - dtext->x - 1;
+ if (x + glyph->bbox.xMax >= width) {
+ str_w = FFMAX(str_w, x - dtext->x);
y += text_height;
x = dtext->x;
}
@@ -615,38 +650,27 @@ static int draw_text(AVFilterContext *ctx, AVFilterBufferRef *picref,
/* save position */
dtext->positions[i].x = x + glyph->bitmap_left;
dtext->positions[i].y = y - glyph->bitmap_top + baseline;
- if (code != '\n' && code != '\r') {
- int advance = glyph->advance;
- if (code == '\t')
- advance *= dtext->tabsize;
- x += advance;
- str_w += advance;
- }
- prev_code = code;
+ if (code == '\t') x = (x / dtext->tabsize + 1)*dtext->tabsize;
+ else x += glyph->advance;
}
- y += text_height;
- if (str_w_max == 0)
- str_w_max = str_w;
+ str_w = FFMIN(width - dtext->x - 1, FFMAX(str_w, x - dtext->x));
+ y = FFMIN(y + text_height, height - 1);
/* draw box */
- if (dtext->draw_box) {
- /* check if it doesn't pass the limits */
- str_w_max = FFMIN(str_w_max, width - dtext->x - 1);
- y = FFMIN(y, height - 1);
-
- /* draw background */
- drawbox(picref, dtext->x, dtext->y, str_w_max, y-dtext->y,
+ if (dtext->draw_box)
+ drawbox(picref, dtext->x, dtext->y, str_w, y-dtext->y,
dtext->box_line, dtext->pixel_step, dtext->boxcolor,
dtext->hsub, dtext->vsub, dtext->is_packed_rgb, dtext->rgba_map);
- }
- if(dtext->shadowx || dtext->shadowy){
- if((ret=draw_glyphs(dtext, picref, width, height, dtext->shadowcolor_rgba, dtext->shadowcolor, dtext->shadowx, dtext->shadowy))<0)
+ if (dtext->shadowx || dtext->shadowy) {
+ if ((ret = draw_glyphs(dtext, picref, width, height, dtext->shadowcolor_rgba,
+ dtext->shadowcolor, dtext->shadowx, dtext->shadowy)) < 0)
return ret;
}
- if((ret=draw_glyphs(dtext, picref, width, height, dtext->fontcolor_rgba, dtext->fontcolor, 0, 0))<0)
+ if ((ret = draw_glyphs(dtext, picref, width, height, dtext->fontcolor_rgba,
+ dtext->fontcolor, 0, 0)) < 0)
return ret;
return 0;
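Note on the hunk above (not part of the patch): the fixed MAX_EXPANDED_TEXT_SIZE buffer is replaced by a grow-and-retry strftime() expansion, where the *buf = 1 write lets a zero return be told apart from a legitimately empty result, and tab characters now advance to real column stops (x = (x / tabsize + 1) * tabsize) with tabsize scaled by the width of a space glyph at init time. The expansion pattern in isolation, as a stand-alone sketch with a hypothetical helper name:

#include <string.h>
#include <time.h>
#include "libavutil/mem.h"

/* Hypothetical helper mirroring the loop above; not part of the patch. */
static char *expand_strftime(const char *fmt, const struct tm *tm)
{
    int buflen = 2 * strlen(fmt) + 1;
    char *buf = NULL;

    while ((buf = av_realloc(buf, buflen))) {
        *buf = 1;   /* sentinel: distinguishes an empty result from a too-small buffer */
        if (strftime(buf, buflen, fmt, tm) != 0 || *buf == 0)
            return buf;                    /* fits, or expands to the empty string */
        buflen *= 2;                       /* too small: grow and retry */
    }
    return NULL;                           /* out of memory */
}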
diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c
index adccccb95b..0cb5fd30b5 100644
--- a/libavfilter/vf_frei0r.c
+++ b/libavfilter/vf_frei0r.c
@@ -430,7 +430,7 @@ static int source_request_frame(AVFilterLink *outlink)
{
Frei0rContext *frei0r = outlink->src->priv;
AVFilterBufferRef *picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
- picref->video->pixel_aspect = (AVRational) {1, 1};
+ picref->video->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = frei0r->pts++;
picref->pos = -1;
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 27214a6a2f..9ff93bd411 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -214,14 +214,18 @@ static int config_props(AVFilterLink *outlink)
scale->input_is_pal = av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL;
- if(scale->sws)
+ if (scale->sws)
sws_freeContext(scale->sws);
scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
outlink->w, outlink->h, outlink->format,
scale->flags, NULL, NULL, NULL);
+ if (scale->isws[0])
+ sws_freeContext(scale->isws[0]);
scale->isws[0] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
outlink->w, outlink->h/2, outlink->format,
scale->flags, NULL, NULL, NULL);
+ if (scale->isws[1])
+ sws_freeContext(scale->isws[1]);
scale->isws[1] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
outlink->w, outlink->h/2, outlink->format,
scale->flags, NULL, NULL, NULL);
@@ -252,9 +256,9 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
outlink->out_buf = outpicref;
- av_reduce(&outpicref->video->pixel_aspect.num, &outpicref->video->pixel_aspect.den,
- (int64_t)picref->video->pixel_aspect.num * outlink->h * link->w,
- (int64_t)picref->video->pixel_aspect.den * outlink->w * link->h,
+ av_reduce(&outpicref->video->sample_aspect_ratio.num, &outpicref->video->sample_aspect_ratio.den,
+ (int64_t)picref->video->sample_aspect_ratio.num * outlink->h * link->w,
+ (int64_t)picref->video->sample_aspect_ratio.den * outlink->w * link->h,
INT_MAX);
scale->slice_y = 0;
diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c
index d2483d5a7f..d512199602 100644
--- a/libavfilter/vf_showinfo.c
+++ b/libavfilter/vf_showinfo.c
@@ -59,7 +59,7 @@ static void end_frame(AVFilterLink *inlink)
showinfo->frame,
picref->pts, picref ->pts * av_q2d(inlink->time_base), picref->pos,
av_pix_fmt_descriptors[picref->format].name,
- picref->video->pixel_aspect.num, picref->video->pixel_aspect.den,
+ picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
picref->video->w, picref->video->h,
!picref->video->interlaced ? 'P' : /* Progressive */
picref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
diff --git a/libavfilter/vf_transpose.c b/libavfilter/vf_transpose.c
index ed91aaade5..a5247c9753 100644
--- a/libavfilter/vf_transpose.c
+++ b/libavfilter/vf_transpose.c
@@ -122,11 +122,11 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
outlink->w, outlink->h);
outlink->out_buf->pts = picref->pts;
- if (picref->video->pixel_aspect.num == 0) {
- outlink->out_buf->video->pixel_aspect = picref->video->pixel_aspect;
+ if (picref->video->sample_aspect_ratio.num == 0) {
+ outlink->out_buf->video->sample_aspect_ratio = picref->video->sample_aspect_ratio;
} else {
- outlink->out_buf->video->pixel_aspect.num = picref->video->pixel_aspect.den;
- outlink->out_buf->video->pixel_aspect.den = picref->video->pixel_aspect.num;
+ outlink->out_buf->video->sample_aspect_ratio.num = picref->video->sample_aspect_ratio.den;
+ outlink->out_buf->video->sample_aspect_ratio.den = picref->video->sample_aspect_ratio.num;
}
avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
diff --git a/libavfilter/vsrc_buffer.c b/libavfilter/vsrc_buffer.c
index c683d51e6f..84f3b33c3f 100644
--- a/libavfilter/vsrc_buffer.c
+++ b/libavfilter/vsrc_buffer.c
@@ -24,23 +24,21 @@
*/
#include "avfilter.h"
+#include "avcodec.h"
#include "vsrc_buffer.h"
#include "libavutil/imgutils.h"
typedef struct {
- int64_t pts;
AVFrame frame;
int has_frame;
int h, w;
enum PixelFormat pix_fmt;
AVRational time_base; ///< time_base to set in the output link
- AVRational pixel_aspect;
+ AVRational sample_aspect_ratio;
char sws_param[256];
} BufferSourceContext;
int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect, int width,
- int height, enum PixelFormat pix_fmt,
const char *sws_param)
{
BufferSourceContext *c = buffer_filter->priv;
@@ -58,12 +56,14 @@ int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
snprintf(c->sws_param, 255, "%d:%d:%s", c->w, c->h, sws_param);
}
- if(width != c->w || height != c->h || pix_fmt != c->pix_fmt){
+ if (frame->width != c->w || frame->height != c->h || frame->format != c->pix_fmt) {
AVFilterContext *scale= buffer_filter->outputs[0]->dst;
AVFilterLink *link;
- av_log(buffer_filter, AV_LOG_INFO, "Changing filter graph input to accept %dx%d %d (%d %d)\n",
- width,height,pix_fmt, c->pix_fmt, scale && scale->outputs ? scale->outputs[0]->format : -123);
+ av_log(buffer_filter, AV_LOG_INFO,
+ "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
+ c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
+ frame->width, frame->height, av_pix_fmt_descriptors[frame->format].name);
if(!scale || strcmp(scale->filter->name,"scale")){
AVFilter *f= avfilter_get_by_name("scale");
@@ -89,36 +89,26 @@ int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
scale->filter->init(scale, c->sws_param, NULL);
}
- c->pix_fmt= scale->inputs[0]->format= pix_fmt;
- c->w= scale->inputs[0]->w= width;
- c->h= scale->inputs[0]->h= height;
+ c->pix_fmt = scale->inputs[0]->format = frame->format;
+ c->w = scale->inputs[0]->w = frame->width;
+ c->h = scale->inputs[0]->h = frame->height;
link= scale->outputs[0];
if ((ret = link->srcpad->config_props(link)) < 0)
return ret;
}
+ c->frame = *frame;
memcpy(c->frame.data , frame->data , sizeof(frame->data));
memcpy(c->frame.linesize, frame->linesize, sizeof(frame->linesize));
- c->frame.interlaced_frame= frame->interlaced_frame;
- c->frame.top_field_first = frame->top_field_first;
- c->frame.key_frame = frame->key_frame;
- c->frame.pict_type = frame->pict_type;
- c->pts = pts;
- c->pixel_aspect = pixel_aspect;
c->has_frame = 1;
return 0;
}
-int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect)
+int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
- BufferSourceContext *c = buffer_filter->priv;
-
- return av_vsrc_buffer_add_frame2(buffer_filter, frame,
- pts, pixel_aspect, c->w,
- c->h, c->pix_fmt, "");
+ return av_vsrc_buffer_add_frame2(buffer_filter, frame, "");
}
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
@@ -130,7 +120,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
if (!args ||
(n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
&c->time_base.num, &c->time_base.den,
- &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
+ &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den)) != 7) {
av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but only %d found in '%s'\n", n, args);
return AVERROR(EINVAL);
}
@@ -143,7 +133,10 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
}
}
- av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
+ av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d\n",
+ c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
+ c->time_base.num, c->time_base.den,
+ c->sample_aspect_ratio.num, c->sample_aspect_ratio.den);
return 0;
}
@@ -162,7 +155,7 @@ static int config_props(AVFilterLink *link)
link->w = c->w;
link->h = c->h;
- link->sample_aspect_ratio = c->pixel_aspect;
+ link->sample_aspect_ratio = c->sample_aspect_ratio;
link->time_base = c->time_base;
return 0;
@@ -188,13 +181,8 @@ static int request_frame(AVFilterLink *link)
av_image_copy(picref->data, picref->linesize,
c->frame.data, c->frame.linesize,
picref->format, link->w, link->h);
+ avfilter_copy_frame_props(picref, &c->frame);
- picref->pts = c->pts;
- picref->video->pixel_aspect = c->pixel_aspect;
- picref->video->interlaced = c->frame.interlaced_frame;
- picref->video->top_field_first = c->frame.top_field_first;
- picref->video->key_frame = c->frame.key_frame;
- picref->video->pict_type = c->frame.pict_type;
avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
avfilter_draw_slice(link, 0, link->h, 1);
avfilter_end_frame(link);
diff --git a/libavfilter/vsrc_buffer.h b/libavfilter/vsrc_buffer.h
index 79a9908c69..2dda546e01 100644
--- a/libavfilter/vsrc_buffer.h
+++ b/libavfilter/vsrc_buffer.h
@@ -30,12 +30,9 @@
#include "libavcodec/avcodec.h" /* AVFrame */
#include "avfilter.h"
-int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect);
+int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame);
int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
- int64_t pts, AVRational pixel_aspect, int width,
- int height, enum PixelFormat pix_fmt,
const char *sws_param);
#endif /* AVFILTER_VSRC_BUFFER_H */
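Note (not part of the patch): callers of the buffer source are correspondingly simpler, since geometry, pixel format, timestamp and sample aspect ratio now travel inside the AVFrame and are picked up via avfilter_copy_frame_props(). A hedged sketch of the new calling convention; buffer_src, dec_ctx and frame are assumed to exist in the caller:

/* Sketch only: fill the fields the buffer source now reads from the frame. */
frame->width               = dec_ctx->width;
frame->height              = dec_ctx->height;
frame->format              = dec_ctx->pix_fmt;
frame->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* frame->pts is expected to be set by the decoder or the caller */
int ret = av_vsrc_buffer_add_frame(buffer_src, frame);
if (ret < 0)
    return ret;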
diff --git a/libavfilter/vsrc_color.c b/libavfilter/vsrc_color.c
index 3fab260a2f..dc73e1bafa 100644
--- a/libavfilter/vsrc_color.c
+++ b/libavfilter/vsrc_color.c
@@ -132,7 +132,7 @@ static int color_request_frame(AVFilterLink *link)
{
ColorContext *color = link->src->priv;
AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
- picref->video->pixel_aspect = (AVRational) {1, 1};
+ picref->video->sample_aspect_ratio = (AVRational) {1, 1};
picref->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
picref->pos = 0;
diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c
index da601bb11f..e36412f480 100644
--- a/libavfilter/vsrc_movie.c
+++ b/libavfilter/vsrc_movie.c
@@ -35,6 +35,7 @@
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavformat/avformat.h"
+#include "avcodec.h"
#include "avfilter.h"
typedef struct {
@@ -57,12 +58,12 @@ typedef struct {
#define OFFSET(x) offsetof(MovieContext, x)
static const AVOption movie_options[]= {
-{"format_name", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, 0, CHAR_MIN, CHAR_MAX },
-{"f", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, 0, CHAR_MIN, CHAR_MAX },
-{"stream_index", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, -1, -1, INT_MAX },
-{"si", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, -1, -1, INT_MAX },
-{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, 0, 0, (INT64_MAX-1) / 1000000 },
-{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, 0, 0, (INT64_MAX-1) / 1000000 },
+{"format_name", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX },
+{"f", "set format name", OFFSET(format_name), FF_OPT_TYPE_STRING, {.str = 0}, CHAR_MIN, CHAR_MAX },
+{"stream_index", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, {.dbl = -1}, -1, INT_MAX },
+{"si", "set stream index", OFFSET(stream_index), FF_OPT_TYPE_INT, {.dbl = -1}, -1, INT_MAX },
+{"seek_point", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000 },
+{"sp", "set seekpoint (seconds)", OFFSET(seek_point_d), FF_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, (INT64_MAX-1) / 1000000 },
{NULL},
};
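Note (not part of the patch): the option table above now uses the default_val union initializers expected by current AVOption, {.str = ...} for strings and {.dbl = ...} for numeric types. A hedged one-line example of the same pattern with a hypothetical option and struct field:

/* Hypothetical extra entry, shown only to illustrate the {.dbl}/{.str} form: */
{"loop", "number of times to read the stream", OFFSET(loop_count), FF_OPT_TYPE_INT, {.dbl = 1}, 0, INT_MAX },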
@@ -230,7 +231,6 @@ static int movie_get_frame(AVFilterLink *outlink)
while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) {
// Is this a packet from the video stream?
if (pkt.stream_index == movie->stream_index) {
- movie->codec_ctx->reordered_opaque = pkt.pos;
avcodec_decode_video2(movie->codec_ctx, movie->frame, &frame_decoded, &pkt);
if (frame_decoded) {
@@ -240,26 +240,22 @@ static int movie_get_frame(AVFilterLink *outlink)
av_image_copy(movie->picref->data, movie->picref->linesize,
movie->frame->data, movie->frame->linesize,
movie->picref->format, outlink->w, outlink->h);
+ avfilter_copy_frame_props(movie->picref, movie->frame);
/* FIXME: use a PTS correction mechanism as that in
* ffplay.c when some API will be available for that */
/* use pkt_dts if pkt_pts is not available */
movie->picref->pts = movie->frame->pkt_pts == AV_NOPTS_VALUE ?
movie->frame->pkt_dts : movie->frame->pkt_pts;
-
- movie->picref->pos = movie->frame->reordered_opaque;
- movie->picref->video->pixel_aspect = st->sample_aspect_ratio.num ?
- st->sample_aspect_ratio : movie->codec_ctx->sample_aspect_ratio;
- movie->picref->video->interlaced = movie->frame->interlaced_frame;
- movie->picref->video->top_field_first = movie->frame->top_field_first;
- movie->picref->video->key_frame = movie->frame->key_frame;
- movie->picref->video->pict_type = movie->frame->pict_type;
+ if (!movie->frame->sample_aspect_ratio.num)
+ movie->picref->video->sample_aspect_ratio = st->sample_aspect_ratio;
av_dlog(outlink->src,
"movie_get_frame(): file:'%s' pts:%"PRId64" time:%lf pos:%"PRId64" aspect:%d/%d\n",
movie->file_name, movie->picref->pts,
(double)movie->picref->pts * av_q2d(st->time_base),
movie->picref->pos,
- movie->picref->video->pixel_aspect.num, movie->picref->video->pixel_aspect.den);
+ movie->picref->video->sample_aspect_ratio.num,
+ movie->picref->video->sample_aspect_ratio.den);
// We got it. Free the packet since we are returning
av_free_packet(&pkt);