author:    Anton Khirnov <anton@khirnov.net>  2012-11-28 08:41:07 +0100
committer: Anton Khirnov <anton@khirnov.net>  2013-03-08 07:37:18 +0100
commit:    7e350379f87e7f74420b4813170fe808e2313911
tree:      031201839361d40af8b4c829f9c9f179e7d9f58d /libavfilter/vf_yadif.c
parent:    77b2cd7b41d7ec8008b6fac753c04f77824c514c
download:  ffmpeg-7e350379f87e7f74420b4813170fe808e2313911.tar.gz
lavfi: switch to AVFrame.
Deprecate AVFilterBuffer/AVFilterBufferRef and everything related to it
and use AVFrame instead.
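
For a filter such as yadif the port is largely mechanical, as the diff below shows: AVFilterBufferRef becomes AVFrame, avfilter_ref_buffer() becomes av_frame_clone(), avfilter_unref_buffer()/avfilter_unref_bufferp() become av_frame_free(), avfilter_copy_buffer_ref_props() becomes av_frame_copy_props(), and ff_get_video_buffer() loses its permissions argument. The sketch below condenses that mapping into a single compilable helper; it uses only the av_frame_*() calls that appear in the patch, and take_ref()/stored are illustrative names, not FFmpeg API.

```c
/* Condensed sketch of the reference handling the commit switches to.
 * Only av_frame_*() calls that appear in the patch are used; take_ref()
 * and "stored" are illustrative names, not part of FFmpeg. */
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int take_ref(AVFrame **stored, AVFrame *in)
{
    /* was: avfilter_ref_buffer(in, AV_PERM_READ) */
    AVFrame *ref = av_frame_clone(in);
    if (!ref)
        return AVERROR(ENOMEM);

    /* was: avfilter_copy_buffer_ref_props(ref, in); redundant right after
     * av_frame_clone(), but this is the call used when the destination is
     * a freshly allocated output buffer */
    av_frame_copy_props(ref, in);

    /* was: avfilter_unref_bufferp(stored); av_frame_free() unreferences
     * the frame and resets the pointer to NULL */
    av_frame_free(stored);

    /* per-frame video fields now live directly on AVFrame
     * (ref->width, ref->height, ref->interlaced_frame, ...) */
    *stored = ref;
    return 0;
}
```

With AVFrame there is no separate read/write permission model; exclusive access is expressed through buffer references instead, which is presumably why the PERM_RWP define is dropped in the first hunk.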
Diffstat (limited to 'libavfilter/vf_yadif.c')
-rw-r--r--  libavfilter/vf_yadif.c | 65
1 file changed, 30 insertions(+), 35 deletions(-)
diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c
index d1670902bb..37e9887421 100644
--- a/libavfilter/vf_yadif.c
+++ b/libavfilter/vf_yadif.c
@@ -31,8 +31,6 @@
 #undef NDEBUG
 #include <assert.h>
 
-#define PERM_RWP AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE
-
 #define CHECK(j)\
     {   int score = FFABS(cur[mrefs + off_left + (j)] - cur[prefs + off_left - (j)])\
                   + FFABS(cur[mrefs +(j)] - cur[prefs -(j)])\
@@ -167,15 +165,15 @@ static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
     FILTER(w - 3, w)
 }
 
-static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
+static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                    int parity, int tff)
 {
     YADIFContext *yadif = ctx->priv;
     int y, i;
 
     for (i = 0; i < yadif->csp->nb_components; i++) {
-        int w = dstpic->video->w;
-        int h = dstpic->video->h;
+        int w = dstpic->width;
+        int h = dstpic->height;
         int refs = yadif->cur->linesize[i];
         int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;
         int l_edge, l_edge_pix;
@@ -225,23 +223,22 @@ static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
     emms_c();
 }
 
-static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
-                                           int w, int h)
+static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
 {
-    AVFilterBufferRef *picref;
+    AVFrame *frame;
     int width  = FFALIGN(w, 32);
     int height = FFALIGN(h + 2, 32);
     int i;
 
-    picref = ff_default_get_video_buffer(link, perms, width, height);
+    frame = ff_default_get_video_buffer(link, width, height);
 
-    picref->video->w = w;
-    picref->video->h = h;
+    frame->width  = w;
+    frame->height = h;
 
     for (i = 0; i < 3; i++)
-        picref->data[i] += picref->linesize[i];
+        frame->data[i] += frame->linesize[i];
 
-    return picref;
+    return frame;
 }
 
 static int return_frame(AVFilterContext *ctx, int is_second)
@@ -251,19 +248,19 @@
     int tff, ret;
 
     if (yadif->parity == -1) {
-        tff = yadif->cur->video->interlaced ?
-              yadif->cur->video->top_field_first : 1;
+        tff = yadif->cur->interlaced_frame ?
+              yadif->cur->top_field_first : 1;
     } else {
         tff = yadif->parity ^ 1;
     }
 
     if (is_second) {
-        yadif->out = ff_get_video_buffer(link, PERM_RWP, link->w, link->h);
+        yadif->out = ff_get_video_buffer(link, link->w, link->h);
         if (!yadif->out)
             return AVERROR(ENOMEM);
 
-        avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
-        yadif->out->video->interlaced = 0;
+        av_frame_copy_props(yadif->out, yadif->cur);
+        yadif->out->interlaced_frame = 0;
     }
 
     filter(ctx, yadif->out, tff ^ !is_second, tff);
@@ -284,7 +281,7 @@
     return ret;
 }
 
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     AVFilterContext *ctx = link->dst;
     YADIFContext *yadif = ctx->priv;
@@ -293,36 +290,35 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref)
         return_frame(ctx, 1);
 
     if (yadif->prev)
-        avfilter_unref_buffer(yadif->prev);
+        av_frame_free(&yadif->prev);
     yadif->prev = yadif->cur;
     yadif->cur  = yadif->next;
-    yadif->next = picref;
+    yadif->next = frame;
 
     if (!yadif->cur)
         return 0;
 
-    if (yadif->auto_enable && !yadif->cur->video->interlaced) {
-        yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);
+    if (yadif->auto_enable && !yadif->cur->interlaced_frame) {
+        yadif->out = av_frame_clone(yadif->cur);
         if (!yadif->out)
             return AVERROR(ENOMEM);
 
-        avfilter_unref_bufferp(&yadif->prev);
+        av_frame_free(&yadif->prev);
         if (yadif->out->pts != AV_NOPTS_VALUE)
             yadif->out->pts *= 2;
         return ff_filter_frame(ctx->outputs[0], yadif->out);
     }
 
     if (!yadif->prev &&
-        !(yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ)))
+        !(yadif->prev = av_frame_clone(yadif->cur)))
         return AVERROR(ENOMEM);
 
-    yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP,
-                                     link->w, link->h);
+    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
     if (!yadif->out)
         return AVERROR(ENOMEM);
 
-    avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
-    yadif->out->video->interlaced = 0;
+    av_frame_copy_props(yadif->out, yadif->cur);
+    yadif->out->interlaced_frame = 0;
 
     if (yadif->out->pts != AV_NOPTS_VALUE)
         yadif->out->pts *= 2;
@@ -349,8 +345,7 @@ static int request_frame(AVFilterLink *link)
         ret = ff_request_frame(link->src->inputs[0]);
 
         if (ret == AVERROR_EOF && yadif->next) {
-            AVFilterBufferRef *next =
-                avfilter_ref_buffer(yadif->next, AV_PERM_READ);
+            AVFrame *next = av_frame_clone(yadif->next);
 
             if (!next)
                 return AVERROR(ENOMEM);
@@ -389,7 +384,7 @@ static int poll_frame(AVFilterLink *link)
     }
 
     assert(yadif->next || !val);
-    if (yadif->auto_enable && yadif->next && !yadif->next->video->interlaced)
+    if (yadif->auto_enable && yadif->next && !yadif->next->interlaced_frame)
        return val;
 
     return val * ((yadif->mode&1)+1);
@@ -399,9 +394,9 @@ static av_cold void uninit(AVFilterContext *ctx)
 {
     YADIFContext *yadif = ctx->priv;
 
-    if (yadif->prev) avfilter_unref_bufferp(&yadif->prev);
-    if (yadif->cur ) avfilter_unref_bufferp(&yadif->cur );
-    if (yadif->next) avfilter_unref_bufferp(&yadif->next);
+    if (yadif->prev) av_frame_free(&yadif->prev);
+    if (yadif->cur ) av_frame_free(&yadif->cur );
+    if (yadif->next) av_frame_free(&yadif->next);
 }
 
 static int query_formats(AVFilterContext *ctx)
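
The ownership pattern in filter_frame() carries over directly: the filter keeps a prev/cur/next window, releases whatever falls off the old end, and duplicates a reference when the history needs padding. Below is a minimal standalone sketch of that rotation under the new API, assuming only libavutil; FieldHistory and push_frame() are made-up names, and av_frame_alloc()/av_frame_get_buffer() appear only to make the example self-contained.

```c
/* Standalone sketch (not filter code) of the prev/cur/next rotation that
 * filter_frame() performs, using av_frame_clone() to duplicate a reference
 * and av_frame_free() to drop one, as in the patch above. */
#include <inttypes.h>
#include <stdio.h>

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

typedef struct FieldHistory {
    AVFrame *prev, *cur, *next;
} FieldHistory;

/* Take ownership of "in", shift the three-frame window and release the
 * frame that falls off the end. */
static int push_frame(FieldHistory *h, AVFrame *in)
{
    av_frame_free(&h->prev);   /* unreference the oldest frame */
    h->prev = h->cur;
    h->cur  = h->next;
    h->next = in;              /* the window now owns "in" */

    /* As in the patch: if no previous frame exists yet, duplicate the
     * reference so the history is always fully populated. */
    if (h->cur && !h->prev && !(h->prev = av_frame_clone(h->cur)))
        return AVERROR(ENOMEM);
    return 0;
}

int main(void)
{
    FieldHistory h = { NULL, NULL, NULL };

    for (int i = 0; i < 5; i++) {
        AVFrame *f = av_frame_alloc();
        if (!f)
            return 1;
        f->format = AV_PIX_FMT_GRAY8;  /* give the frame a real, refcounted */
        f->width  = 16;                /* buffer so that av_frame_clone()   */
        f->height = 16;                /* has something to reference        */
        f->pts    = i;
        if (av_frame_get_buffer(f, 32) < 0) {
            av_frame_free(&f);
            return 1;
        }
        if (push_frame(&h, f) < 0)     /* push_frame() now owns f */
            break;
        printf("window: prev=%"PRId64" cur=%"PRId64" next=%"PRId64"\n",
               h.prev ? h.prev->pts : (int64_t)-1,
               h.cur  ? h.cur->pts  : (int64_t)-1,
               h.next ? h.next->pts : (int64_t)-1);
    }

    /* uninit(): av_frame_free() both unreferences the frame and resets
     * the pointer to NULL. */
    av_frame_free(&h.prev);
    av_frame_free(&h.cur);
    av_frame_free(&h.next);
    return 0;
}
```

Since av_frame_free() ignores a NULL frame, the if (yadif->prev) guards kept in the new uninit() are strictly redundant, though harmless.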