diff options
author | Michael Niedermayer <michael@niedermayer.cc> | 2015-07-26 21:25:21 +0200 |
---|---|---|
committer | Michael Niedermayer <michael@niedermayer.cc> | 2015-07-29 16:18:20 +0200 |
commit | d64ba25a4dd7951e6ac63ecad55a1dfe665d15b0 (patch) | |
tree | 02e97e206613823c347a42758e5968d967c308a9 /ffplay.c | |
parent | f40ec70478648c1e6cde43b8577c3c29380372ee (diff) | |
download | ffmpeg-d64ba25a4dd7951e6ac63ecad55a1dfe665d15b0.tar.gz |
ffplay: Use sws_scale to scale subtitles
Fixes some files from Ticket679
This also changes subtitles to 4:2:0 matching the output format and thus
simplifying the blend code.
This restricts placement to the chroma sample resolution, though. If you
consider this a problem, say so; the code could be changed to use YUV444
for subtitles and scale them down while blending, although this would be
slower.
The current code uses only a single swscale context and reinitializes it
as needed; this could be changed as well if needed.
Reviewed-by: Marton Balint <cus@passwd.hu>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Diffstat (limited to 'ffplay.c')
-rw-r--r-- | ffplay.c | 284 |
1 file changed, 71 insertions, 213 deletions
@@ -223,6 +223,9 @@ typedef struct VideoState { Decoder viddec; Decoder subdec; + int viddec_width; + int viddec_height; + int audio_stream; int av_sync_type; @@ -278,6 +281,7 @@ typedef struct VideoState { #if !CONFIG_AVFILTER struct SwsContext *img_convert_ctx; #endif + struct SwsContext *sub_convert_ctx; SDL_Rect last_display_rect; int eof; @@ -829,229 +833,50 @@ static void fill_border(int xleft, int ytop, int width, int height, int x, int y #define ALPHA_BLEND(a, oldp, newp, s)\ ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s)) -#define RGBA_IN(r, g, b, a, s)\ -{\ - unsigned int v = ((const uint32_t *)(s))[0];\ - a = (v >> 24) & 0xff;\ - r = (v >> 16) & 0xff;\ - g = (v >> 8) & 0xff;\ - b = v & 0xff;\ -} - -#define YUVA_IN(y, u, v, a, s, pal)\ -{\ - unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\ - a = (val >> 24) & 0xff;\ - y = (val >> 16) & 0xff;\ - u = (val >> 8) & 0xff;\ - v = val & 0xff;\ -} - -#define YUVA_OUT(d, y, u, v, a)\ -{\ - ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\ -} #define BPP 1 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh) { - int wrap, wrap3, width2, skip2; - int y, u, v, a, u1, v1, a1, w, h; + int x, y, Y, U, V, A; uint8_t *lum, *cb, *cr; - const uint8_t *p; - const uint32_t *pal; int dstx, dsty, dstw, dsth; + const AVPicture *src = &rect->pict; dstw = av_clip(rect->w, 0, imgw); dsth = av_clip(rect->h, 0, imgh); dstx = av_clip(rect->x, 0, imgw - dstw); dsty = av_clip(rect->y, 0, imgh - dsth); - lum = dst->data[0] + dsty * dst->linesize[0]; - cb = dst->data[1] + (dsty >> 1) * dst->linesize[1]; - cr = dst->data[2] + (dsty >> 1) * dst->linesize[2]; - - width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1); - skip2 = dstx >> 1; - wrap = dst->linesize[0]; - wrap3 = rect->pict.linesize[0]; - p = rect->pict.data[0]; - pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! 
*/ - - if (dsty & 1) { - lum += dstx; - cb += skip2; - cr += skip2; - - if (dstx & 1) { - YUVA_IN(y, u, v, a, p, pal); - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0); - cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0); - cb++; - cr++; + lum = dst->data[0] + dstx + dsty * dst->linesize[0]; + cb = dst->data[1] + dstx/2 + (dsty >> 1) * dst->linesize[1]; + cr = dst->data[2] + dstx/2 + (dsty >> 1) * dst->linesize[2]; + + for (y = 0; y<dsth; y++) { + for (x = 0; x<dstw; x++) { + Y = src->data[0][x + y*src->linesize[0]]; + A = src->data[3][x + y*src->linesize[3]]; + lum[0] = ALPHA_BLEND(A, lum[0], Y, 0); lum++; - p += BPP; - } - for (w = dstw - (dstx & 1); w >= 2; w -= 2) { - YUVA_IN(y, u, v, a, p, pal); - u1 = u; - v1 = v; - a1 = a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - - YUVA_IN(y, u, v, a, p + BPP, pal); - u1 += u; - v1 += v; - a1 += a; - lum[1] = ALPHA_BLEND(a, lum[1], y, 0); - cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); - cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1); - cb++; - cr++; - p += 2 * BPP; - lum += 2; - } - if (w) { - YUVA_IN(y, u, v, a, p, pal); - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0); - cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0); - p++; - lum++; - } - p += wrap3 - dstw * BPP; - lum += wrap - dstw - dstx; - cb += dst->linesize[1] - width2 - skip2; - cr += dst->linesize[2] - width2 - skip2; - } - for (h = dsth - (dsty & 1); h >= 2; h -= 2) { - lum += dstx; - cb += skip2; - cr += skip2; - - if (dstx & 1) { - YUVA_IN(y, u, v, a, p, pal); - u1 = u; - v1 = v; - a1 = a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - p += wrap3; - lum += wrap; - YUVA_IN(y, u, v, a, p, pal); - u1 += u; - v1 += v; - a1 += a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); - cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1); - cb++; - cr++; - p += -wrap3 + BPP; - lum += -wrap + 1; } - for (w = dstw - (dstx & 1); w >= 2; w -= 2) { - YUVA_IN(y, u, v, a, p, pal); - u1 = u; - v1 = v; - 
a1 = a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - - YUVA_IN(y, u, v, a, p + BPP, pal); - u1 += u; - v1 += v; - a1 += a; - lum[1] = ALPHA_BLEND(a, lum[1], y, 0); - p += wrap3; - lum += wrap; - - YUVA_IN(y, u, v, a, p, pal); - u1 += u; - v1 += v; - a1 += a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - - YUVA_IN(y, u, v, a, p + BPP, pal); - u1 += u; - v1 += v; - a1 += a; - lum[1] = ALPHA_BLEND(a, lum[1], y, 0); - - cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2); - cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2); + lum += dst->linesize[0] - dstw; + } + for (y = 0; y<dsth/2; y++) { + for (x = 0; x<dstw/2; x++) { + U = src->data[1][x + y*src->linesize[1]]; + V = src->data[2][x + y*src->linesize[2]]; + A = src->data[3][2*x + 2*y *src->linesize[3]] + + src->data[3][2*x + 1 + 2*y *src->linesize[3]] + + src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]] + + src->data[3][2*x + (2*y+1)*src->linesize[3]]; + cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0); + cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0); cb++; cr++; - p += -wrap3 + 2 * BPP; - lum += -wrap + 2; - } - if (w) { - YUVA_IN(y, u, v, a, p, pal); - u1 = u; - v1 = v; - a1 = a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - p += wrap3; - lum += wrap; - YUVA_IN(y, u, v, a, p, pal); - u1 += u; - v1 += v; - a1 += a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); - cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1); - cb++; - cr++; - p += -wrap3 + BPP; - lum += -wrap + 1; - } - p += wrap3 + (wrap3 - dstw * BPP); - lum += wrap + (wrap - dstw - dstx); - cb += dst->linesize[1] - width2 - skip2; - cr += dst->linesize[2] - width2 - skip2; - } - /* handle odd height */ - if (h) { - lum += dstx; - cb += skip2; - cr += skip2; - - if (dstx & 1) { - YUVA_IN(y, u, v, a, p, pal); - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0); - cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0); - cb++; - cr++; - lum++; - p += BPP; - } - for (w = dstw - (dstx & 1); w >= 2; w -= 2) { - YUVA_IN(y, u, v, a, p, pal); - u1 = u; - 
v1 = v; - a1 = a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - - YUVA_IN(y, u, v, a, p + BPP, pal); - u1 += u; - v1 += v; - a1 += a; - lum[1] = ALPHA_BLEND(a, lum[1], y, 0); - cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1); - cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1); - cb++; - cr++; - p += 2 * BPP; - lum += 2; - } - if (w) { - YUVA_IN(y, u, v, a, p, pal); - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0); - cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0); } + cb += dst->linesize[1] - dstw/2; + cr += dst->linesize[2] - dstw/2; } } @@ -1306,6 +1131,7 @@ static void stream_close(VideoState *is) #if !CONFIG_AVFILTER sws_freeContext(is->img_convert_ctx); #endif + sws_freeContext(is->sub_convert_ctx); av_free(is); } @@ -1893,6 +1719,9 @@ static int get_video_frame(VideoState *is, AVFrame *frame) frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); + is->viddec_width = frame->width; + is->viddec_height = frame->height; + if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) { if (frame->pts != AV_NOPTS_VALUE) { double diff = dpts - get_master_clock(is); @@ -2328,8 +2157,7 @@ static int subtitle_thread(void *arg) Frame *sp; int got_subtitle; double pts; - int i, j; - int r, g, b, y, u, v, a; + int i; for (;;) { if (!(sp = frame_queue_peek_writable(&is->subpq))) @@ -2348,14 +2176,41 @@ static int subtitle_thread(void *arg) for (i = 0; i < sp->sub.num_rects; i++) { - for (j = 0; j < sp->sub.rects[i]->nb_colors; j++) - { - RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j); - y = RGB_TO_Y_CCIR(r, g, b); - u = RGB_TO_U_CCIR(r, g, b, 0); - v = RGB_TO_V_CCIR(r, g, b, 0); - YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a); + int in_w = sp->sub.rects[i]->w; + int in_h = sp->sub.rects[i]->h; + int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width; + int subh = is->subdec.avctx->height ? 
is->subdec.avctx->height : is->viddec_height; + int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w; + int out_h = is->viddec_height ? in_h * is->viddec_height / subh : in_h; + AVPicture newpic; + + //can not use avpicture_alloc as it is not compatible with avsubtitle_free() + av_image_fill_linesizes(newpic.linesize, AV_PIX_FMT_YUVA420P, out_w); + newpic.data[0] = av_malloc(newpic.linesize[0] * out_h); + newpic.data[3] = av_malloc(newpic.linesize[3] * out_h); + newpic.data[1] = av_malloc(newpic.linesize[1] * ((out_h+1)/2)); + newpic.data[2] = av_malloc(newpic.linesize[2] * ((out_h+1)/2)); + + is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx, + in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h, + AV_PIX_FMT_YUVA420P, sws_flags, NULL, NULL, NULL); + if (!is->sub_convert_ctx || !newpic.data[0] || !newpic.data[3] || + !newpic.data[1] || !newpic.data[2] + ) { + av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n"); + exit(1); } + sws_scale(is->sub_convert_ctx, + (void*)sp->sub.rects[i]->pict.data, sp->sub.rects[i]->pict.linesize, + 0, in_h, newpic.data, newpic.linesize); + + av_free(sp->sub.rects[i]->pict.data[0]); + av_free(sp->sub.rects[i]->pict.data[1]); + sp->sub.rects[i]->pict = newpic; + sp->sub.rects[i]->w = out_w; + sp->sub.rects[i]->h = out_h; + sp->sub.rects[i]->x = sp->sub.rects[i]->x * out_w / in_w; + sp->sub.rects[i]->y = sp->sub.rects[i]->y * out_h / in_h; } /* now we can update the picture count */ @@ -2772,6 +2627,9 @@ static int stream_component_open(VideoState *is, int stream_index) is->video_stream = stream_index; is->video_st = ic->streams[stream_index]; + is->viddec_width = avctx->width; + is->viddec_height = avctx->height; + decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread); decoder_start(&is->viddec, video_thread, is); is->queue_attachments_req = 1; |