author     Michael Niedermayer <michaelni@gmx.at>	2011-08-23 18:26:37 +0200
committer  Michael Niedermayer <michaelni@gmx.at>	2011-08-23 18:26:37 +0200
commit     4ca6a151e0c48f2729027ac263269cb50e88ed71 (patch)
tree       1307128caf78999abcf1600fd0fa8224770baa3b /libavcodec
parent     276f43be68617c0d56001abf1e213911dd51fed7 (diff)
parent     bc8c50512fc8550ae80535390379e00937623e00 (diff)
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  avconv: print the codecs names in the stream mapping.
  avconv: move the avcodec_find_decoder() call to add_input_streams().
  Windows Media Image decoder (WMVP/WVP2)
  ac3enc: remove outdated TODO comment for apply_channel_coupling()

Conflicts:
	Changelog
	libavcodec/avcodec.h
	libavcodec/vc1dec.c
	libavcodec/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/ac3enc_template.c    5
-rw-r--r--  libavcodec/allcodecs.c          2
-rw-r--r--  libavcodec/avcodec.h            2
-rw-r--r--  libavcodec/h263dec.c            2
-rw-r--r--  libavcodec/mpegvideo.c         12
-rw-r--r--  libavcodec/vc1.c                3
-rw-r--r--  libavcodec/vc1.h                3
-rw-r--r--  libavcodec/vc1dec.c           404
-rw-r--r--  libavcodec/vc1dsp.c            68
-rw-r--r--  libavcodec/vc1dsp.h            10
-rw-r--r--  libavcodec/version.h            2
11 files changed, 425 insertions, 88 deletions
diff --git a/libavcodec/ac3enc_template.c b/libavcodec/ac3enc_template.c
index 943a55a133..dd759a732b 100644
--- a/libavcodec/ac3enc_template.c
+++ b/libavcodec/ac3enc_template.c
@@ -132,11 +132,6 @@ static inline float calc_cpl_coord(float energy_ch, float energy_cpl)
/**
* Calculate coupling channel and coupling coordinates.
- * TODO: Currently this is only used for the floating-point encoder. I was
- * able to make it work for the fixed-point encoder, but quality was
- * generally lower in most cases than not using coupling. If a more
- * adaptive coupling strategy were to be implemented it might be useful
- * at that time to use coupling for the fixed-point encoder as well.
*/
static void apply_channel_coupling(AC3EncodeContext *s)
{
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 8ca6181e45..1a99b07b4f 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -212,6 +212,7 @@ void avcodec_register_all(void)
REGISTER_DECODER (VC1, vc1);
REGISTER_DECODER (VC1_CRYSTALHD, vc1_crystalhd);
REGISTER_DECODER (VC1_VDPAU, vc1_vdpau);
+ REGISTER_DECODER (VC1IMAGE, vc1image);
REGISTER_DECODER (VCR1, vcr1);
REGISTER_DECODER (VMDVIDEO, vmdvideo);
REGISTER_DECODER (VMNC, vmnc);
@@ -227,6 +228,7 @@ void avcodec_register_all(void)
REGISTER_DECODER (WMV3, wmv3);
REGISTER_DECODER (WMV3_CRYSTALHD, wmv3_crystalhd);
REGISTER_DECODER (WMV3_VDPAU, wmv3_vdpau);
+ REGISTER_DECODER (WMV3IMAGE, wmv3image);
REGISTER_DECODER (WNV1, wnv1);
REGISTER_DECODER (XAN_WC3, xan_wc3);
REGISTER_DECODER (XAN_WC4, xan_wc4);
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 2c5e70b136..7e526947f7 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -208,6 +208,8 @@ enum CodecID {
CODEC_ID_PRORES,
CODEC_ID_JV,
CODEC_ID_DFA,
+ CODEC_ID_WMV3IMAGE,
+ CODEC_ID_VC1IMAGE,
CODEC_ID_8SVX_RAW,
CODEC_ID_G2M,
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 9df6fbaf6b..750e3e0fbe 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -91,6 +91,8 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
break;
case CODEC_ID_VC1:
case CODEC_ID_WMV3:
+ case CODEC_ID_VC1IMAGE:
+ case CODEC_ID_WMV3IMAGE:
s->h263_pred = 1;
s->msmpeg4_version=6;
avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 133369cc47..e74d43adb8 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -208,7 +208,12 @@ void ff_copy_picture(Picture *dst, Picture *src){
*/
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
- ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+ /* Windows Media Image codecs allocate internal buffers with different
+ dimensions; ignore user defined callbacks for these */
+ if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
+ ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+ else
+ avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
av_freep(&pic->f.hwaccel_picture_private);
}
@@ -230,7 +235,10 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
}
}
- r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+ if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
+ r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+ else
+ r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
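
A minimal standalone sketch, not part of the patch, of the buffer-selection logic added above; all names here are hypothetical stand-ins rather than the real MpegEncContext/codec IDs. The point is only that the Windows Media Image codecs keep frames at internal sprite dimensions, so user-supplied buffer callbacks sized for the output frame must be bypassed in favour of the default allocator.

#include <stdio.h>

/* Hypothetical stand-ins for the real codec IDs and buffer helpers. */
enum { SKETCH_ID_WMV3IMAGE, SKETCH_ID_VC1IMAGE, SKETCH_ID_OTHER };

static int user_get_buffer(void)     { puts("user/thread-aware buffer"); return 0; }
static int internal_get_buffer(void) { puts("internal default buffer");  return 0; }

/* WMV3IMAGE/VC1IMAGE decode at sprite dimensions that differ from the
 * output dimensions, so user callbacks cannot size the buffers correctly. */
static int alloc_frame(int codec_id)
{
    if (codec_id != SKETCH_ID_WMV3IMAGE && codec_id != SKETCH_ID_VC1IMAGE)
        return user_get_buffer();
    return internal_get_buffer();
}

int main(void)
{
    alloc_frame(SKETCH_ID_OTHER);     /* -> user/thread-aware buffer */
    alloc_frame(SKETCH_ID_WMV3IMAGE); /* -> internal default buffer  */
    return 0;
}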
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index 3cb556c3d4..04fa77a7de 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -314,9 +314,6 @@ int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitConte
"Old interlaced mode is not supported\n");
return -1;
}
- if (v->res_sprite) {
- av_log(avctx, AV_LOG_ERROR, "WMVP is not fully supported\n");
- }
}
// (fps-2)/4 (->30)
diff --git a/libavcodec/vc1.h b/libavcodec/vc1.h
index 6d4c0aa7a3..b7ef557704 100644
--- a/libavcodec/vc1.h
+++ b/libavcodec/vc1.h
@@ -311,6 +311,9 @@ typedef struct VC1Context{
//@{
int new_sprite;
int two_sprites;
+ AVFrame sprite_output_frame;
+ int output_width, output_height, sprite_width, sprite_height;
+ uint8_t* sr_rows[2][2]; ///< Sprite resizer line cache
//@}
int p_frame_skipped;
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 7c5ce62b47..bb9b804960 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -3278,116 +3278,279 @@ static void vc1_decode_blocks(VC1Context *v)
}
}
-static inline float get_float_val(GetBitContext* gb)
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+
+typedef struct {
+ /**
+ * Transform coefficients for both sprites in 16.16 fixed point format,
+ * in the order they appear in the bitstream:
+ * x scale
+ * rotation 1 (unused)
+ * x offset
+ * rotation 2 (unused)
+ * y scale
+ * y offset
+ * alpha
+ */
+ int coefs[2][7];
+
+ int effect_type, effect_flag;
+ int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
+ int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
+} SpriteData;
+
+static inline int get_fp_val(GetBitContext* gb)
{
- return (float)get_bits_long(gb, 30) / (1<<15) - (1<<14);
+ return (get_bits_long(gb, 30) - (1<<29)) << 1;
}
-static void vc1_sprite_parse_transform(VC1Context *v, GetBitContext* gb, float c[7])
+static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
{
- c[1] = c[3] = 0.0f;
+ c[1] = c[3] = 0;
switch (get_bits(gb, 2)) {
case 0:
- c[0] = 1.0f;
- c[2] = get_float_val(gb);
- c[4] = 1.0f;
+ c[0] = 1<<16;
+ c[2] = get_fp_val(gb);
+ c[4] = 1<<16;
break;
case 1:
- c[0] = c[4] = get_float_val(gb);
- c[2] = get_float_val(gb);
+ c[0] = c[4] = get_fp_val(gb);
+ c[2] = get_fp_val(gb);
break;
case 2:
- c[0] = get_float_val(gb);
- c[2] = get_float_val(gb);
- c[4] = get_float_val(gb);
+ c[0] = get_fp_val(gb);
+ c[2] = get_fp_val(gb);
+ c[4] = get_fp_val(gb);
break;
case 3:
- av_log_ask_for_sample(v->s.avctx, NULL);
- c[0] = get_float_val(gb);
- c[1] = get_float_val(gb);
- c[2] = get_float_val(gb);
- c[3] = get_float_val(gb);
- c[4] = get_float_val(gb);
+ c[0] = get_fp_val(gb);
+ c[1] = get_fp_val(gb);
+ c[2] = get_fp_val(gb);
+ c[3] = get_fp_val(gb);
+ c[4] = get_fp_val(gb);
break;
}
- c[5] = get_float_val(gb);
+ c[5] = get_fp_val(gb);
if (get_bits1(gb))
- c[6] = get_float_val(gb);
+ c[6] = get_fp_val(gb);
else
- c[6] = 1.0f;
+ c[6] = 1<<16;
}
-static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb)
+static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
{
- int effect_type, effect_flag, effect_pcount1, effect_pcount2, i;
- float effect_params1[14], effect_params2[10];
-
- float coefs[2][7];
- vc1_sprite_parse_transform(v, gb, coefs[0]);
- av_log(v->s.avctx, AV_LOG_DEBUG, "S1:");
- for (i = 0; i < 7; i++)
- av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", coefs[0][i]);
- av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
-
- if (v->two_sprites) {
- vc1_sprite_parse_transform(v, gb, coefs[1]);
- av_log(v->s.avctx, AV_LOG_DEBUG, "S2:");
+ AVCodecContext *avctx = v->s.avctx;
+ int sprite, i;
+
+ for (sprite = 0; sprite <= v->two_sprites; sprite++) {
+ vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
+ if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
+ av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
+ av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
for (i = 0; i < 7; i++)
- av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", coefs[1][i]);
- av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+ av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
+ sd->coefs[sprite][i] / (1<<16),
+ (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1<<16));
+ av_log(avctx, AV_LOG_DEBUG, "\n");
}
+
skip_bits(gb, 2);
- if (effect_type = get_bits_long(gb, 30)){
- switch (effect_pcount1 = get_bits(gb, 4)) {
- case 2:
- effect_params1[0] = get_float_val(gb);
- effect_params1[1] = get_float_val(gb);
- break;
+ if (sd->effect_type = get_bits_long(gb, 30)) {
+ switch (sd->effect_pcount1 = get_bits(gb, 4)) {
case 7:
- vc1_sprite_parse_transform(v, gb, effect_params1);
+ vc1_sprite_parse_transform(gb, sd->effect_params1);
break;
case 14:
- vc1_sprite_parse_transform(v, gb, effect_params1);
- vc1_sprite_parse_transform(v, gb, &effect_params1[7]);
+ vc1_sprite_parse_transform(gb, sd->effect_params1);
+ vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
break;
default:
- av_log_ask_for_sample(v->s.avctx, NULL);
- return;
+ for (i = 0; i < sd->effect_pcount1; i++)
+ sd->effect_params1[i] = get_fp_val(gb);
}
- if (effect_type != 13 || effect_params1[0] != coefs[0][6]) {
+ if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
// effect 13 is simple alpha blending and matches the opacity above
- av_log(v->s.avctx, AV_LOG_DEBUG, "Effect: %d; params: ", effect_type);
- for (i = 0; i < effect_pcount1; i++)
- av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", effect_params1[i]);
- av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+ av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
+ for (i = 0; i < sd->effect_pcount1; i++)
+ av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
+ sd->effect_params1[i] / (1<<16),
+ (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1<<16));
+ av_log(avctx, AV_LOG_DEBUG, "\n");
}
- effect_pcount2 = get_bits(gb, 16);
- if (effect_pcount2 > 10) {
- av_log(v->s.avctx, AV_LOG_ERROR, "Too many effect parameters\n");
+ sd->effect_pcount2 = get_bits(gb, 16);
+ if (sd->effect_pcount2 > 10) {
+ av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
return;
- } else if (effect_pcount2) {
- i = 0;
- av_log(v->s.avctx, AV_LOG_DEBUG, "Effect params 2: ");
- while (i < effect_pcount2){
- effect_params2[i] = get_float_val(gb);
- av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", effect_params2[i]);
- i++;
+ } else if (sd->effect_pcount2) {
+ i = -1;
+ av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
+ while (++i < sd->effect_pcount2){
+ sd->effect_params2[i] = get_fp_val(gb);
+ av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
+ sd->effect_params2[i] / (1<<16),
+ (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1<<16));
}
- av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+ av_log(avctx, AV_LOG_DEBUG, "\n");
}
}
- if (effect_flag = get_bits1(gb))
- av_log(v->s.avctx, AV_LOG_DEBUG, "Effect flag set\n");
+ if (sd->effect_flag = get_bits1(gb))
+ av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
if (get_bits_count(gb) >= gb->size_in_bits +
- (v->s.avctx->codec_id == CODEC_ID_WMV3 ? 64 : 0))
- av_log(v->s.avctx, AV_LOG_ERROR, "Buffer overrun\n");
+ (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
+ av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
if (get_bits_count(gb) < gb->size_in_bits - 8)
- av_log(v->s.avctx, AV_LOG_WARNING, "Buffer not fully read\n");
+ av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
+}
+
+static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
+{
+ int i, plane, row, sprite;
+ int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
+ uint8_t* src_h[2][2];
+ int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
+ int ysub[2];
+ MpegEncContext *s = &v->s;
+
+ for (i = 0; i < 2; i++) {
+ xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
+ xadv[i] = sd->coefs[i][0];
+ if (xadv[i] != 1<<16 || (v->sprite_width<<16) - (v->output_width<<16) - xoff[i])
+ xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
+
+ yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
+ yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height<<16) - yoff[i]) / v->output_height);
+ }
+ alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
+
+ for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
+ int width = v->output_width>>!!plane;
+
+ for (row = 0; row < v->output_height>>!!plane; row++) {
+ uint8_t *dst = v->sprite_output_frame.data[plane] +
+ v->sprite_output_frame.linesize[plane] * row;
+
+ for (sprite = 0; sprite <= v->two_sprites; sprite++) {
+ uint8_t *iplane = s->current_picture.f.data[plane];
+ int iline = s->current_picture.f.linesize[plane];
+ int ycoord = yoff[sprite] + yadv[sprite]*row;
+ int yline = ycoord>>16;
+ ysub[sprite] = ycoord&0xFFFF;
+ if (sprite) {
+ iplane = s->last_picture.f.data[plane];
+ iline = s->last_picture.f.linesize[plane];
+ }
+ if (!(xoff[sprite]&0xFFFF) && xadv[sprite] == 1<<16) {
+ src_h[sprite][0] = iplane+(xoff[sprite]>>16)+ yline *iline;
+ if (ysub[sprite])
+ src_h[sprite][1] = iplane+(xoff[sprite]>>16)+(yline+1)*iline;
+ } else {
+ if (sr_cache[sprite][0] != yline) {
+ if (sr_cache[sprite][1] == yline) {
+ FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
+ FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
+ } else {
+ v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane+yline*iline, xoff[sprite], xadv[sprite], width);
+ sr_cache[sprite][0] = yline;
+ }
+ }
+ if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
+ v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane+(yline+1)*iline, xoff[sprite], xadv[sprite], width);
+ sr_cache[sprite][1] = yline + 1;
+ }
+ src_h[sprite][0] = v->sr_rows[sprite][0];
+ src_h[sprite][1] = v->sr_rows[sprite][1];
+ }
+ }
+
+ if (!v->two_sprites) {
+ if (ysub[0]) {
+ v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
+ } else {
+ memcpy(dst, src_h[0][0], width);
+ }
+ } else {
+ if (ysub[0] && ysub[1]) {
+ v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
+ src_h[1][0], src_h[1][1], ysub[1], alpha, width);
+ } else if (ysub[0]) {
+ v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
+ src_h[1][0], alpha, width);
+ } else if (ysub[1]) {
+ v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
+ src_h[0][0], (1<<16)-1-alpha, width);
+ } else {
+ v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
+ }
+ }
+ }
+
+ if (!plane) {
+ for (i = 0; i < 2; i++) {
+ xoff[i] >>= 1;
+ yoff[i] >>= 1;
+ }
+ }
+
+ }
+}
+
+
+static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
+{
+ MpegEncContext *s = &v->s;
+ AVCodecContext *avctx = s->avctx;
+ SpriteData sd;
+
+ vc1_parse_sprites(v, gb, &sd);
+
+ if (!s->current_picture.f.data[0]) {
+ av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
+ return -1;
+ }
+
+ if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
+ av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
+ v->two_sprites = 0;
+ }
+
+ if (v->sprite_output_frame.data[0])
+ avctx->release_buffer(avctx, &v->sprite_output_frame);
+
+ v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
+ v->sprite_output_frame.reference = 0;
+ if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ vc1_draw_sprites(v, &sd);
+
+ return 0;
+}
+
+static void vc1_sprite_flush(AVCodecContext *avctx)
+{
+ VC1Context *v = avctx->priv_data;
+ MpegEncContext *s = &v->s;
+ AVFrame *f = &s->current_picture.f;
+ int plane, i;
+
+ /* Windows Media Image codecs have a convergence interval of two keyframes.
+ Since we can't enforce it, clear to black the missing sprite. This is
+ wrong but it looks better than doing nothing. */
+
+ if (f->data[0])
+ for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
+ for (i = 0; i < v->sprite_height>>!!plane; i++)
+ memset(f->data[plane]+i*f->linesize[plane],
+ plane ? 128 : 0, f->linesize[plane]);
}
+#endif
+
/** Initialize a VC1/WMV3 decoder
* @todo TODO: Handle VC-1 IDUs (Transport level?)
* @todo TODO: Decypher remaining bits in extra_data
@@ -3399,6 +3562,10 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
GetBitContext gb;
int i, cur_width, cur_height;
+ /* save the container output size for WMImage */
+ v->output_width = avctx->width;
+ v->output_height = avctx->height;
+
if (!avctx->extradata_size || !avctx->extradata) return -1;
if (!(avctx->flags & CODEC_FLAG_GRAY))
avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
@@ -3420,7 +3587,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
cur_width = avctx->coded_width;
cur_height = avctx->coded_height;
- if (avctx->codec_id == CODEC_ID_WMV3)
+ if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE)
{
int count = 0;
@@ -3560,6 +3727,25 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
}
ff_intrax8_common_init(&v->x8,s);
+
+ if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+ for (i = 0; i < 4; i++)
+ if (!(v->sr_rows[i>>1][i%2] = av_malloc(v->output_width))) return -1;
+
+ s->low_delay = 1;
+
+ v->sprite_width = avctx->coded_width;
+ v->sprite_height = avctx->coded_height;
+
+ avctx->coded_width = avctx->width = v->output_width;
+ avctx->coded_height = avctx->height = v->output_height;
+
+ // prevent 16.16 overflows
+ if (v->sprite_width > 1<<14 ||
+ v->sprite_height > 1<<14 ||
+ v->output_width > 1<<14 ||
+ v->output_height > 1<<14) return -1;
+ }
return 0;
}
@@ -3612,7 +3798,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
}
//for advanced profile we may need to parse and unescape data
- if (avctx->codec_id == CODEC_ID_VC1) {
+ if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
int buf_size2 = 0;
buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -3677,8 +3863,19 @@ static int vc1_decode_frame(AVCodecContext *avctx,
if (v->res_sprite) {
v->new_sprite = !get_bits1(&s->gb);
v->two_sprites = get_bits1(&s->gb);
- if (!v->new_sprite)
- goto end;
+ /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
+ we're using the sprite compositor. These are intentionally kept separate
+ so you can get the raw sprites by using the wmv3 decoder for WMVP or
+ the vc1 one for WVP2 */
+ if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+ if (v->new_sprite) {
+ // switch AVCodecContext parameters to those of the sprites
+ avctx->width = avctx->coded_width = v->sprite_width;
+ avctx->height = avctx->coded_height = v->sprite_height;
+ } else {
+ goto image;
+ }
+ }
}
// do parse frame header
@@ -3692,8 +3889,10 @@ static int vc1_decode_frame(AVCodecContext *avctx,
}
}
- if (v->res_sprite && s->pict_type!=AV_PICTURE_TYPE_I) {
- av_log(v->s.avctx, AV_LOG_WARNING, "Sprite decoder: expected I-frame\n");
+ if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
+ && s->pict_type!=AV_PICTURE_TYPE_I) {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
+ goto err;
}
s->current_picture_ptr->f.repeat_pict = 0;
@@ -3765,6 +3964,19 @@ static int vc1_decode_frame(AVCodecContext *avctx,
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
+
+ if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+image:
+ avctx->width = avctx->coded_width = v->output_width;
+ avctx->height = avctx->coded_height = v->output_height;
+ if (avctx->skip_frame >= AVDISCARD_NONREF) goto end;
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+ if (vc1_decode_sprites(v, &s->gb)) goto err;
+#endif
+ *pict = v->sprite_output_frame;
+ *data_size = sizeof(AVFrame);
+ } else {
+
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
@@ -3776,9 +3988,9 @@ assert(s->current_picture.f.pict_type == s->pict_type);
ff_print_debug_info(s, pict);
}
+ }
+
end:
- if (v->res_sprite)
- vc1_parse_sprites(v, &s->gb);
av_free(buf2);
for (i = 0; i < n_slices; i++)
av_free(slices[i].buf);
@@ -3800,7 +4012,13 @@ err:
static av_cold int vc1_decode_end(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
+ int i;
+ if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
+ && v->sprite_output_frame.data[0])
+ avctx->release_buffer(avctx, &v->sprite_output_frame);
+ for (i = 0; i < 4; i++)
+ av_freep(&v->sr_rows[i>>1][i%2]);
av_freep(&v->hrd_rate);
av_freep(&v->hrd_buffer);
MPV_common_end(&v->s);
@@ -3887,3 +4105,35 @@ AVCodec ff_vc1_vdpau_decoder = {
.profiles = NULL_IF_CONFIG_SMALL(profiles)
};
#endif
+
+#if CONFIG_WMV3IMAGE_DECODER
+AVCodec ff_wmv3image_decoder = {
+ .name = "wmv3image",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_WMV3IMAGE,
+ .priv_data_size = sizeof(VC1Context),
+ .init = vc1_decode_init,
+ .close = vc1_decode_end,
+ .decode = vc1_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .flush = vc1_sprite_flush,
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
+ .pix_fmts = ff_pixfmt_list_420
+};
+#endif
+
+#if CONFIG_VC1IMAGE_DECODER
+AVCodec ff_vc1image_decoder = {
+ .name = "vc1image",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_VC1IMAGE,
+ .priv_data_size = sizeof(VC1Context),
+ .init = vc1_decode_init,
+ .close = vc1_decode_end,
+ .decode = vc1_decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .flush = vc1_sprite_flush,
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
+ .pix_fmts = ff_pixfmt_list_420
+};
+#endif
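
A minimal standalone sketch, not part of the patch, of the 16.16 fixed-point representation the sprite code above relies on: get_fp_val() maps a 30-bit bitstream field onto a signed 16.16 value, which is also why vc1_decode_init() rejects dimensions above 1<<14 (width<<16 must still fit in a signed 32-bit int). The helper names below are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Map an unsigned 30-bit field to signed 16.16 fixed point, mirroring
 * get_fp_val(): centre the value around zero, then scale by two. */
static int32_t fp_from_raw30(uint32_t raw30)
{
    return ((int32_t)raw30 - (1 << 29)) * 2;
}

/* Print a 16.16 value the same way the debug log in the patch does. */
static void print_fp(int32_t v)
{
    printf("%d.%03d\n", v / (1 << 16), (abs(v) & 0xFFFF) * 1000 / (1 << 16));
}

int main(void)
{
    print_fp(fp_from_raw30(1u << 29));               /* 0.000: mid-point of the field  */
    print_fp(fp_from_raw30((1u << 29) + (1 << 15))); /* 1.000: one integer step upward */
    print_fp(1 << 16);                               /* 1.000: the identity x/y scale  */
    return 0;
}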
diff --git a/libavcodec/vc1dsp.c b/libavcodec/vc1dsp.c
index 2eaa47a05b..5d6ee92fd7 100644
--- a/libavcodec/vc1dsp.c
+++ b/libavcodec/vc1dsp.c
@@ -713,6 +713,66 @@ static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a
}
}
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+
+static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
+{
+ while (count--) {
+ int a = src[(offset >> 16) ];
+ int b = src[(offset >> 16) + 1];
+ *dst++ = a + ((b - a) * (offset&0xFFFF) >> 16);
+ offset += advance;
+ }
+}
+
+static av_always_inline void sprite_v_template(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+ int two_sprites, const uint8_t *src2a, const uint8_t *src2b, int offset2,
+ int alpha, int scaled, int width)
+{
+ int a1, b1, a2, b2;
+ while (width--) {
+ a1 = *src1a++;
+ if (scaled) {
+ b1 = *src1b++;
+ a1 = a1 + ((b1 - a1) * offset1 >> 16);
+ }
+ if (two_sprites) {
+ a2 = *src2a++;
+ if (scaled > 1) {
+ b2 = *src2b++;
+ a2 = a2 + ((b2 - a2) * offset2 >> 16);
+ }
+ a1 = a1 + ((a2 - a1) * alpha >> 16);
+ }
+ *dst++ = a1;
+ }
+}
+
+static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
+{
+ sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
+}
+
+static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
+{
+ sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
+}
+
+static void sprite_v_double_onescale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+ const uint8_t *src2a, int alpha, int width)
+{
+ sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1, width);
+}
+
+static void sprite_v_double_twoscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+ const uint8_t *src2a, const uint8_t *src2b, int offset2,
+ int alpha, int width)
+{
+ sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2, alpha, 2, width);
+}
+
+#endif
+
av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
@@ -770,6 +830,14 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+ dsp->sprite_h = sprite_h_c;
+ dsp->sprite_v_single = sprite_v_single_c;
+ dsp->sprite_v_double_noscale = sprite_v_double_noscale_c;
+ dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
+ dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
+#endif
+
if (HAVE_ALTIVEC)
ff_vc1dsp_init_altivec(dsp);
if (HAVE_MMX)
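
A minimal standalone sketch, not part of the patch, of the interpolation scheme implemented by sprite_h_c() and sprite_v_template() above: the source position and per-pixel step are 16.16 fixed point, and the fractional part of the position linearly blends the two neighbouring samples. The function name below is hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Horizontal resampling of one row, as sprite_h_c() does:
 * offset  = starting source position in 16.16 fixed point
 * advance = per-output-pixel step in 16.16 fixed point */
static void resample_row(uint8_t *dst, const uint8_t *src,
                         int offset, int advance, int count)
{
    while (count--) {
        int a = src[(offset >> 16)    ];  /* left neighbour  */
        int b = src[(offset >> 16) + 1];  /* right neighbour */
        *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
        offset += advance;
    }
}

int main(void)
{
    const uint8_t src[5] = { 0, 100, 200, 200, 200 };
    uint8_t dst[8];
    int i;

    /* Upscale 4 source samples to 8 outputs: a step of 0.5 (1<<15) in 16.16. */
    resample_row(dst, src, 0, 1 << 15, 8);
    for (i = 0; i < 8; i++)
        printf("%d ", dst[i]);            /* 0 50 100 150 200 200 200 200 */
    printf("\n");
    return 0;
}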
diff --git a/libavcodec/vc1dsp.h b/libavcodec/vc1dsp.h
index 93a9ea3858..d96853aa16 100644
--- a/libavcodec/vc1dsp.h
+++ b/libavcodec/vc1dsp.h
@@ -60,6 +60,16 @@ typedef struct VC1DSPContext {
/* This is really one func used in VC-1 decoding */
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3];
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3];
+
+ /* Windows Media Image functions */
+ void (*sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count);
+ void (*sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width);
+ void (*sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width);
+ void (*sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+ const uint8_t *src2a, int alpha, int width);
+ void (*sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+ const uint8_t *src2a, const uint8_t *src2b, int offset2,
+ int alpha, int width);
} VC1DSPContext;
void ff_vc1dsp_init(VC1DSPContext* c);
diff --git a/libavcodec/version.h b/libavcodec/version.h
index f06184bded..dfa1445d8a 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 11
+#define LIBAVCODEC_VERSION_MINOR 12
#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \