commit     bbb61a1cd5cb2046e480f367a7ae58a32f2ef907
Author:    Michael Niedermayer <michaelni@gmx.at>  2012-02-18 02:20:19 +0100
Committer: Michael Niedermayer <michaelni@gmx.at>  2012-02-18 02:20:19 +0100
Tree:      0e7cc2b59558e2dc31d6b8752d90f6b5b5c886e5
Parents:   f6492476a63938cc66c51bf61c88407b7749f780
           af468015d972c0dec5c8c37b2685ffa5cbe4ae87

Merge remote-tracking branch 'qatar/master'
* qatar/master: (22 commits)
als: prevent infinite loop in zero_remaining().
cook: prevent div-by-zero if channels is zero.
pamenc: switch to encode2().
svq1enc: switch to encode2().
dvenc: switch to encode2().
dpxenc: switch to encode2().
pngenc: switch to encode2().
v210enc: switch to encode2().
xwdenc: switch to encode2().
ttadec: use branchless unsigned-to-signed unfolding
avcodec: add a Sun Rasterfile encoder
sunrast: Move common defines to a new header file.
cdxl: fix video decoding for some files
cdxl: fix audio for some samples
apetag: add proper support for binary tags
ttadec: remove dead code
swscale: make access to filter data conditional on filter type.
swscale: update context offsets after removal of AlpMmxFilter.
prores: initialise encoder and decoder parts only when needed
swscale: make monowhite/black RGB-independent.
...
Conflicts:
Changelog
libavcodec/alsdec.c
libavcodec/dpxenc.c
libavcodec/golomb.h
libavcodec/pamenc.c
libavcodec/pngenc.c
libavformat/img2.c
libswscale/output.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
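
Seven of the merged commits (pamenc, svq1enc, dvenc, dpxenc, pngenc, v210enc, xwdenc) move an encoder from the old `.encode` callback to `.encode2`. The conversion has the same shape in every file of the diff below: size the output `AVPacket` up front, write into `pkt->data`, and report success through `*got_packet` instead of returning a byte count. A condensed sketch of that pattern, using a hypothetical `FooContext`/`foo_encode_frame` pair rather than any of the real encoders:

```c
#include "avcodec.h"
#include "internal.h"   /* ff_alloc_packet() */

/* Hypothetical encoder context; the real converted encoders
 * (pamenc, dpxenc, dvenc, ...) each use their own. */
typedef struct FooContext { int frame_size; } FooContext;

/* Old signature:  int (*encode)(AVCodecContext *, uint8_t *buf,
 *                               int buf_size, void *data);
 * New signature used by .encode2: */
static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    FooContext *s = avctx->priv_data;
    int ret;

    /* The caller may pass an empty packet; allocate it ourselves. */
    if ((ret = ff_alloc_packet(pkt, s->frame_size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    /* ... write the bitstream into pkt->data, trim pkt->size ... */

    pkt->flags |= AV_PKT_FLAG_KEY;   /* intra-only formats key every packet */
    *got_packet = 1;                 /* success is signalled here, not via size */
    return 0;
}
```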
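The "ttadec: use branchless unsigned-to-signed unfolding" commit replaces the conditional `UNFOLD` macro with the pure bit expression `1 + ((value >> 1) ^ ((value & 1) - 1))`. Both map the folded codes 0, 1, 2, 3, 4, ... to 0, 1, -1, 2, -2, ...; a small self-contained check of that equivalence (the helper names here are illustrative, not part of the decoder):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old ttadec mapping (UNFOLD macro, without its side effect):
 * odd codes become positive, even codes negative. */
static int32_t unfold_branchy(uint32_t v)
{
    return (v & 1) ? (int32_t)((v + 1) >> 1) : -(int32_t)(v >> 1);
}

/* New branchless form from the merge: (v & 1) - 1 is 0 for odd v and
 * 0xFFFFFFFF for even v, so the XOR either keeps v >> 1 or bit-inverts it. */
static int32_t unfold_branchless(uint32_t v)
{
    return 1 + (int32_t)((v >> 1) ^ ((v & 1) - 1));
}

int main(void)
{
    uint32_t v;
    for (v = 0; v < 1000000; v++)
        assert(unfold_branchy(v) == unfold_branchless(v));
    printf("0..4 -> %d %d %d %d %d\n",
           unfold_branchless(0), unfold_branchless(1), unfold_branchless(2),
           unfold_branchless(3), unfold_branchless(4));   /* 0 1 -1 2 -2 */
    return 0;
}
```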
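The new Sun Rasterfile encoder's `RT_BYTE_ENCODED` mode uses the classic Sun RLE scheme with `0x80` (`RLE_TRIGGER`) as the escape byte: runs longer than two bytes, and any run of `0x80` itself, are written as `0x80, run-1, value`; a lone literal `0x80` becomes `0x80, 0x00`; other bytes with run length 1 or 2 are written verbatim. A minimal sketch of that rule over a flat buffer (the encoder in sunrastenc.c walks the padded image rows instead):

```c
#include <stddef.h>
#include <stdint.h>

#define RLE_TRIGGER 0x80  /* escape byte from sunrast.h */

/* Byte-encode src[0..n) into dst with the Sun raster RLE rule.
 * dst must be large enough (worst case roughly 2x n); returns bytes written. */
static size_t sunras_rle(uint8_t *dst, const uint8_t *src, size_t n)
{
    size_t i = 0, out = 0;

    while (i < n) {
        uint8_t value = src[i];
        size_t  run   = 1;

        while (i + run < n && src[i + run] == value && run < 256)
            run++;

        if (run > 2 || value == RLE_TRIGGER) {
            dst[out++] = RLE_TRIGGER;
            dst[out++] = run - 1;        /* 0 means a single literal 0x80 */
            if (run > 1)
                dst[out++] = value;
        } else if (run == 1) {
            dst[out++] = value;          /* plain literal */
        } else {                         /* run == 2 */
            dst[out++] = value;
            dst[out++] = value;
        }
        i += run;
    }
    return out;
}
```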
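The WMAPro change raises `WMAPRO_BLOCK_MAX_BITS` from 12 to 13, so the decoder now needs an 8192-sample (1 << 13) sine window, which is what the `sinewin.h`/`sinewin_tablegen` hunks add. A short sketch of generating such a table; the exact formula used by `ff_sine_window_init()` is an assumption here (the usual MDCT half-sine window) and is not part of this diff:

```c
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Assumed generation rule: w[i] = sin((i + 0.5) * pi / (2N)).
 * Shown only to illustrate why one table per power-of-two block size
 * (1 << 5 ... 1 << 13) is kept in ff_sine_windows[]. */
static void sine_window_init(float *window, int n)
{
    int i;
    for (i = 0; i < n; i++)
        window[i] = sinf((i + 0.5) * (M_PI / (2.0 * n)));
}

int main(void)
{
    static float w[1 << 13];             /* 8192 entries, the new table size */
    sine_window_init(w, 1 << 13);
    printf("w[0]=%f w[8191]=%f\n", w[0], w[8191]);   /* ~0 ... ~1 */
    return 0;
}
```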
Diffstat:
 Changelog                          |   1
 doc/general.texi                   |   2
 libavcodec/Makefile                |   1
 libavcodec/allcodecs.c             |   2
 libavcodec/cdxl.c                  |  27
 libavcodec/cook.c                  |   4
 libavcodec/dpxenc.c                |  35
 libavcodec/dv.c                    |  25
 libavcodec/flacdec.c               |   9
 libavcodec/golomb.h                |   2
 libavcodec/pamenc.c                |  26
 libavcodec/pngenc.c                |  29
 libavcodec/sinewin.h               |   3
 libavcodec/sinewin_tablegen.c      |   2
 libavcodec/sinewin_tablegen.h      |   3
 libavcodec/sunrast.c               |  27
 libavcodec/sunrast.h               |  56
 libavcodec/sunrastenc.c            | 225
 libavcodec/svq1enc.c               |  24
 libavcodec/tta.c                   |  48
 libavcodec/v210enc.c               |  22
 libavcodec/wmaprodec.c             |  12
 libavcodec/xwdenc.c                |  21
 libavformat/apetag.c               |  35
 libavformat/cdxl.c                 |  18
 libavformat/img2.c                 |   3
 libswscale/output.c                |  57
 libswscale/swscale_internal.h      |  11
 libswscale/x86/swscale_template.c  |  34
 29 files changed, 565 insertions(+), 199 deletions(-)
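
Two of the hunks below change how `libavcodec/cdxl.c` converts the Amiga planar bitplanes to chunky pixels: the source rows are padded to a multiple of 16 pixels, so `bitplanar2chunky()` now reads exactly `width` bits per row and skips the padding bits, instead of being handed a padded width. A standalone sketch of that conversion (a plain bit reader stands in for `GetBitContext`/`get_bits1()`):

```c
#include <stdint.h>
#include <string.h>

/* Minimal MSB-first bit reader standing in for GetBitContext. */
static int get_bit(const uint8_t *buf, long *pos)
{
    int bit = (buf[*pos >> 3] >> (7 - (*pos & 7))) & 1;
    (*pos)++;
    return bit;
}

/* Planar -> chunky as done in cdxl.c after the merge: one bit per pixel
 * per plane, rows padded to a multiple of 16 pixels in the source, and
 * the padding bits skipped at the end of every row. */
static void planar_to_chunky(const uint8_t *in, uint8_t *out,
                             int width, int height, int bpp, int linesize)
{
    int aligned = (width + 15) & ~15;   /* FFALIGN(width, 16) */
    long pos = 0;
    int plane, x, y;

    memset(out, 0, (size_t)linesize * height);
    for (plane = 0; plane < bpp; plane++)
        for (y = 0; y < height; y++) {
            for (x = 0; x < width; x++)
                out[linesize * y + x] |= get_bit(in, &pos) << plane;
            pos += aligned - width;     /* skip the row padding bits */
        }
}
```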
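Further down, the `libswscale/output.c` hunks drop the `table_gU`/`table_gV` lookup from the 1-bpp (monowhite/monoblack) output paths and set each bit by thresholding the dithered luma directly, which is what makes those paths independent of the RGB tables. A reduced sketch of how one output byte is packed with the new `accumulate_bit` macro; the 8-entry dither row is a stand-in for a row of `dither_8x8_220`:

```c
#include <stdint.h>

/* Macro as introduced in output.c: it expands to two statements, so it is
 * meant for straight-line use (or a braced loop body as below). */
#define accumulate_bit(acc, val) \
    acc <<= 1;                   \
    acc |= (val) >= (128 + 110)

/* Pack 8 clipped luma samples into one MONOBLACK byte, MSB first.
 * MONOWHITE additionally inverts the byte in output_pixel(); that part
 * is outside the excerpt shown in this diff. */
static uint8_t pack8_monoblack(const int Y[8], const uint8_t dither[8])
{
    unsigned acc = 0;
    int i;

    for (i = 0; i < 8; i++) {
        accumulate_bit(acc, Y[i] + dither[i]);
    }
    return acc;
}
```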
@@ -7,6 +7,7 @@ version next: - CDXL demuxer and decoder - Apple ProRes encoder - ffprobe -count_packets and -count_frames options +- Sun Rasterfile Encoder version 0.10: diff --git a/doc/general.texi b/doc/general.texi index 42ce322c86..d9b47b0c32 100644 --- a/doc/general.texi +++ b/doc/general.texi @@ -395,7 +395,7 @@ following image formats are supported: @tab V.Flash PTX format @item SGI @tab X @tab X @tab SGI RGB image format -@item Sun Rasterfile @tab @tab X +@item Sun Rasterfile @tab X @tab X @tab Sun RAS image format @item TIFF @tab X @tab X @tab YUV, JPEG and some extension is not supported yet. diff --git a/libavcodec/Makefile b/libavcodec/Makefile index 3866f2e9fa..0018dbb04f 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -393,6 +393,7 @@ OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o OBJS-$(CONFIG_SUNRAST_DECODER) += sunrast.o +OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o h263.o \ mpegvideo.o error_resilience.o OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o \ diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index 2b5c14c3ac..ef9a5ae52b 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -204,7 +204,7 @@ void avcodec_register_all(void) REGISTER_DECODER (SMC, smc); REGISTER_ENCDEC (SNOW, snow); REGISTER_DECODER (SP5X, sp5x); - REGISTER_DECODER (SUNRAST, sunrast); + REGISTER_ENCDEC (SUNRAST, sunrast); REGISTER_ENCDEC (SVQ1, svq1); REGISTER_DECODER (SVQ3, svq3); REGISTER_ENCDEC (TARGA, targa); diff --git a/libavcodec/cdxl.c b/libavcodec/cdxl.c index 8c9bea9768..a8546348dc 100644 --- a/libavcodec/cdxl.c +++ b/libavcodec/cdxl.c @@ -60,27 +60,29 @@ static void import_palette(CDXLVideoContext *c, uint32_t *new_palette) } } -static void bitplanar2chunky(CDXLVideoContext *c, int width, - int linesize, uint8_t *out) +static void bitplanar2chunky(CDXLVideoContext *c, int linesize, uint8_t *out) { + int skip = FFALIGN(c->avctx->width, 16) - c->avctx->width; GetBitContext gb; int x, y, plane; init_get_bits(&gb, c->video, c->video_size * 8); memset(out, 0, linesize * c->avctx->height); - for (plane = 0; plane < c->bpp; plane++) - for (y = 0; y < c->avctx->height; y++) - for (x = 0; x < width; x++) + for (plane = 0; plane < c->bpp; plane++) { + for (y = 0; y < c->avctx->height; y++) { + for (x = 0; x < c->avctx->width; x++) out[linesize * y + x] |= get_bits1(&gb) << plane; + skip_bits(&gb, skip); + } + } } static void cdxl_decode_rgb(CDXLVideoContext *c) { uint32_t *new_palette = (uint32_t *)c->frame.data[1]; - int padded_width = FFALIGN(c->avctx->width, 16); import_palette(c, new_palette); - bitplanar2chunky(c, padded_width, c->frame.linesize[0], c->frame.data[0]); + bitplanar2chunky(c, c->frame.linesize[0], c->frame.data[0]); } static void cdxl_decode_ham6(CDXLVideoContext *c) @@ -94,7 +96,7 @@ static void cdxl_decode_ham6(CDXLVideoContext *c) out = c->frame.data[0]; import_palette(c, new_palette); - bitplanar2chunky(c, avctx->width, avctx->width, c->new_video); + bitplanar2chunky(c, avctx->width, c->new_video); for (y = 0; y < avctx->height; y++) { r = new_palette[0] & 0xFF0000; @@ -137,7 +139,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c) out = c->frame.data[0]; import_palette(c, new_palette); - bitplanar2chunky(c, avctx->width, avctx->width, c->new_video); + bitplanar2chunky(c, avctx->width, c->new_video); for (y = 0; y < avctx->height; y++) { r = new_palette[0] & 0xFF0000; @@ 
-209,16 +211,13 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data, if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); + if (c->video_size < FFALIGN(avctx->width, 16) * avctx->height * c->bpp / 8) + return AVERROR_INVALIDDATA; if (encoding == 0) { - if (c->video_size < FFALIGN(avctx->width, 16) * - avctx->height * c->bpp / 8) - return AVERROR_INVALIDDATA; avctx->pix_fmt = PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; - if (c->video_size < avctx->width * avctx->height * c->bpp / 8) - return AVERROR_INVALIDDATA; avctx->pix_fmt = PIX_FMT_BGR24; } else { av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n", diff --git a/libavcodec/cook.c b/libavcodec/cook.c index 3968aae2f5..6598790877 100644 --- a/libavcodec/cook.c +++ b/libavcodec/cook.c @@ -1078,6 +1078,10 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) q->sample_rate = avctx->sample_rate; q->nb_channels = avctx->channels; q->bit_rate = avctx->bit_rate; + if (!q->nb_channels) { + av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n"); + return AVERROR_INVALIDDATA; + } /* Initialize RNG. */ av_lfg_init(&q->random_state, 0); diff --git a/libavcodec/dpxenc.c b/libavcodec/dpxenc.c index de32e4e959..bde02bce1c 100644 --- a/libavcodec/dpxenc.c +++ b/libavcodec/dpxenc.c @@ -22,6 +22,7 @@ #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "avcodec.h" +#include "internal.h" typedef struct DPXContext { AVFrame picture; @@ -104,14 +105,23 @@ static void encode_rgb48_10bit(AVCodecContext *avctx, const AVPicture *pic, uint } } -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data) +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *frame, int *got_packet) { DPXContext *s = avctx->priv_data; - int size; + int size, ret; + uint8_t *buf; #define HEADER_SIZE 1664 /* DPX Generic header */ - if (buf_size < HEADER_SIZE) - return -1; + if (s->bits_per_component == 10) + size = avctx->height * avctx->width * 4; + else + size = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height); + if ((ret = ff_alloc_packet(pkt, size + HEADER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + buf = pkt->data; memset(buf, 0, HEADER_SIZE); @@ -144,17 +154,14 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, switch(s->bits_per_component) { case 8: case 16: - size = avpicture_layout(data, avctx->pix_fmt, + size = avpicture_layout((const AVPicture*)frame, avctx->pix_fmt, avctx->width, avctx->height, - buf + HEADER_SIZE, buf_size - HEADER_SIZE); + buf + HEADER_SIZE, pkt->size - HEADER_SIZE); if (size < 0) return size; break; case 10: - size = avctx->height * avctx->width * 4; - if (buf_size < HEADER_SIZE + size) - return -1; - encode_rgb48_10bit(avctx, data, buf + HEADER_SIZE); + encode_rgb48_10bit(avctx, (const AVPicture*)frame, buf + HEADER_SIZE); break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", s->bits_per_component); @@ -164,7 +171,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, size += HEADER_SIZE; write32(buf + 16, size); /* file size */ - return size; + + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } AVCodec ff_dpx_encoder = { @@ -173,7 +184,7 @@ AVCodec ff_dpx_encoder = { .id = CODEC_ID_DPX, .priv_data_size = sizeof(DPXContext), 
.init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_RGB24, PIX_FMT_RGBA, diff --git a/libavcodec/dv.c b/libavcodec/dv.c index da445a7699..4717de3924 100644 --- a/libavcodec/dv.c +++ b/libavcodec/dv.c @@ -42,6 +42,7 @@ #include "avcodec.h" #include "dsputil.h" #include "get_bits.h" +#include "internal.h" #include "put_bits.h" #include "simple_idct.h" #include "dvdata.h" @@ -1276,29 +1277,37 @@ static void dv_format_frame(DVVideoContext* c, uint8_t* buf) } -static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size, - void *data) +static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt, + const AVFrame *frame, int *got_packet) { DVVideoContext *s = c->priv_data; + int ret; s->sys = avpriv_dv_codec_profile(c); - if (!s->sys || buf_size < s->sys->frame_size || dv_init_dynamic_tables(s->sys)) + if (!s->sys || dv_init_dynamic_tables(s->sys)) return -1; + if ((ret = ff_alloc_packet(pkt, s->sys->frame_size)) < 0) { + av_log(c, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } c->pix_fmt = s->sys->pix_fmt; - s->picture = *((AVFrame *)data); + s->picture = *frame; s->picture.key_frame = 1; s->picture.pict_type = AV_PICTURE_TYPE_I; - s->buf = buf; + s->buf = pkt->data; c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL, dv_work_pool_size(s->sys), sizeof(DVwork_chunk)); emms_c(); - dv_format_frame(s, buf); + dv_format_frame(s, pkt->data); - return s->sys->frame_size; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } #endif @@ -1320,7 +1329,7 @@ AVCodec ff_dvvideo_encoder = { .id = CODEC_ID_DVVIDEO, .priv_data_size = sizeof(DVVideoContext), .init = dvvideo_init_encoder, - .encode = dvvideo_encode_frame, + .encode2 = dvvideo_encode_frame, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c index 055431b6d2..dc6bcaab57 100644 --- a/libavcodec/flacdec.c +++ b/libavcodec/flacdec.c @@ -422,7 +422,16 @@ static inline int decode_subframe(FLACContext *s, int channel) type = get_bits(&s->gb, 6); if (get_bits1(&s->gb)) { + int left = get_bits_left(&s->gb); wasted = 1; + if ( left < 0 || + (left < s->curr_bps && !show_bits_long(&s->gb, left)) || + !show_bits_long(&s->gb, s->curr_bps)) { + av_log(s->avctx, AV_LOG_ERROR, + "Invalid number of wasted bits > available bits (%d) - left=%d\n", + s->curr_bps, left); + return AVERROR_INVALIDDATA; + } while (!get_bits1(&s->gb)) wasted++; s->curr_bps -= wasted; diff --git a/libavcodec/golomb.h b/libavcodec/golomb.h index 638357b470..2f474be22e 100644 --- a/libavcodec/golomb.h +++ b/libavcodec/golomb.h @@ -301,7 +301,7 @@ static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, int return buf; }else{ int i; - for(i=0; SHOW_UBITS(re, gb, 1) == 0; i++){ + for (i = 0; i < limit && SHOW_UBITS(re, gb, 1) == 0; i++) { if (gb->size_in_bits <= re_index) return -1; LAST_SKIP_BITS(re, gb, 1); diff --git a/libavcodec/pamenc.c b/libavcodec/pamenc.c index 41db4ecb2f..ea0b6f4a32 100644 --- a/libavcodec/pamenc.c +++ b/libavcodec/pamenc.c @@ -20,22 +20,24 @@ */ #include "avcodec.h" +#include "internal.h" #include "pnm.h" -static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, - int buf_size, void *data) +static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int 
*got_packet) { PNMContext *s = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p = (AVFrame*)&s->picture; - int i, h, w, n, linesize, depth, maxval; + int i, h, w, n, linesize, depth, maxval, ret; const char *tuple_type; uint8_t *ptr; - if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) { + if ((ret = ff_alloc_packet(pkt, avpicture_get_size(avctx->pix_fmt, + avctx->width, + avctx->height) + 200)) < 0) { av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n"); - return -1; + return ret; } *p = *pict; @@ -43,8 +45,8 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, p->key_frame = 1; s->bytestream_start = - s->bytestream = outbuf; - s->bytestream_end = outbuf+buf_size; + s->bytestream = pkt->data; + s->bytestream_end = pkt->data + pkt->size; h = avctx->height; w = avctx->width; @@ -122,7 +124,11 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, ptr += linesize; } } - return s->bytestream - s->bytestream_start; + + pkt->size = s->bytestream - s->bytestream_start; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + return 0; } @@ -132,7 +138,7 @@ AVCodec ff_pam_encoder = { .id = CODEC_ID_PAM, .priv_data_size = sizeof(PNMContext), .init = ff_pnm_init, - .encode = pam_encode_frame, + .encode2 = pam_encode_frame, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE, PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16BE, PIX_FMT_MONOBLACK, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"), }; diff --git a/libavcodec/pngenc.c b/libavcodec/pngenc.c index 69ca8e469d..60b896473a 100644 --- a/libavcodec/pngenc.c +++ b/libavcodec/pngenc.c @@ -212,12 +212,13 @@ static int png_write_row(PNGEncContext *s, const uint8_t *data, int size) return 0; } -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ PNGEncContext *s = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p= &s->picture; int bit_depth, color_type, y, len, row_size, ret, is_progressive; - int bits_per_pixel, pass_row_size; + int bits_per_pixel, pass_row_size, max_packet_size; int compression_level; uint8_t *ptr, *top; uint8_t *crow_base = NULL, *crow_buf, *crow; @@ -228,9 +229,17 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, p->pict_type= AV_PICTURE_TYPE_I; p->key_frame= 1; - s->bytestream_start= - s->bytestream= buf; - s->bytestream_end= buf+buf_size; + max_packet_size = IOBUF_SIZE*avctx->height + FF_MIN_BUFFER_SIZE; + if (!pkt->data && + (ret = av_new_packet(pkt, max_packet_size)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Could not allocate output packet of size %d.\n", + max_packet_size); + return ret; + } + + s->bytestream_start = + s->bytestream = pkt->data; + s->bytestream_end = pkt->data + pkt->size; is_progressive = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT); switch(avctx->pix_fmt) { @@ -393,7 +402,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, } png_write_chunk(&s->bytestream, MKTAG('I', 'E', 'N', 'D'), NULL, 0); - ret = s->bytestream - s->bytestream_start; + pkt->size = s->bytestream - s->bytestream_start; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + ret = 0; + the_end: av_free(crow_base); av_free(progressive_buf); @@ -425,7 +438,7 @@ AVCodec ff_png_encoder = { .id = CODEC_ID_PNG, .priv_data_size = sizeof(PNGEncContext), .init = 
png_enc_init, - .encode = encode_frame, + .encode2 = encode_frame, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE, PIX_FMT_PAL8, diff --git a/libavcodec/sinewin.h b/libavcodec/sinewin.h index 61135fd6a2..bf09f13083 100644 --- a/libavcodec/sinewin.h +++ b/libavcodec/sinewin.h @@ -53,7 +53,8 @@ extern SINETABLE( 512); extern SINETABLE(1024); extern SINETABLE(2048); extern SINETABLE(4096); +extern SINETABLE(8192); -extern SINETABLE_CONST float * const ff_sine_windows[13]; +extern SINETABLE_CONST float * const ff_sine_windows[14]; #endif /* AVCODEC_SINEWIN_H */ diff --git a/libavcodec/sinewin_tablegen.c b/libavcodec/sinewin_tablegen.c index 48eb771e48..b2988d3160 100644 --- a/libavcodec/sinewin_tablegen.c +++ b/libavcodec/sinewin_tablegen.c @@ -38,7 +38,7 @@ int main(void) write_fileheader(); - for (i = 5; i <= 12; i++) { + for (i = 5; i <= 13; i++) { ff_init_ff_sine_windows(i); printf("SINETABLE(%4i) = {\n", 1 << i); write_float_array(ff_sine_windows[i], 1 << i); diff --git a/libavcodec/sinewin_tablegen.h b/libavcodec/sinewin_tablegen.h index f587595c21..dd7d992ae1 100644 --- a/libavcodec/sinewin_tablegen.h +++ b/libavcodec/sinewin_tablegen.h @@ -38,6 +38,7 @@ SINETABLE( 512); SINETABLE(1024); SINETABLE(2048); SINETABLE(4096); +SINETABLE(8192); #else #include "libavcodec/sinewin_tables.h" #endif @@ -45,7 +46,7 @@ SINETABLE(4096); SINETABLE_CONST float * const ff_sine_windows[] = { NULL, NULL, NULL, NULL, NULL, // unused ff_sine_32 , ff_sine_64 , - ff_sine_128, ff_sine_256, ff_sine_512, ff_sine_1024, ff_sine_2048, ff_sine_4096 + ff_sine_128, ff_sine_256, ff_sine_512, ff_sine_1024, ff_sine_2048, ff_sine_4096, ff_sine_8192 }; // Generate a sine window. diff --git a/libavcodec/sunrast.c b/libavcodec/sunrast.c index aab6435cdd..cc7a9fb4e4 100644 --- a/libavcodec/sunrast.c +++ b/libavcodec/sunrast.c @@ -22,32 +22,7 @@ #include "libavutil/intreadwrite.h" #include "libavutil/imgutils.h" #include "avcodec.h" - -#define RAS_MAGIC 0x59a66a95 - -/* The Old and Standard format types indicate that the image data is - * uncompressed. There is no difference between the two formats. */ -#define RT_OLD 0 -#define RT_STANDARD 1 - -/* The Byte-Encoded format type indicates that the image data is compressed - * using a run-length encoding scheme. */ -#define RT_BYTE_ENCODED 2 - -/* The RGB format type indicates that the image is uncompressed with reverse - * component order from Old and Standard (RGB vs BGR). */ -#define RT_FORMAT_RGB 3 - -/* The TIFF and IFF format types indicate that the raster file was originally - * converted from either of these file formats. We do not have any samples or - * documentation of the format details. */ -#define RT_FORMAT_TIFF 4 -#define RT_FORMAT_IFF 5 - -/* The Experimental format type is implementation-specific and is generally an - * indication that the image file does not conform to the Sun Raster file - * format specification. */ -#define RT_EXPERIMENTAL 0xffff +#include "sunrast.h" typedef struct SUNRASTContext { AVFrame picture; diff --git a/libavcodec/sunrast.h b/libavcodec/sunrast.h new file mode 100644 index 0000000000..d9fe307b67 --- /dev/null +++ b/libavcodec/sunrast.h @@ -0,0 +1,56 @@ +/* + * Sun Rasterfile Image Format + * Copyright (c) 2007, 2008 Ivo van Poorten + * + * This file is part of Libav. 
+ * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_SUNRAST_H +#define AVCODEC_SUNRAST_H + +#define RAS_MAGIC 0x59a66a95 + +#define RMT_NONE 0 +#define RMT_EQUAL_RGB 1 +#define RMT_RAW 2 ///< the data layout of this map type is unknown + +/* The Old and Standard format types indicate that the image data is + * uncompressed. There is no difference between the two formats. */ +#define RT_OLD 0 +#define RT_STANDARD 1 + +/* The Byte-Encoded format type indicates that the image data is compressed + * using a run-length encoding scheme. */ +#define RT_BYTE_ENCODED 2 +#define RLE_TRIGGER 0x80 + +/* The RGB format type indicates that the image is uncompressed with reverse + * component order from Old and Standard (RGB vs BGR). */ +#define RT_FORMAT_RGB 3 + +/* The TIFF and IFF format types indicate that the raster file was originally + * converted from either of these file formats. We do not have any samples or + * documentation of the format details. */ +#define RT_FORMAT_TIFF 4 +#define RT_FORMAT_IFF 5 + +/* The Experimental format type is implementation-specific and is generally an + * indication that the image file does not conform to the Sun Raster file + * format specification. */ +#define RT_EXPERIMENTAL 0xffff + +#endif /* AVCODEC_SUNRAST_H */ diff --git a/libavcodec/sunrastenc.c b/libavcodec/sunrastenc.c new file mode 100644 index 0000000000..21d6fc47a0 --- /dev/null +++ b/libavcodec/sunrastenc.c @@ -0,0 +1,225 @@ +/* + * Sun Rasterfile (.sun/.ras/im{1,8,24}/.sunras) image encoder + * Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com> + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "avcodec.h" +#include "bytestream.h" +#include "internal.h" +#include "sunrast.h" + +typedef struct SUNRASTContext { + AVFrame picture; + PutByteContext p; + int depth; ///< depth of pixel + int length; ///< length (bytes) of image + int type; ///< type of file + int maptype; ///< type of colormap + int maplength; ///< length (bytes) of colormap + int size; +} SUNRASTContext; + +static void sunrast_image_write_header(AVCodecContext *avctx) +{ + SUNRASTContext *s = avctx->priv_data; + + bytestream2_put_be32u(&s->p, RAS_MAGIC); + bytestream2_put_be32u(&s->p, avctx->width); + bytestream2_put_be32u(&s->p, avctx->height); + bytestream2_put_be32u(&s->p, s->depth); + bytestream2_put_be32u(&s->p, s->length); + bytestream2_put_be32u(&s->p, s->type); + bytestream2_put_be32u(&s->p, s->maptype); + bytestream2_put_be32u(&s->p, s->maplength); +} + +static void sunrast_image_write_image(AVCodecContext *avctx, + const uint8_t *pixels, + const uint32_t *palette_data, + int linesize) +{ + SUNRASTContext *s = avctx->priv_data; + const uint8_t *ptr; + int len, alen, x; + + if (s->maplength) { // palettized + PutByteContext pb_r, pb_g; + int len = s->maplength / 3; + + pb_r = s->p; + bytestream2_skip_p(&s->p, len); + pb_g = s->p; + bytestream2_skip_p(&s->p, len); + + for (x = 0; x < len; x++) { + uint32_t pixel = palette_data[x]; + + bytestream2_put_byteu(&pb_r, (pixel >> 16) & 0xFF); + bytestream2_put_byteu(&pb_g, (pixel >> 8) & 0xFF); + bytestream2_put_byteu(&s->p, pixel & 0xFF); + } + } + + len = (s->depth * avctx->width + 7) >> 3; + alen = len + (len & 1); + ptr = pixels; + + if (s->type == RT_BYTE_ENCODED) { + uint8_t value, value2; + int run; + const uint8_t *end = pixels + avctx->height * linesize; + + ptr = pixels; + +#define GET_VALUE ptr >= end ? 0 : x >= len ? 
ptr[len-1] : ptr[x] + + x = 0; + value2 = GET_VALUE; + while (ptr < end) { + run = 1; + value = value2; + x++; + if (x >= alen) { + x = 0; + ptr += linesize; + } + + value2 = GET_VALUE; + while (value2 == value && run < 256 && ptr < end) { + x++; + run++; + if (x >= alen) { + x = 0; + ptr += linesize; + } + value2 = GET_VALUE; + } + + if (run > 2 || value == RLE_TRIGGER) { + bytestream2_put_byteu(&s->p, RLE_TRIGGER); + bytestream2_put_byteu(&s->p, run - 1); + if (run > 1) + bytestream2_put_byteu(&s->p, value); + } else if (run == 1) { + bytestream2_put_byteu(&s->p, value); + } else + bytestream2_put_be16u(&s->p, (value << 8) | value); + } + + // update data length for header + s->length = bytestream2_tell_p(&s->p) - 32 - s->maplength; + } else { + int y; + for (y = 0; y < avctx->height; y++) { + bytestream2_put_buffer(&s->p, ptr, len); + if (len < alen) + bytestream2_put_byteu(&s->p, 0); + ptr += linesize; + } + } +} + +static av_cold int sunrast_encode_init(AVCodecContext *avctx) +{ + SUNRASTContext *s = avctx->priv_data; + + switch (avctx->coder_type) { + case FF_CODER_TYPE_RLE: + s->type = RT_BYTE_ENCODED; + break; + case FF_CODER_TYPE_RAW: + s->type = RT_STANDARD; + break; + default: + av_log(avctx, AV_LOG_ERROR, "invalid coder_type\n"); + return AVERROR(EINVAL); + } + + avctx->coded_frame = &s->picture; + avctx->coded_frame->key_frame = 1; + avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; + s->maptype = RMT_NONE; + s->maplength = 0; + + switch (avctx->pix_fmt) { + case PIX_FMT_MONOWHITE: + s->depth = 1; + break; + case PIX_FMT_PAL8 : + s->maptype = RMT_EQUAL_RGB; + s->maplength = 3 * 256; + case PIX_FMT_GRAY8: + s->depth = 8; + break; + case PIX_FMT_BGR24: + s->depth = 24; + break; + default: + return AVERROR_BUG; + } + s->length = avctx->height * (FFALIGN(avctx->width * s->depth, 16) >> 3); + s->size = 32 + s->maplength + + s->length * (s->type == RT_BYTE_ENCODED ? 
2 : 1); + + return 0; +} + +static int sunrast_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr) +{ + SUNRASTContext *s = avctx->priv_data; + int ret; + + if ((ret = ff_alloc_packet(avpkt, s->size)) < 0) + return ret; + + bytestream2_init_writer(&s->p, avpkt->data, avpkt->size); + sunrast_image_write_header(avctx); + sunrast_image_write_image(avctx, frame->data[0], + (const uint32_t *)frame->data[1], + frame->linesize[0]); + // update data length in header after RLE + if (s->type == RT_BYTE_ENCODED) + AV_WB32(&avpkt->data[16], s->length); + + *got_packet_ptr = 1; + avpkt->size = bytestream2_tell_p(&s->p); + return 0; +} + +static const AVCodecDefault sunrast_defaults[] = { + { "coder", "rle" }, + { NULL }, +}; + +AVCodec ff_sunrast_encoder = { + .name = "sunrast", + .type = AVMEDIA_TYPE_VIDEO, + .id = CODEC_ID_SUNRAST, + .priv_data_size = sizeof(SUNRASTContext), + .init = sunrast_encode_init, + .encode2 = sunrast_encode_frame, + .defaults = sunrast_defaults, + .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_BGR24, + PIX_FMT_PAL8, + PIX_FMT_GRAY8, + PIX_FMT_MONOWHITE, + PIX_FMT_NONE }, + .long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"), +}; diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c index b7c196707e..84ff0d8814 100644 --- a/libavcodec/svq1enc.c +++ b/libavcodec/svq1enc.c @@ -497,14 +497,19 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx) return 0; } -static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf, - int buf_size, void *data) +static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) { SVQ1Context * const s = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p= (AVFrame*)&s->picture; AVFrame temp; - int i; + int i, ret; + + if (!pkt->data && + (ret = av_new_packet(pkt, s->y_block_width*s->y_block_height*MAX_MB_BYTES*3 + FF_MIN_BUFFER_SIZE) < 0)) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } if(avctx->pix_fmt != PIX_FMT_YUV410P){ av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n"); @@ -521,7 +526,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf, s->current_picture= s->last_picture; s->last_picture= temp; - init_put_bits(&s->pb, buf, buf_size); + init_put_bits(&s->pb, pkt->data, pkt->size); *p = *pict; p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? 
AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I; @@ -542,7 +547,12 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf, flush_put_bits(&s->pb); - return put_bits_count(&s->pb) / 8; + pkt->size = put_bits_count(&s->pb) / 8; + if (p->pict_type == AV_PICTURE_TYPE_I) + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } static av_cold int svq1_encode_end(AVCodecContext *avctx) @@ -578,7 +588,7 @@ AVCodec ff_svq1_encoder = { .id = CODEC_ID_SVQ1, .priv_data_size = sizeof(SVQ1Context), .init = svq1_encode_init, - .encode = svq1_encode_frame, + .encode2 = svq1_encode_frame, .close = svq1_encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"), diff --git a/libavcodec/tta.c b/libavcodec/tta.c index 2a4c2e859c..231b19171d 100644 --- a/libavcodec/tta.c +++ b/libavcodec/tta.c @@ -39,7 +39,7 @@ #define MAX_ORDER 16 typedef struct TTAFilter { - int32_t shift, round, error, mode; + int32_t shift, round, error; int32_t qm[MAX_ORDER]; int32_t dx[MAX_ORDER]; int32_t dl[MAX_ORDER]; @@ -84,19 +84,18 @@ static const uint32_t shift_1[] = { static const uint32_t * const shift_16 = shift_1 + 4; -static const int32_t ttafilter_configs[4][2] = { - {10, 1}, - {9, 1}, - {10, 1}, - {12, 0} +static const int32_t ttafilter_configs[4] = { + 10, + 9, + 10, + 12 }; -static void ttafilter_init(TTAFilter *c, int32_t shift, int32_t mode) { +static void ttafilter_init(TTAFilter *c, int32_t shift) { memset(c, 0, sizeof(TTAFilter)); c->shift = shift; c->round = shift_1[shift-1]; // c->round = 1 << (shift - 1); - c->mode = mode; } // FIXME: copy paste from original @@ -111,9 +110,8 @@ static inline void memshl(register int32_t *a, register int32_t *b) { *a = *b; } -// FIXME: copy paste from original -// mode=1 encoder, mode=0 decoder -static inline void ttafilter_process(TTAFilter *c, int32_t *in, int32_t mode) { +static inline void ttafilter_process(TTAFilter *c, int32_t *in) +{ register int32_t *dl = c->dl, *qm = c->qm, *dx = c->dx, sum = c->round; if (!c->error) { @@ -151,22 +149,13 @@ static inline void ttafilter_process(TTAFilter *c, int32_t *in, int32_t mode) { *(dx-2) = ((*(dl-3) >> 30) | 1) << 1; *(dx-3) = ((*(dl-4) >> 30) | 1); - // compress - if (mode) { - *dl = *in; - *in -= (sum >> c->shift); - c->error = *in; - } else { - c->error = *in; - *in += (sum >> c->shift); - *dl = *in; - } + c->error = *in; + *in += (sum >> c->shift); + *dl = *in; - if (c->mode) { - *(dl-1) = *dl - *(dl-1); - *(dl-2) = *(dl-1) - *(dl-2); - *(dl-3) = *(dl-2) - *(dl-3); - } + *(dl-1) = *dl - *(dl-1); + *(dl-2) = *(dl-1) - *(dl-2); + *(dl-3) = *(dl-2) - *(dl-3); memshl(c->dl, c->dl + 1); memshl(c->dx, c->dx + 1); @@ -368,7 +357,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data, // init per channel states for (i = 0; i < s->channels; i++) { s->ch_ctx[i].predictor = 0; - ttafilter_init(&s->ch_ctx[i].filter, ttafilter_configs[s->bps-1][0], ttafilter_configs[s->bps-1][1]); + ttafilter_init(&s->ch_ctx[i].filter, ttafilter_configs[s->bps-1]); rice_init(&s->ch_ctx[i].rice, 10, 10); } @@ -422,11 +411,10 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data, } // extract coded value -#define UNFOLD(x) (((x)&1) ? 
(++(x)>>1) : (-(x)>>1)) - *p = UNFOLD(value); + *p = 1 + ((value >> 1) ^ ((value & 1) - 1)); // run hybrid filter - ttafilter_process(filter, p, 0); + ttafilter_process(filter, p); // fixed order prediction #define PRED(x, k) (int32_t)((((uint64_t)x << k) - x) >> k) diff --git a/libavcodec/v210enc.c b/libavcodec/v210enc.c index 2bf60d7bfc..93fa2d8a73 100644 --- a/libavcodec/v210enc.c +++ b/libavcodec/v210enc.c @@ -23,6 +23,7 @@ #include "avcodec.h" #include "bytestream.h" +#include "internal.h" static av_cold int encode_init(AVCodecContext *avctx) { @@ -44,25 +45,24 @@ static av_cold int encode_init(AVCodecContext *avctx) return 0; } -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, - int buf_size, void *data) +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pic, int *got_packet) { - const AVFrame *pic = data; int aligned_width = ((avctx->width + 47) / 48) * 48; int stride = aligned_width * 8 / 3; int line_padding = stride - ((avctx->width * 8 + 11) / 12) * 4; - int h, w; + int h, w, ret; const uint16_t *y = (const uint16_t*)pic->data[0]; const uint16_t *u = (const uint16_t*)pic->data[1]; const uint16_t *v = (const uint16_t*)pic->data[2]; PutByteContext p; - if (buf_size < avctx->height * stride) { - av_log(avctx, AV_LOG_ERROR, "output buffer too small\n"); - return AVERROR(ENOMEM); + if ((ret = ff_alloc_packet(pkt, avctx->height * stride)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; } - bytestream2_init_writer(&p, buf, buf_size); + bytestream2_init_writer(&p, pkt->data, pkt->size); #define CLIP(v) av_clip(v, 4, 1019) @@ -104,7 +104,9 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, v += pic->linesize[2] / 2 - avctx->width / 2; } - return bytestream2_tell_p(&p); + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + return 0; } static av_cold int encode_close(AVCodecContext *avctx) @@ -119,7 +121,7 @@ AVCodec ff_v210_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_V210, .init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, .close = encode_close, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P10, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"), diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c index 1e2f246847..3fd4836fd9 100644 --- a/libavcodec/wmaprodec.c +++ b/libavcodec/wmaprodec.c @@ -105,7 +105,7 @@ #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size #define WMAPRO_BLOCK_MIN_BITS 6 ///< log2 of min block size -#define WMAPRO_BLOCK_MAX_BITS 12 ///< log2 of max block size +#define WMAPRO_BLOCK_MAX_BITS 13 ///< log2 of max block size #define WMAPRO_BLOCK_MAX_SIZE (1 << WMAPRO_BLOCK_MAX_BITS) ///< maximum block size #define WMAPRO_BLOCK_SIZES (WMAPRO_BLOCK_MAX_BITS - WMAPRO_BLOCK_MIN_BITS + 1) ///< possible block sizes @@ -276,7 +276,7 @@ static av_cold int decode_init(AVCodecContext *avctx) WMAProDecodeCtx *s = avctx->priv_data; uint8_t *edata_ptr = avctx->extradata; unsigned int channel_mask; - int i; + int i, bits; int log2_max_num_subframes; int num_possible_block_sizes; @@ -310,8 +310,12 @@ static av_cold int decode_init(AVCodecContext *avctx) s->len_prefix = (s->decode_flags & 0x40); /** get frame len */ - s->samples_per_frame = 1 << ff_wma_get_frame_len_bits(avctx->sample_rate, - 3, s->decode_flags); + bits = ff_wma_get_frame_len_bits(avctx->sample_rate, 3, s->decode_flags); + if (bits > WMAPRO_BLOCK_MAX_BITS) { + av_log_missing_feature(avctx, "14-bits block sizes", 1); + return 
AVERROR_INVALIDDATA; + } + s->samples_per_frame = 1 << bits; /** subframe info */ log2_max_num_subframes = ((s->decode_flags & 0x38) >> 3); diff --git a/libavcodec/xwdenc.c b/libavcodec/xwdenc.c index 5bfdaf780a..67fac81619 100644 --- a/libavcodec/xwdenc.c +++ b/libavcodec/xwdenc.c @@ -24,6 +24,7 @@ #include "libavutil/pixdesc.h" #include "avcodec.h" #include "bytestream.h" +#include "internal.h" #include "xwd.h" #define WINDOW_NAME "lavcxwdenc" @@ -38,16 +39,15 @@ static av_cold int xwd_encode_init(AVCodecContext *avctx) return 0; } -static int xwd_encode_frame(AVCodecContext *avctx, uint8_t *buf, - int buf_size, void *data) +static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *p, int *got_packet) { - AVFrame *p = data; enum PixelFormat pix_fmt = avctx->pix_fmt; uint32_t pixdepth, bpp, bpad, ncolors = 0, lsize, vclass, be = 0; uint32_t rgb[3] = { 0 }; uint32_t header_size; - int i, out_size; - uint8_t *ptr; + int i, out_size, ret; + uint8_t *ptr, *buf; pixdepth = av_get_bits_per_pixel(&av_pix_fmt_descriptors[pix_fmt]); if (av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BE) @@ -146,10 +146,11 @@ static int xwd_encode_frame(AVCodecContext *avctx, uint8_t *buf, header_size = XWD_HEADER_SIZE + WINDOW_NAME_SIZE; out_size = header_size + ncolors * XWD_CMAP_SIZE + avctx->height * lsize; - if (buf_size < out_size) { + if ((ret = ff_alloc_packet(pkt, out_size)) < 0) { av_log(avctx, AV_LOG_ERROR, "output buffer too small\n"); - return AVERROR(ENOMEM); + return ret; } + buf = pkt->data; avctx->coded_frame->key_frame = 1; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; @@ -204,7 +205,9 @@ static int xwd_encode_frame(AVCodecContext *avctx, uint8_t *buf, ptr += p->linesize[0]; } - return out_size; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + return 0; } static av_cold int xwd_encode_close(AVCodecContext *avctx) @@ -219,7 +222,7 @@ AVCodec ff_xwd_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_XWD, .init = xwd_encode_init, - .encode = xwd_encode_frame, + .encode2 = xwd_encode_frame, .close = xwd_encode_close, .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_BGRA, PIX_FMT_RGBA, diff --git a/libavformat/apetag.c b/libavformat/apetag.c index 8d53e4cdf7..7656555125 100644 --- a/libavformat/apetag.c +++ b/libavformat/apetag.c @@ -29,16 +29,17 @@ #define APE_TAG_FOOTER_BYTES 32 #define APE_TAG_FLAG_CONTAINS_HEADER (1 << 31) #define APE_TAG_FLAG_IS_HEADER (1 << 29) +#define APE_TAG_FLAG_IS_BINARY (1 << 1) static int ape_tag_read_field(AVFormatContext *s) { AVIOContext *pb = s->pb; uint8_t key[1024], *value; - uint32_t size; + uint32_t size, flags; int i, c; size = avio_rl32(pb); /* field size */ - avio_skip(pb, 4); /* field flags */ + flags = avio_rl32(pb); /* field flags */ for (i = 0; i < sizeof(key) - 1; i++) { c = avio_r8(pb); if (c < 0x20 || c > 0x7E) @@ -53,12 +54,30 @@ static int ape_tag_read_field(AVFormatContext *s) } if (size >= UINT_MAX) return -1; - value = av_malloc(size+1); - if (!value) - return AVERROR(ENOMEM); - avio_read(pb, value, size); - value[size] = 0; - av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL); + if (flags & APE_TAG_FLAG_IS_BINARY) { + uint8_t filename[1024]; + AVStream *st = avformat_new_stream(s, NULL); + if (!st) + return AVERROR(ENOMEM); + avio_get_str(pb, INT_MAX, filename, sizeof(filename)); + st->codec->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); + if (!st->codec->extradata) + return AVERROR(ENOMEM); + if (avio_read(pb, st->codec->extradata, size) != size) { + av_freep(&st->codec->extradata); 
+ return AVERROR(EIO); + } + st->codec->extradata_size = size; + av_dict_set(&st->metadata, key, filename, 0); + st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT; + } else { + value = av_malloc(size+1); + if (!value) + return AVERROR(ENOMEM); + c = avio_read(pb, value, size); + value[c] = 0; + av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL); + } return 0; } diff --git a/libavformat/cdxl.c b/libavformat/cdxl.c index f2956dd2f2..49077b4a36 100644 --- a/libavformat/cdxl.c +++ b/libavformat/cdxl.c @@ -62,9 +62,8 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt) { CDXLDemuxContext *cdxl = s->priv_data; AVIOContext *pb = s->pb; - uint32_t current_size; - uint16_t audio_size, palette_size; - int32_t video_size; + uint32_t current_size, video_size, image_size; + uint16_t audio_size, palette_size, width, height; int64_t pos; int ret; @@ -81,14 +80,17 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt) } current_size = AV_RB32(&cdxl->header[2]); + width = AV_RB16(&cdxl->header[14]); + height = AV_RB16(&cdxl->header[16]); palette_size = AV_RB16(&cdxl->header[20]); audio_size = AV_RB16(&cdxl->header[22]); + image_size = FFALIGN(width, 16) * height * cdxl->header[19] / 8; + video_size = palette_size + image_size; if (palette_size > 512) return AVERROR_INVALIDDATA; - if (current_size < audio_size + palette_size + CDXL_HEADER_SIZE) + if (current_size < (uint64_t)audio_size + video_size + CDXL_HEADER_SIZE) return AVERROR_INVALIDDATA; - video_size = current_size - audio_size - CDXL_HEADER_SIZE; if (cdxl->read_chunk && audio_size) { if (cdxl->audio_stream_index == -1) { @@ -121,8 +123,8 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt) st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_tag = 0; st->codec->codec_id = CODEC_ID_CDXL; - st->codec->width = AV_RB16(&cdxl->header[14]); - st->codec->height = AV_RB16(&cdxl->header[16]); + st->codec->width = width; + st->codec->height = height; cdxl->video_stream_index = st->index; avpriv_set_pts_info(st, 63, cdxl->fps.den, cdxl->fps.num); } @@ -141,6 +143,8 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt) cdxl->read_chunk = audio_size; } + if (!cdxl->read_chunk) + avio_skip(pb, current_size - audio_size - video_size - CDXL_HEADER_SIZE); return ret; } diff --git a/libavformat/img2.c b/libavformat/img2.c index 622d759866..2309a8913f 100644 --- a/libavformat/img2.c +++ b/libavformat/img2.c @@ -520,7 +520,8 @@ AVOutputFormat ff_image2_muxer = { .name = "image2", .long_name = NULL_IF_CONFIG_SMALL("image2 sequence"), .extensions = "bmp,dpx,jls,jpeg,jpg,ljpg,pam,pbm,pcx,pgm,pgmyuv,png," - "ppm,sgi,tga,tif,tiff,jp2,j2c,xwd", + "ppm,sgi,tga,tif,tiff,jp2,j2c,xwd,sun,ras,rs,im1,im8,im24," + "sunras", .priv_data_size = sizeof(VideoData), .video_codec = CODEC_ID_MJPEG, .write_header = write_header, diff --git a/libswscale/output.c b/libswscale/output.c index 75d0baad39..cae2c31805 100644 --- a/libswscale/output.c +++ b/libswscale/output.c @@ -298,6 +298,9 @@ static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterS } } +#define accumulate_bit(acc, val) \ + acc <<= 1; \ + acc |= (val) >= (128 + 110) #define output_pixel(pos, acc) \ if (target == PIX_FMT_MONOBLACK) { \ pos = acc; \ @@ -314,7 +317,6 @@ yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, int y, enum PixelFormat target) { const uint8_t * const d128=dither_8x8_220[y&7]; - uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; int i; unsigned acc = 0; @@ 
-333,8 +335,8 @@ yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, Y1 = av_clip_uint8(Y1); Y2 = av_clip_uint8(Y2); } - acc += acc + g[Y1 + d128[(i + 0) & 7]]; - acc += acc + g[Y2 + d128[(i + 1) & 7]]; + accumulate_bit(acc, Y1 + d128[(i + 0) & 7]); + accumulate_bit(acc, Y2 + d128[(i + 1) & 7]); if ((i & 7) == 6) { output_pixel(*dest++, acc); } @@ -350,19 +352,29 @@ yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], { const int16_t *buf0 = buf[0], *buf1 = buf[1]; const uint8_t * const d128 = dither_8x8_220[y & 7]; - uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; int yalpha1 = 4095 - yalpha; int i; for (i = 0; i < dstW - 7; i += 8) { - int acc = g[((buf0[i ] * yalpha1 + buf1[i ] * yalpha) >> 19) + d128[0]]; - acc += acc + g[((buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19) + d128[1]]; - acc += acc + g[((buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19) + d128[2]]; - acc += acc + g[((buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19) + d128[3]]; - acc += acc + g[((buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19) + d128[4]]; - acc += acc + g[((buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19) + d128[5]]; - acc += acc + g[((buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19) + d128[6]]; - acc += acc + g[((buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19) + d128[7]]; + int Y, acc = 0; + + Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[0]); + Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[1]); + Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[2]); + Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[3]); + Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[4]); + Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[5]); + Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[6]); + Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19; + accumulate_bit(acc, Y + d128[7]); + output_pixel(*dest++, acc); } } @@ -374,23 +386,26 @@ yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, int uvalpha, int y, enum PixelFormat target) { const uint8_t * const d128 = dither_8x8_220[y & 7]; - uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; int i; for (i = 0; i < dstW - 7; i += 8) { - int acc = g[((buf0[i ] + 64) >> 7) + d128[0]]; - acc += acc + g[((buf0[i + 1] + 64) >> 7) + d128[1]]; - acc += acc + g[((buf0[i + 2] + 64) >> 7) + d128[2]]; - acc += acc + g[((buf0[i + 3] + 64) >> 7) + d128[3]]; - acc += acc + g[((buf0[i + 4] + 64) >> 7) + d128[4]]; - acc += acc + g[((buf0[i + 5] + 64) >> 7) + d128[5]]; - acc += acc + g[((buf0[i + 6] + 64) >> 7) + d128[6]]; - acc += acc + g[((buf0[i + 7] + 64) >> 7) + d128[7]]; + int acc = 0; + + accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]); + accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]); + accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]); + accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]); + accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]); + accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]); + accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]); + accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]); + output_pixel(*dest++, acc); } } #undef output_pixel +#undef accumulate_bit #define 
YUV2PACKEDWRAPPER(name, base, ext, fmt) \ static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h index 930435608b..18ec4d985a 100644 --- a/libswscale/swscale_internal.h +++ b/libswscale/swscale_internal.h @@ -358,11 +358,10 @@ typedef struct SwsContext { #define U_TEMP "11*8+4*4*256*2+24" #define V_TEMP "11*8+4*4*256*2+32" #define Y_TEMP "11*8+4*4*256*2+40" -#define ALP_MMX_FILTER_OFFSET "11*8+4*4*256*2+48" -#define UV_OFF_PX "11*8+4*4*256*3+48" -#define UV_OFF_BYTE "11*8+4*4*256*3+56" -#define DITHER16 "11*8+4*4*256*3+64" -#define DITHER32 "11*8+4*4*256*3+80" +#define UV_OFF_PX "11*8+4*4*256*2+48" +#define UV_OFF_BYTE "11*8+4*4*256*2+56" +#define DITHER16 "11*8+4*4*256*2+64" +#define DITHER32 "11*8+4*4*256*2+80" DECLARE_ALIGNED(8, uint64_t, redDither); DECLARE_ALIGNED(8, uint64_t, greenDither); @@ -384,7 +383,6 @@ typedef struct SwsContext { DECLARE_ALIGNED(8, uint64_t, u_temp); DECLARE_ALIGNED(8, uint64_t, v_temp); DECLARE_ALIGNED(8, uint64_t, y_temp); - int32_t alpMmxFilter[4 * MAX_FILTER_SIZE]; // alignment of these values is not necessary, but merely here // to maintain the same offset across x8632 and x86-64. Once we // use proper offset macros in the asm, they can be removed. @@ -423,6 +421,7 @@ typedef struct SwsContext { #if HAVE_VIS DECLARE_ALIGNED(8, uint64_t, sparc_coeffs)[10]; #endif + int32_t alpMmxFilter[4 * MAX_FILTER_SIZE]; int use_mmx_vfilter; /* function pointers for swScale() */ diff --git a/libswscale/x86/swscale_template.c b/libswscale/x86/swscale_template.c index d9e5cbbf44..b179184034 100644 --- a/libswscale/x86/swscale_template.c +++ b/libswscale/x86/swscale_template.c @@ -342,7 +342,7 @@ static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter, "movq %%mm2, "U_TEMP"(%0) \n\t" "movq %%mm4, "V_TEMP"(%0) \n\t" "movq %%mm5, "Y_TEMP"(%0) \n\t" - YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET) + YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET) "movq "Y_TEMP"(%0), %%mm5 \n\t" "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" @@ -372,7 +372,7 @@ static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter, if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { YSCALEYUV2PACKEDX YSCALEYUV2RGBX - YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7) + YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7) "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" "packuswb %%mm7, %%mm1 \n\t" @@ -1162,14 +1162,15 @@ static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2], * YV12 to RGB without scaling or interpolating */ static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *ubuf0 = ubuf[0]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster + const int16_t *ubuf1 = ubuf[0]; if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" @@ -1198,6 +1199,7 @@ static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0, ); } } else { + const int16_t *ubuf1 = ubuf[1]; if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" @@ -1229,14 +1231,15 @@ static void 
RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0, } static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *ubuf0 = ubuf[0]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster + const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1250,6 +1253,7 @@ static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0, "a" (&c->redDither) ); } else { + const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1266,14 +1270,15 @@ static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0, } static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *ubuf0 = ubuf[0]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster + const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1293,6 +1298,7 @@ static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, "a" (&c->redDither) ); } else { + const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1315,14 +1321,15 @@ static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, } static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *ubuf0 = ubuf[0]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster + const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1342,6 +1349,7 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, "a" (&c->redDither) ); } else { + const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1401,14 +1409,15 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c) static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *bguf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; + const int16_t *ubuf0 = ubuf[0]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster + const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" @@ -1421,6 
+1430,7 @@ static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0, "a" (&c->redDither) ); } else { + const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" |