author | Michael Niedermayer <michaelni@gmx.at> | 2012-02-24 02:57:18 +0100
committer | Michael Niedermayer <michaelni@gmx.at> | 2012-02-24 02:57:18 +0100
commit | e2cc39b6096ed4353293252e3955417b7766f161 (patch)
tree | 0bc4d98c120dedcffb9b6e50943b4fc9e3c2a877 /libavcodec
parent | 32e74395a8e88dee1c149aeb36e7a21df431c181 (diff)
parent | 31632e73f47d25e2077fce729571259ee6354854 (diff)
download | ffmpeg-e2cc39b6096ed4353293252e3955417b7766f161.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master: (40 commits)
swf: check return values for av_get/new_packet().
wavpack: Don't shift minclip/maxclip
rtpenc: Expose the max packet size via an avoption
rtpenc: Move max_packet_size to a context variable
rtpenc: Add an option for not sending RTCP packets
lavc: drop encode() support for video.
snowenc: switch to encode2().
snowenc: don't abuse input picture for storing information.
a64multienc: switch to encode2().
a64multienc: don't write into output buffer when there's no output.
libxvid: switch to encode2().
tiffenc: switch to encode2().
tiffenc: properly forward error codes in encode_frame().
lavc: drop libdirac encoder.
gifenc: switch to encode2().
libvpxenc: switch to encode2().
flashsvenc: switch to encode2().
Remove libpostproc.
lcl: don't overwrite input memory.
swscale: take first/lastline over/underflows into account for MMX.
...
Conflicts:
.gitignore
Makefile
cmdutils.c
configure
doc/APIchanges
libavcodec/Makefile
libavcodec/allcodecs.c
libavcodec/libdiracenc.c
libavcodec/libxvidff.c
libavcodec/qtrleenc.c
libavcodec/tiffenc.c
libavcodec/utils.c
libavformat/mov.c
libavformat/movenc.c
libpostproc/Makefile
libpostproc/postprocess.c
libpostproc/postprocess.h
libpostproc/postprocess_altivec_template.c
libpostproc/postprocess_internal.h
libpostproc/postprocess_template.c
libswscale/swscale.c
libswscale/utils.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
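
Most of the per-encoder changes pulled in here are the same mechanical conversion from the old `encode()` callback to `encode2()`: instead of writing into a caller-supplied buffer and returning a byte count, the encoder fills an `AVPacket` (allocated with `ff_alloc_packet()` or `av_new_packet()`), sets its pts and flags, and reports output through `*got_packet`. The following is only a minimal sketch of that pattern, distilled from the hunks below; `foo_encode_frame` and the worst-case size estimate are placeholders, not code from this merge.

```c
#include "avcodec.h"
#include "internal.h"           /* ff_alloc_packet() */

static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    int ret;
    /* Assumed worst-case size for one encoded frame (placeholder estimate). */
    int max_size = avctx->width * avctx->height * 3 + FF_MIN_BUFFER_SIZE;

    if ((ret = ff_alloc_packet(pkt, max_size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    /* ... compress *pict into pkt->data and set pkt->size accordingly ... */

    pkt->pts    = pkt->dts = pict->pts;
    pkt->flags |= AV_PKT_FLAG_KEY;   /* intra-only: every packet is a keyframe */
    *got_packet = 1;
    return 0;                        /* errors are reported via the return value */
}
```

Encoders with delay (`CODEC_CAP_DELAY`, e.g. the libschroedinger, libtheora, libvpx and libxavs wrappers below) additionally handle a NULL input frame to flush any queued packets.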
Diffstat (limited to 'libavcodec')
-rw-r--r-- | libavcodec/Makefile | 3
-rw-r--r-- | libavcodec/a64enc.h | 3
-rw-r--r-- | libavcodec/a64multienc.c | 42
-rw-r--r-- | libavcodec/ac3_parser.c | 2
-rw-r--r-- | libavcodec/ac3dec.c | 2
-rw-r--r-- | libavcodec/ac3tab.c | 2
-rw-r--r-- | libavcodec/ac3tab.h | 2
-rw-r--r-- | libavcodec/allcodecs.c | 2
-rw-r--r-- | libavcodec/asv1.c | 34
-rw-r--r-- | libavcodec/ffv1.c | 34
-rw-r--r-- | libavcodec/flacdec.c | 13
-rw-r--r-- | libavcodec/flashsvenc.c | 26
-rw-r--r-- | libavcodec/gif.c | 25
-rw-r--r-- | libavcodec/jpeglsenc.c | 31
-rw-r--r-- | libavcodec/lcldec.c | 11
-rw-r--r-- | libavcodec/lclenc.c | 26
-rw-r--r-- | libavcodec/libdiracenc.c | 405
-rw-r--r-- | libavcodec/libschroedingerenc.c | 43
-rw-r--r-- | libavcodec/libtheoraenc.c | 25
-rw-r--r-- | libavcodec/libvpxenc.c | 53
-rw-r--r-- | libavcodec/libxavs.c | 118
-rw-r--r-- | libavcodec/libxvidff.c | 60
-rw-r--r-- | libavcodec/ljpegenc.c | 32
-rw-r--r-- | libavcodec/qtrleenc.c | 30
-rw-r--r-- | libavcodec/snowenc.c | 73
-rw-r--r-- | libavcodec/tiffenc.c | 57
-rw-r--r-- | libavcodec/truemotion2.c | 2
-rw-r--r-- | libavcodec/utils.c | 48
-rw-r--r-- | libavcodec/wavpack.c | 4
29 files changed, 499 insertions, 709 deletions
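
Alongside the callback body, each converted file also flips the `.encode` member of its `AVCodec` definition to `.encode2`, as the hunks below show for a64multi, asv1, ffv1, gif, lcl and the external-library wrappers. Schematically, reusing the hypothetical `foo` names from the sketch above (none of these identifiers come from this merge):

```c
AVCodec ff_foo_encoder = {
    .name           = "foo",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FOO,          /* placeholder codec id */
    .priv_data_size = sizeof(FooContext),    /* placeholder private context */
    .init           = foo_encode_init,
    .encode2        = foo_encode_frame,      /* was: .encode = foo_encode_frame */
    .close          = foo_encode_close,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("hypothetical foo encoder"),
};
```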
diff --git a/libavcodec/Makefile b/libavcodec/Makefile index 0b144c8635..f46708368f 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -603,7 +603,7 @@ OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \ flacdec.o flacdata.o flac.o \ mpegaudiodata.o vorbis_data.o OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o -OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o timecode.o +OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o @@ -624,7 +624,6 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o -OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o diff --git a/libavcodec/a64enc.h b/libavcodec/a64enc.h index fb559ba82b..4868461f40 100644 --- a/libavcodec/a64enc.h +++ b/libavcodec/a64enc.h @@ -50,6 +50,9 @@ typedef struct A64Context { uint8_t *mc_colram; uint8_t *mc_palette; int mc_pal_size; + + /* pts of the next packet that will be output */ + int64_t next_pts; } A64Context; #endif /* AVCODEC_A64ENC_H */ diff --git a/libavcodec/a64multienc.c b/libavcodec/a64multienc.c index 5a665d0592..ed8dde552c 100644 --- a/libavcodec/a64multienc.c +++ b/libavcodec/a64multienc.c @@ -28,6 +28,7 @@ #include "a64colors.h" #include "a64tables.h" #include "elbg.h" +#include "internal.h" #include "libavutil/intreadwrite.h" #define DITHERSTEPS 8 @@ -221,6 +222,8 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx) if (!avctx->codec_tag) avctx->codec_tag = AV_RL32("a64m"); + c->next_pts = AV_NOPTS_VALUE; + return 0; } @@ -239,11 +242,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra } } -static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, - int buf_size, void *data) +static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) { A64Context *c = avctx->priv_data; - AVFrame *pict = data; AVFrame *const p = (AVFrame *) & c->picture; int frame; @@ -251,7 +253,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, int b_height; int b_width; - int req_size; + int req_size, ret; + uint8_t *buf; int *charmap = c->mc_charmap; uint8_t *colram = c->mc_colram; @@ -274,7 +277,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, } /* no data, means end encoding asap */ - if (!data) { + if (!pict) { /* all done, end encoding */ if (!c->mc_lifetime) return 0; /* no more frames in queue, prepare to flush remaining frames */ @@ -292,6 +295,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, p->key_frame = 1; to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter); c->mc_frame_counter++; + if (c->next_pts == AV_NOPTS_VALUE) + c->next_pts = pict->pts; /* lifetime is not reached so wait for next frame first */ return 0; } @@ -302,6 +307,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, req_size = 0; /* any frames to encode? 
*/ if (c->mc_lifetime) { + req_size = charset_size + c->mc_lifetime*(screen_size + colram_size); + if ((ret = ff_alloc_packet(pkt, req_size)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", req_size); + return ret; + } + buf = pkt->data; + /* calc optimal new charset + charmaps */ ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx); ff_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx); @@ -310,15 +322,12 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, render_charset(avctx, charset, colram); /* copy charset to buf */ - memcpy(buf,charset, charset_size); + memcpy(buf, charset, charset_size); /* advance pointers */ buf += charset_size; charset += charset_size; - req_size += charset_size; } - /* no charset so clean buf */ - else memset(buf, 0, charset_size); /* write x frames to buf */ for (frame = 0; frame < c->mc_lifetime; frame++) { @@ -351,11 +360,12 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf, /* reset counter */ c->mc_frame_counter = 0; - if (req_size > buf_size) { - av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", req_size, buf_size); - return -1; - } - return req_size; + pkt->pts = pkt->dts = c->next_pts; + c->next_pts = AV_NOPTS_VALUE; + + pkt->size = req_size; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = !!req_size; } return 0; } @@ -366,7 +376,7 @@ AVCodec ff_a64multi_encoder = { .id = CODEC_ID_A64_MULTI, .priv_data_size = sizeof(A64Context), .init = a64multi_init_encoder, - .encode = a64multi_encode_frame, + .encode2 = a64multi_encode_frame, .close = a64multi_close_encoder, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"), @@ -379,7 +389,7 @@ AVCodec ff_a64multi5_encoder = { .id = CODEC_ID_A64_MULTI5, .priv_data_size = sizeof(A64Context), .init = a64multi_init_encoder, - .encode = a64multi_encode_frame, + .encode2 = a64multi_encode_frame, .close = a64multi_close_encoder, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"), diff --git a/libavcodec/ac3_parser.c b/libavcodec/ac3_parser.c index 14ca196aaf..83cc4e0e36 100644 --- a/libavcodec/ac3_parser.c +++ b/libavcodec/ac3_parser.c @@ -134,7 +134,7 @@ int avpriv_ac3_parse_header(GetBitContext *gbc, AC3HeaderInfo *hdr) (hdr->num_blocks * 256.0)); hdr->channels = ff_ac3_channels_tab[hdr->channel_mode] + hdr->lfe_on; } - hdr->channel_layout = ff_ac3_channel_layout_tab[hdr->channel_mode]; + hdr->channel_layout = avpriv_ac3_channel_layout_tab[hdr->channel_mode]; if (hdr->lfe_on) hdr->channel_layout |= AV_CH_LOW_FREQUENCY; diff --git a/libavcodec/ac3dec.c b/libavcodec/ac3dec.c index be17efb24d..31891b5807 100644 --- a/libavcodec/ac3dec.c +++ b/libavcodec/ac3dec.c @@ -1378,7 +1378,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data, avctx->request_channels < s->channels) { s->out_channels = avctx->request_channels; s->output_mode = avctx->request_channels == 1 ? 
AC3_CHMODE_MONO : AC3_CHMODE_STEREO; - s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode]; + s->channel_layout = avpriv_ac3_channel_layout_tab[s->output_mode]; } avctx->channels = s->out_channels; avctx->channel_layout = s->channel_layout; diff --git a/libavcodec/ac3tab.c b/libavcodec/ac3tab.c index 3b3e715655..ccf04ec016 100644 --- a/libavcodec/ac3tab.c +++ b/libavcodec/ac3tab.c @@ -84,7 +84,7 @@ const uint8_t ff_ac3_channels_tab[8] = { /** * Map audio coding mode (acmod) to channel layout mask. */ -const uint16_t ff_ac3_channel_layout_tab[8] = { +const uint16_t avpriv_ac3_channel_layout_tab[8] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_MONO, AV_CH_LAYOUT_STEREO, diff --git a/libavcodec/ac3tab.h b/libavcodec/ac3tab.h index aa13c8f6e5..d6e0eed86a 100644 --- a/libavcodec/ac3tab.h +++ b/libavcodec/ac3tab.h @@ -33,7 +33,7 @@ extern const uint16_t ff_ac3_frame_size_tab[38][3]; extern const uint8_t ff_ac3_channels_tab[8]; -extern const uint16_t ff_ac3_channel_layout_tab[8]; +extern const uint16_t avpriv_ac3_channel_layout_tab[8]; extern const uint8_t ff_ac3_enc_channel_map[8][2][6]; extern const uint8_t ff_ac3_dec_channel_map[8][2][6]; extern const uint16_t ff_ac3_sample_rate_tab[3]; diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index ef9a5ae52b..18121ba808 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -400,7 +400,7 @@ void avcodec_register_all(void) /* external libraries */ REGISTER_ENCODER (LIBAACPLUS, libaacplus); REGISTER_DECODER (LIBCELT, libcelt); - REGISTER_ENCDEC (LIBDIRAC, libdirac); + REGISTER_DECODER (LIBDIRAC, libdirac); REGISTER_ENCODER (LIBFAAC, libfaac); REGISTER_ENCDEC (LIBGSM, libgsm); REGISTER_ENCDEC (LIBGSM_MS, libgsm_ms); diff --git a/libavcodec/asv1.c b/libavcodec/asv1.c index 563c92c335..4bcc8d72b1 100644 --- a/libavcodec/asv1.c +++ b/libavcodec/asv1.c @@ -325,10 +325,12 @@ static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){ return 0; } +#define MAX_MB_SIZE (30*16*16*3/2/8) + static inline int encode_mb(ASV1Context *a, DCTELEM block[6][64]){ int i; - if(a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < 30*16*16*3/2/8){ + if (a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < MAX_MB_SIZE) { av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } @@ -461,14 +463,22 @@ static int decode_frame(AVCodecContext *avctx, } #if CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ ASV1Context * const a = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p= &a->picture; - int size; + int size, ret; int mb_x, mb_y; - init_put_bits(&a->pb, buf, buf_size); + if (!pkt->data && + (ret = av_new_packet(pkt, a->mb_height*a->mb_width*MAX_MB_SIZE + + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + + init_put_bits(&a->pb, pkt->data, pkt->size); *p = *pict; p->pict_type= AV_PICTURE_TYPE_I; @@ -505,14 +515,18 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, size= put_bits_count(&a->pb)/32; if(avctx->codec_id == CODEC_ID_ASV1) - a->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size); + a->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size); else{ int i; for(i=0; i<4*size; i++) - buf[i]= av_reverse[ buf[i] ]; + pkt->data[i] = av_reverse[pkt->data[i]]; } - return size*4; + pkt->size = size*4; + pkt->flags 
|= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } #endif /* CONFIG_ASV1_ENCODER || CONFIG_ASV2_ENCODER */ @@ -634,7 +648,7 @@ AVCodec ff_asv1_encoder = { .id = CODEC_ID_ASV1, .priv_data_size = sizeof(ASV1Context), .init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, //encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("ASUS V1"), @@ -648,7 +662,7 @@ AVCodec ff_asv2_encoder = { .id = CODEC_ID_ASV2, .priv_data_size = sizeof(ASV1Context), .init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, //encode_end, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("ASUS V2"), diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c index 1535c5e462..4c0ea4517e 100644 --- a/libavcodec/ffv1.c +++ b/libavcodec/ffv1.c @@ -1135,17 +1135,25 @@ static int encode_slice(AVCodecContext *c, void *arg){ return 0; } -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ FFV1Context *f = avctx->priv_data; RangeCoder * const c= &f->slice_context[0]->c; - AVFrame *pict = data; AVFrame * const p= &f->picture; int used_count= 0; uint8_t keystate=128; uint8_t *buf_p; - int i; + int i, ret; + + if (!pkt->data && + (ret = av_new_packet(pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8 + + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } - ff_init_range_encoder(c, buf, buf_size); + ff_init_range_encoder(c, pkt->data, pkt->size); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); *p = *pict; @@ -1165,7 +1173,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, if(!f->ac){ used_count += ff_rac_terminate(c); //printf("pos=%d\n", used_count); - init_put_bits(&f->slice_context[0]->pb, buf + used_count, buf_size - used_count); + init_put_bits(&f->slice_context[0]->pb, pkt->data + used_count, pkt->size - used_count); }else if (f->ac>1){ int i; for(i=1; i<256; i++){ @@ -1176,8 +1184,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, for(i=1; i<f->slice_count; i++){ FFV1Context *fs= f->slice_context[i]; - uint8_t *start= buf + (buf_size-used_count)*i/f->slice_count; - int len= buf_size/f->slice_count; + uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count; + int len = pkt->size/f->slice_count; if(fs->ac){ ff_init_range_encoder(&fs->c, start, len); @@ -1187,7 +1195,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, } avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*)); - buf_p=buf; + buf_p = pkt->data; for(i=0; i<f->slice_count; i++){ FFV1Context *fs= f->slice_context[i]; int bytes; @@ -1202,7 +1210,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, used_count= 0; } if(i>0){ - av_assert0(bytes < buf_size/f->slice_count); + av_assert0(bytes < pkt->size/f->slice_count); memmove(buf_p, fs->ac ? 
fs->c.bytestream_start : fs->pb.buf, bytes); av_assert0(bytes < (1<<24)); AV_WB24(buf_p+bytes, bytes); @@ -1255,7 +1263,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, avctx->stats_out[0] = '\0'; f->picture_number++; - return buf_p-buf; + pkt->size = buf_p - pkt->data; + pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame; + *got_packet = 1; + + return 0; } #endif /* CONFIG_FFV1_ENCODER */ @@ -1843,7 +1855,7 @@ AVCodec ff_ffv1_encoder = { .id = CODEC_ID_FFV1, .priv_data_size = sizeof(FFV1Context), .init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, .close = common_end, .capabilities = CODEC_CAP_SLICE_THREADS, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE}, diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c index dc6bcaab57..87e53edce6 100644 --- a/libavcodec/flacdec.c +++ b/libavcodec/flacdec.c @@ -33,6 +33,7 @@ #include <limits.h> +#include "libavutil/audioconvert.h" #include "libavutil/crc.h" #include "avcodec.h" #include "internal.h" @@ -62,6 +63,15 @@ typedef struct FLACContext { int32_t *decoded[FLAC_MAX_CHANNELS]; ///< decoded samples } FLACContext; +static const int64_t flac_channel_layouts[6] = { + AV_CH_LAYOUT_MONO, + AV_CH_LAYOUT_STEREO, + AV_CH_LAYOUT_SURROUND, + AV_CH_LAYOUT_QUAD, + AV_CH_LAYOUT_5POINT0, + AV_CH_LAYOUT_5POINT1 +}; + static void allocate_buffers(FLACContext *s); int avpriv_flac_is_extradata_valid(AVCodecContext *avctx, @@ -120,6 +130,9 @@ static av_cold int flac_decode_init(AVCodecContext *avctx) avcodec_get_frame_defaults(&s->frame); avctx->coded_frame = &s->frame; + if (avctx->channels <= FF_ARRAY_ELEMS(flac_channel_layouts)) + avctx->channel_layout = flac_channel_layouts[avctx->channels - 1]; + return 0; } diff --git a/libavcodec/flashsvenc.c b/libavcodec/flashsvenc.c index 77290e866f..f5b3015fbf 100644 --- a/libavcodec/flashsvenc.c +++ b/libavcodec/flashsvenc.c @@ -49,6 +49,7 @@ #include <zlib.h> #include "avcodec.h" +#include "internal.h" #include "put_bits.h" #include "bytestream.h" @@ -194,11 +195,10 @@ static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf, } -static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, - int buf_size, void *data) +static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) { FlashSVContext * const s = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p = &s->frame; uint8_t *pfptr; int res; @@ -228,15 +228,15 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, I_frame = 1; } - if (buf_size < s->image_width * s->image_height * 3) { + if ((res = ff_alloc_packet(pkt, s->image_width * s->image_height * 3)) < 0) { //Conservative upper bound check for compressed data - av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", - buf_size, s->image_width * s->image_height * 3); - return -1; + av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", + s->image_width * s->image_height * 3); + return res; } - res = encode_bitstream(s, p, buf, buf_size, opt_w * 16, opt_h * 16, - pfptr, &I_frame); + pkt->size = encode_bitstream(s, p, pkt->data, pkt->size, opt_w * 16, opt_h * 16, + pfptr, &I_frame); //save the current frame if (p->linesize[0] > 0) @@ -259,7 
+259,11 @@ static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, avctx->coded_frame = p; - return res; + if (p->key_frame) + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } static av_cold int flashsv_encode_end(AVCodecContext *avctx) @@ -281,7 +285,7 @@ AVCodec ff_flashsv_encoder = { .id = CODEC_ID_FLASHSV, .priv_data_size = sizeof(FlashSVContext), .init = flashsv_encode_init, - .encode = flashsv_encode_frame, + .encode2 = flashsv_encode_frame, .close = flashsv_encode_end, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"), diff --git a/libavcodec/gif.c b/libavcodec/gif.c index 830a059422..875c5b15dc 100644 --- a/libavcodec/gif.c +++ b/libavcodec/gif.c @@ -43,6 +43,7 @@ #include "avcodec.h" #include "bytestream.h" +#include "internal.h" #include "lzw.h" /* The GIF format uses reversed order for bitstreams... */ @@ -155,20 +156,32 @@ static av_cold int gif_encode_init(AVCodecContext *avctx) } /* better than nothing gif encoder */ -static int gif_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data) +static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) { GIFContext *s = avctx->priv_data; - AVFrame *pict = data; AVFrame *const p = (AVFrame *)&s->picture; - uint8_t *outbuf_ptr = outbuf; - uint8_t *end = outbuf + buf_size; + uint8_t *outbuf_ptr, *end; + int ret; + + if ((ret = ff_alloc_packet(pkt, avctx->width*avctx->height*7/5 + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + outbuf_ptr = pkt->data; + end = pkt->data + pkt->size; *p = *pict; p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; gif_image_write_header(avctx, &outbuf_ptr, (uint32_t *)pict->data[1]); gif_image_write_image(avctx, &outbuf_ptr, end, pict->data[0], pict->linesize[0]); - return outbuf_ptr - outbuf; + + pkt->size = outbuf_ptr - pkt->data; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } static int gif_encode_close(AVCodecContext *avctx) @@ -186,7 +199,7 @@ AVCodec ff_gif_encoder = { .id = CODEC_ID_GIF, .priv_data_size = sizeof(GIFContext), .init = gif_encode_init, - .encode = gif_encode_frame, + .encode2 = gif_encode_frame, .close = gif_encode_close, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"), diff --git a/libavcodec/jpeglsenc.c b/libavcodec/jpeglsenc.c index d72287b296..d5135adb30 100644 --- a/libavcodec/jpeglsenc.c +++ b/libavcodec/jpeglsenc.c @@ -28,6 +28,7 @@ #include "avcodec.h" #include "get_bits.h" #include "golomb.h" +#include "internal.h" #include "mathops.h" #include "dsputil.h" #include "mjpeg.h" @@ -227,23 +228,19 @@ static void ls_store_lse(JLSState *state, PutBitContext *pb){ put_bits(pb, 16, state->reset); } -static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ JpeglsContext * const s = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p= (AVFrame*)&s->picture; const int near = avctx->prediction_method; PutBitContext pb, pb2; GetBitContext gb; uint8_t *buf2, *zero, *cur, *last; JLSState *state; - int i, size; + int i, size, ret; int comps; - buf2 = av_malloc(buf_size); - - init_put_bits(&pb, buf, 
buf_size); - init_put_bits(&pb2, buf2, buf_size); - *p = *pict; p->pict_type= AV_PICTURE_TYPE_I; p->key_frame= 1; @@ -253,6 +250,17 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_ else comps = 3; + if ((ret = ff_alloc_packet(pkt, avctx->width*avctx->height*comps*4 + + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + + buf2 = av_malloc(pkt->size); + + init_put_bits(&pb, pkt->data, pkt->size); + init_put_bits(&pb2, buf2, pkt->size); + /* write our own JPEG header, can't use mjpeg_picture_header */ put_marker(&pb, SOI); put_marker(&pb, SOF48); @@ -366,7 +374,10 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_ emms_c(); - return put_bits_count(&pb) >> 3; + pkt->size = put_bits_count(&pb) >> 3; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + return 0; } static av_cold int encode_init_ls(AVCodecContext *ctx) { @@ -388,7 +399,7 @@ AVCodec ff_jpegls_encoder = { //FIXME avoid MPV_* lossless JPEG should not need .id = CODEC_ID_JPEGLS, .priv_data_size = sizeof(JpeglsContext), .init = encode_init_ls, - .encode = encode_picture_ls, + .encode2 = encode_picture_ls, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"), }; diff --git a/libavcodec/lcldec.c b/libavcodec/lcldec.c index 57b04f79f1..901d9421f4 100644 --- a/libavcodec/lcldec.c +++ b/libavcodec/lcldec.c @@ -242,9 +242,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac * gives a file with ZLIB fourcc, but frame is really uncompressed. * To be sure that's true check also frame size */ if (c->compression == COMP_ZLIB_NORMAL && c->imgtype == IMGTYPE_RGB24 && - len == width * height * 3) - break; - if (c->flags & FLAG_MULTITHREAD) { + len == width * height * 3) { + if (c->flags & FLAG_PNGFILTER) { + memcpy(c->decomp_buf, encoded, len); + encoded = c->decomp_buf; + } else { + break; + } + } else if (c->flags & FLAG_MULTITHREAD) { int ret; mthread_inlen = AV_RL32(encoded); mthread_inlen = FFMIN(mthread_inlen, len - 8); diff --git a/libavcodec/lclenc.c b/libavcodec/lclenc.c index 9f66960910..a656986da9 100644 --- a/libavcodec/lclenc.c +++ b/libavcodec/lclenc.c @@ -68,12 +68,20 @@ typedef struct LclEncContext { * Encode a frame * */ -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ LclEncContext *c = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p = &c->pic; - int i; + int i, ret; int zret; // Zlib return code + int max_size = deflateBound(&c->zstream, avctx->width * avctx->height * 3); + + if (!pkt->data && + (ret = av_new_packet(pkt, max_size)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error allocating packet of size %d.\n", max_size); + return ret; + } *p = *pict; p->pict_type= AV_PICTURE_TYPE_I; @@ -89,8 +97,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, av_log(avctx, AV_LOG_ERROR, "Deflate reset error: %d\n", zret); return -1; } - c->zstream.next_out = buf; - c->zstream.avail_out = buf_size; + c->zstream.next_out = pkt->data; + c->zstream.avail_out = pkt->size; for(i = avctx->height - 1; i >= 0; i--) { c->zstream.next_in = p->data[0]+p->linesize[0]*i; @@ -107,7 +115,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, return -1; } - return 
c->zstream.total_out; + pkt->size = c->zstream.total_out; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } /* @@ -176,7 +188,7 @@ AVCodec ff_zlib_encoder = { .id = CODEC_ID_ZLIB, .priv_data_size = sizeof(LclEncContext), .init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, .close = encode_end, .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_BGR24, PIX_FMT_NONE }, .long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) ZLIB"), diff --git a/libavcodec/libdiracenc.c b/libavcodec/libdiracenc.c deleted file mode 100644 index 385bce9018..0000000000 --- a/libavcodec/libdiracenc.c +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Dirac encoding support via libdirac library - * Copyright (c) 2005 BBC, Andrew Kennedy <dirac at rd dot bbc dot co dot uk> - * Copyright (c) 2006-2008 BBC, Anuradha Suraparaju <asuraparaju at gmail dot com > - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** -* @file -* Dirac encoding support via libdirac library; more details about the -* Dirac project can be found at http://dirac.sourceforge.net/. -* The libdirac_encoder library implements Dirac specification version 2.2 -* (http://dirac.sourceforge.net/specification.html). -*/ - -#include "libdirac_libschro.h" -#include "libdirac.h" - -#undef NDEBUG -#include <assert.h> - - -#include <libdirac_encoder/dirac_encoder.h> - -/** Dirac encoder private data */ -typedef struct DiracEncoderParams { - /** Dirac encoder context */ - dirac_encoder_context_t enc_ctx; - - /** frame being encoded */ - AVFrame picture; - - /** frame size */ - int frame_size; - - /** Dirac encoder handle */ - dirac_encoder_t* p_encoder; - - /** input frame buffer */ - unsigned char *p_in_frame_buf; - - /** buffer to store encoder output before writing it to the frame queue */ - unsigned char *enc_buf; - - /** size of encoder buffer */ - int enc_buf_size; - - /** queue storing encoded frames */ - DiracSchroQueue enc_frame_queue; - - /** end of sequence signalled by user, 0 - false, 1 - true */ - int eos_signalled; - - /** end of sequence returned by encoder, 0 - false, 1 - true */ - int eos_pulled; -} DiracEncoderParams; - -/** -* Works out Dirac-compatible chroma format. -*/ -static dirac_chroma_t GetDiracChromaFormat(enum PixelFormat ff_pix_fmt) -{ - int num_formats = sizeof(dirac_pixel_format_map) / - sizeof(dirac_pixel_format_map[0]); - int idx; - - for (idx = 0; idx < num_formats; ++idx) - if (dirac_pixel_format_map[idx].ff_pix_fmt == ff_pix_fmt) - return dirac_pixel_format_map[idx].dirac_pix_fmt; - return formatNK; -} - -/** -* Dirac video preset table. Ensure that this tables matches up correctly -* with the ff_dirac_schro_video_format_info table in libdirac_libschro.c. 
-*/ -static const VideoFormat ff_dirac_video_formats[]={ - VIDEO_FORMAT_CUSTOM , - VIDEO_FORMAT_QSIF525 , - VIDEO_FORMAT_QCIF , - VIDEO_FORMAT_SIF525 , - VIDEO_FORMAT_CIF , - VIDEO_FORMAT_4SIF525 , - VIDEO_FORMAT_4CIF , - VIDEO_FORMAT_SD_480I60 , - VIDEO_FORMAT_SD_576I50 , - VIDEO_FORMAT_HD_720P60 , - VIDEO_FORMAT_HD_720P50 , - VIDEO_FORMAT_HD_1080I60 , - VIDEO_FORMAT_HD_1080I50 , - VIDEO_FORMAT_HD_1080P60 , - VIDEO_FORMAT_HD_1080P50 , - VIDEO_FORMAT_DIGI_CINEMA_2K24 , - VIDEO_FORMAT_DIGI_CINEMA_4K24 , -}; - -/** -* Returns the video format preset matching the input video dimensions and -* time base. -*/ -static VideoFormat GetDiracVideoFormatPreset(AVCodecContext *avccontext) -{ - unsigned int num_formats = sizeof(ff_dirac_video_formats) / - sizeof(ff_dirac_video_formats[0]); - - unsigned int idx = ff_dirac_schro_get_video_format_idx(avccontext); - - return (idx < num_formats) ? - ff_dirac_video_formats[idx] : VIDEO_FORMAT_CUSTOM; -} - -static av_cold int libdirac_encode_init(AVCodecContext *avccontext) -{ - - DiracEncoderParams* p_dirac_params = avccontext->priv_data; - int no_local = 1; - int verbose = avccontext->debug; - VideoFormat preset; - - /* get Dirac preset */ - preset = GetDiracVideoFormatPreset(avccontext); - - /* initialize the encoder context */ - dirac_encoder_context_init(&p_dirac_params->enc_ctx, preset); - - p_dirac_params->enc_ctx.src_params.chroma = GetDiracChromaFormat(avccontext->pix_fmt); - - if (p_dirac_params->enc_ctx.src_params.chroma == formatNK) { - av_log(avccontext, AV_LOG_ERROR, - "Unsupported pixel format %d. This codec supports only " - "Planar YUV formats (yuv420p, yuv422p, yuv444p\n", - avccontext->pix_fmt); - return -1; - } - - p_dirac_params->enc_ctx.src_params.frame_rate.numerator = avccontext->time_base.den; - p_dirac_params->enc_ctx.src_params.frame_rate.denominator = avccontext->time_base.num; - - p_dirac_params->enc_ctx.src_params.width = avccontext->width; - p_dirac_params->enc_ctx.src_params.height = avccontext->height; - - p_dirac_params->frame_size = avpicture_get_size(avccontext->pix_fmt, - avccontext->width, - avccontext->height); - - avccontext->coded_frame = &p_dirac_params->picture; - - if (no_local) { - p_dirac_params->enc_ctx.decode_flag = 0; - p_dirac_params->enc_ctx.instr_flag = 0; - } else { - p_dirac_params->enc_ctx.decode_flag = 1; - p_dirac_params->enc_ctx.instr_flag = 1; - } - - /* Intra-only sequence */ - if (!avccontext->gop_size) { - p_dirac_params->enc_ctx.enc_params.num_L1 = 0; - if (avccontext->coder_type == FF_CODER_TYPE_VLC) - p_dirac_params->enc_ctx.enc_params.using_ac = 0; - } else - avccontext->has_b_frames = 1; - - if (avccontext->flags & CODEC_FLAG_QSCALE) { - if (avccontext->global_quality) { - p_dirac_params->enc_ctx.enc_params.qf = avccontext->global_quality - / (FF_QP2LAMBDA * 10.0); - /* if it is not default bitrate then send target rate. 
*/ - if (avccontext->bit_rate >= 1000 && - avccontext->bit_rate != 200000) - p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate - / 1000; - } else - p_dirac_params->enc_ctx.enc_params.lossless = 1; - } else if (avccontext->bit_rate >= 1000) - p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate / 1000; - - if ((preset > VIDEO_FORMAT_QCIF || preset < VIDEO_FORMAT_QSIF525) && - avccontext->bit_rate == 200000) - p_dirac_params->enc_ctx.enc_params.trate = 0; - - if (avccontext->flags & CODEC_FLAG_INTERLACED_ME) - /* all material can be coded as interlaced or progressive - * irrespective of the type of source material */ - p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1; - - p_dirac_params->p_encoder = dirac_encoder_init(&p_dirac_params->enc_ctx, - verbose); - - if (!p_dirac_params->p_encoder) { - av_log(avccontext, AV_LOG_ERROR, - "Unrecoverable Error: dirac_encoder_init failed. "); - return EXIT_FAILURE; - } - - /* allocate enough memory for the incoming data */ - p_dirac_params->p_in_frame_buf = av_malloc(p_dirac_params->frame_size); - - /* initialize the encoded frame queue */ - ff_dirac_schro_queue_init(&p_dirac_params->enc_frame_queue); - - return 0; -} - -static void DiracFreeFrame(void *data) -{ - DiracSchroEncodedFrame *enc_frame = data; - - av_freep(&enc_frame->p_encbuf); - av_free(enc_frame); -} - -static int libdirac_encode_frame(AVCodecContext *avccontext, - unsigned char *frame, - int buf_size, void *data) -{ - int enc_size = 0; - dirac_encoder_state_t state; - DiracEncoderParams *p_dirac_params = avccontext->priv_data; - DiracSchroEncodedFrame *p_frame_output = NULL; - DiracSchroEncodedFrame *p_next_output_frame = NULL; - int go = 1; - int last_frame_in_sequence = 0; - - if (!data) { - /* push end of sequence if not already signalled */ - if (!p_dirac_params->eos_signalled) { - dirac_encoder_end_sequence(p_dirac_params->p_encoder); - p_dirac_params->eos_signalled = 1; - } - } else { - - /* Allocate frame data to Dirac input buffer. - * Input line size may differ from what the codec supports, - * especially when transcoding from one format to another. - * So use avpicture_layout to copy the frame. */ - avpicture_layout((AVPicture *)data, avccontext->pix_fmt, - avccontext->width, avccontext->height, - p_dirac_params->p_in_frame_buf, - p_dirac_params->frame_size); - - /* load next frame */ - if (dirac_encoder_load(p_dirac_params->p_encoder, - p_dirac_params->p_in_frame_buf, - p_dirac_params->frame_size) < 0) { - av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error." - " dirac_encoder_load failed...\n"); - return -1; - } - } - - if (p_dirac_params->eos_pulled) - go = 0; - - while (go) { - p_dirac_params->p_encoder->enc_buf.buffer = frame; - p_dirac_params->p_encoder->enc_buf.size = buf_size; - /* process frame */ - state = dirac_encoder_output(p_dirac_params->p_encoder); - - switch (state) { - case ENC_STATE_AVAIL: - case ENC_STATE_EOS: - assert(p_dirac_params->p_encoder->enc_buf.size > 0); - - /* All non-frame data is prepended to actual frame data to - * be able to set the pts correctly. 
So we don't write data - * to the frame output queue until we actually have a frame - */ - - p_dirac_params->enc_buf = av_realloc(p_dirac_params->enc_buf, - p_dirac_params->enc_buf_size + - p_dirac_params->p_encoder->enc_buf.size); - memcpy(p_dirac_params->enc_buf + p_dirac_params->enc_buf_size, - p_dirac_params->p_encoder->enc_buf.buffer, - p_dirac_params->p_encoder->enc_buf.size); - - p_dirac_params->enc_buf_size += p_dirac_params->p_encoder->enc_buf.size; - - if (state == ENC_STATE_EOS) { - p_dirac_params->eos_pulled = 1; - go = 0; - } - - /* If non-frame data, don't output it until it we get an - * encoded frame back from the encoder. */ - if (p_dirac_params->p_encoder->enc_pparams.pnum == -1) - break; - - /* create output frame */ - p_frame_output = av_mallocz(sizeof(DiracSchroEncodedFrame)); - /* set output data */ - p_frame_output->size = p_dirac_params->enc_buf_size; - p_frame_output->p_encbuf = p_dirac_params->enc_buf; - p_frame_output->frame_num = p_dirac_params->p_encoder->enc_pparams.pnum; - - if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE && - p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE) - p_frame_output->key_frame = 1; - - ff_dirac_schro_queue_push_back(&p_dirac_params->enc_frame_queue, - p_frame_output); - - p_dirac_params->enc_buf_size = 0; - p_dirac_params->enc_buf = NULL; - break; - - case ENC_STATE_BUFFER: - go = 0; - break; - - case ENC_STATE_INVALID: - av_log(avccontext, AV_LOG_ERROR, - "Unrecoverable Dirac Encoder Error. Quitting...\n"); - return -1; - - default: - av_log(avccontext, AV_LOG_ERROR, "Unknown Dirac Encoder state\n"); - return -1; - } - } - - /* copy 'next' frame in queue */ - - if (p_dirac_params->enc_frame_queue.size == 1 && p_dirac_params->eos_pulled) - last_frame_in_sequence = 1; - - p_next_output_frame = ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue); - - if (!p_next_output_frame) - return 0; - - memcpy(frame, p_next_output_frame->p_encbuf, p_next_output_frame->size); - avccontext->coded_frame->key_frame = p_next_output_frame->key_frame; - /* Use the frame number of the encoded frame as the pts. It is OK to do - * so since Dirac is a constant framerate codec. It expects input to be - * of constant framerate. */ - avccontext->coded_frame->pts = p_next_output_frame->frame_num; - enc_size = p_next_output_frame->size; - - /* Append the end of sequence information to the last frame in the - * sequence. 
*/ - if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0) { - memcpy(frame + enc_size, p_dirac_params->enc_buf, - p_dirac_params->enc_buf_size); - enc_size += p_dirac_params->enc_buf_size; - av_freep(&p_dirac_params->enc_buf); - p_dirac_params->enc_buf_size = 0; - } - - /* free frame */ - DiracFreeFrame(p_next_output_frame); - - return enc_size; -} - -static av_cold int libdirac_encode_close(AVCodecContext *avccontext) -{ - DiracEncoderParams *p_dirac_params = avccontext->priv_data; - - /* close the encoder */ - dirac_encoder_close(p_dirac_params->p_encoder); - - /* free data in the output frame queue */ - ff_dirac_schro_queue_free(&p_dirac_params->enc_frame_queue, - DiracFreeFrame); - - /* free the encoder buffer */ - if (p_dirac_params->enc_buf_size) - av_freep(&p_dirac_params->enc_buf); - - /* free the input frame buffer */ - av_freep(&p_dirac_params->p_in_frame_buf); - - return 0; -} - - -AVCodec ff_libdirac_encoder = { - .name = "libdirac", - .type = AVMEDIA_TYPE_VIDEO, - .id = CODEC_ID_DIRAC, - .priv_data_size = sizeof(DiracEncoderParams), - .init = libdirac_encode_init, - .encode = libdirac_encode_frame, - .close = libdirac_encode_close, - .capabilities = CODEC_CAP_DELAY, - .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, - .long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"), -}; diff --git a/libavcodec/libschroedingerenc.c b/libavcodec/libschroedingerenc.c index 27267cd0e9..02ffac77f7 100644 --- a/libavcodec/libschroedingerenc.c +++ b/libavcodec/libschroedingerenc.c @@ -35,6 +35,7 @@ #include <schroedinger/schrovideoformat.h> #include "avcodec.h" +#include "internal.h" #include "libdirac_libschro.h" #include "libschroedinger.h" #include "bytestream.h" @@ -71,6 +72,9 @@ typedef struct SchroEncoderParams { /** end of sequence pulled */ int eos_pulled; + + /* counter for frames submitted to encoder, used as dts */ + int64_t dts; } SchroEncoderParams; /** @@ -175,6 +179,7 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext) schro_encoder_setting_set_double(p_schro_params->encoder, "au_distance", avccontext->gop_size); avccontext->has_b_frames = 1; + p_schro_params->dts = -1; } /* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. 
*/ @@ -236,7 +241,7 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext) } static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext, - void *in_data) + const AVFrame *frame) { SchroEncoderParams *p_schro_params = avccontext->priv_data; SchroFrame *in_frame; @@ -246,7 +251,7 @@ static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext, in_frame = ff_create_schro_frame(avccontext, p_schro_params->frame_format); if (in_frame) - avpicture_layout((AVPicture *)in_data, avccontext->pix_fmt, + avpicture_layout((const AVPicture *)frame, avccontext->pix_fmt, avccontext->width, avccontext->height, in_frame->components[0].data, p_schro_params->frame_size); @@ -262,9 +267,8 @@ static void SchroedingerFreeFrame(void *data) av_free(enc_frame); } -static int libschroedinger_encode_frame(AVCodecContext *avccontext, - unsigned char *frame, - int buf_size, void *data) +static int libschroedinger_encode_frame(AVCodecContext *avccontext, AVPacket *pkt, + const AVFrame *frame, int *got_packet) { int enc_size = 0; SchroEncoderParams *p_schro_params = avccontext->priv_data; @@ -275,8 +279,9 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext, int presentation_frame; int parse_code; int last_frame_in_sequence = 0; + int pkt_size, ret; - if (!data) { + if (!frame) { /* Push end of sequence if not already signalled. */ if (!p_schro_params->eos_signalled) { schro_encoder_end_of_stream(encoder); @@ -285,7 +290,7 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext, } else { /* Allocate frame data to schro input buffer. */ SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext, - data); + frame); /* Load next frame. */ schro_encoder_push_frame(encoder, in_frame); } @@ -373,28 +378,42 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext, if (!p_frame_output) return 0; - memcpy(frame, p_frame_output->p_encbuf, p_frame_output->size); + pkt_size = p_frame_output->size; + if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) + pkt_size += p_schro_params->enc_buf_size; + if ((ret = ff_alloc_packet(pkt, pkt_size)) < 0) { + av_log(avccontext, AV_LOG_ERROR, "Error getting output packet of size %d.\n", pkt_size); + goto error; + } + + memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size); avccontext->coded_frame->key_frame = p_frame_output->key_frame; /* Use the frame number of the encoded frame as the pts. It is OK to * do so since Dirac is a constant frame rate codec. It expects input * to be of constant frame rate. */ + pkt->pts = avccontext->coded_frame->pts = p_frame_output->frame_num; + pkt->dts = p_schro_params->dts++; enc_size = p_frame_output->size; /* Append the end of sequence information to the last frame in the * sequence. 
*/ if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) { - memcpy(frame + enc_size, p_schro_params->enc_buf, + memcpy(pkt->data + enc_size, p_schro_params->enc_buf, p_schro_params->enc_buf_size); enc_size += p_schro_params->enc_buf_size; av_freep(&p_schro_params->enc_buf); p_schro_params->enc_buf_size = 0; } + if (p_frame_output->key_frame) + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + +error: /* free frame */ SchroedingerFreeFrame(p_frame_output); - - return enc_size; + return ret; } @@ -427,7 +446,7 @@ AVCodec ff_libschroedinger_encoder = { .id = CODEC_ID_DIRAC, .priv_data_size = sizeof(SchroEncoderParams), .init = libschroedinger_encode_init, - .encode = libschroedinger_encode_frame, + .encode2 = libschroedinger_encode_frame, .close = libschroedinger_encode_close, .capabilities = CODEC_CAP_DELAY, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, diff --git a/libavcodec/libtheoraenc.c b/libavcodec/libtheoraenc.c index f9078bafa5..30a4b285ff 100644 --- a/libavcodec/libtheoraenc.c +++ b/libavcodec/libtheoraenc.c @@ -35,6 +35,7 @@ #include "libavutil/log.h" #include "libavutil/base64.h" #include "avcodec.h" +#include "internal.h" /* libtheora includes */ #include <theora/theoraenc.h> @@ -260,14 +261,13 @@ static av_cold int encode_init(AVCodecContext* avc_context) return 0; } -static int encode_frame(AVCodecContext* avc_context, uint8_t *outbuf, - int buf_size, void *data) +static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt, + const AVFrame *frame, int *got_packet) { th_ycbcr_buffer t_yuv_buffer; TheoraContext *h = avc_context->priv_data; - AVFrame *frame = data; ogg_packet o_packet; - int result, i; + int result, i, ret; // EOS, finish and get 1st pass stats if applicable if (!frame) { @@ -328,18 +328,21 @@ static int encode_frame(AVCodecContext* avc_context, uint8_t *outbuf, } /* Copy ogg_packet content out to buffer */ - if (buf_size < o_packet.bytes) { - av_log(avc_context, AV_LOG_ERROR, "encoded frame too large\n"); - return -1; + if ((ret = ff_alloc_packet(pkt, o_packet.bytes)) < 0) { + av_log(avc_context, AV_LOG_ERROR, "Error getting output packet of size %ld.\n", o_packet.bytes); + return ret; } - memcpy(outbuf, o_packet.packet, o_packet.bytes); + memcpy(pkt->data, o_packet.packet, o_packet.bytes); // HACK: assumes no encoder delay, this is true until libtheora becomes // multithreaded (which will be disabled unless explictly requested) - avc_context->coded_frame->pts = frame->pts; + pkt->pts = pkt->dts = frame->pts; avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask); + if (avc_context->coded_frame->key_frame) + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; - return o_packet.bytes; + return 0; } static av_cold int encode_close(AVCodecContext* avc_context) @@ -364,7 +367,7 @@ AVCodec ff_libtheora_encoder = { .priv_data_size = sizeof(TheoraContext), .init = encode_init, .close = encode_close, - .encode = encode_frame, + .encode2 = encode_frame, .capabilities = CODEC_CAP_DELAY, // needed to get the statsfile summary .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("libtheora Theora"), diff --git a/libavcodec/libvpxenc.c b/libavcodec/libvpxenc.c index c7e18f0229..0aaf54775b 100644 --- a/libavcodec/libvpxenc.c +++ b/libavcodec/libvpxenc.c @@ -380,33 +380,33 @@ static inline void cx_pktcpy(struct FrameListData *dst, } /** - * Store coded frame information in format suitable for 
return from encode(). + * Store coded frame information in format suitable for return from encode2(). * - * Write buffer information from @a cx_frame to @a buf & @a buf_size. - * Timing/frame details to @a coded_frame. - * @return Frame size written to @a buf on success - * @return AVERROR(EINVAL) on error + * Write information from @a cx_frame to @a pkt + * @return packet data size on success + * @return a negative AVERROR on error */ static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame, - uint8_t *buf, int buf_size, AVFrame *coded_frame) + AVPacket *pkt, AVFrame *coded_frame) { - if ((int) cx_frame->sz <= buf_size) { - buf_size = cx_frame->sz; - memcpy(buf, cx_frame->buf, buf_size); + int ret = ff_alloc_packet(pkt, cx_frame->sz); + if (ret >= 0) { + memcpy(pkt->data, cx_frame->buf, pkt->size); + pkt->pts = pkt->dts = cx_frame->pts; coded_frame->pts = cx_frame->pts; coded_frame->key_frame = !!(cx_frame->flags & VPX_FRAME_IS_KEY); - if (coded_frame->key_frame) + if (coded_frame->key_frame) { coded_frame->pict_type = AV_PICTURE_TYPE_I; - else + pkt->flags |= AV_PKT_FLAG_KEY; + } else coded_frame->pict_type = AV_PICTURE_TYPE_P; } else { av_log(avctx, AV_LOG_ERROR, - "Compressed frame larger than storage provided! (%zu/%d)\n", - cx_frame->sz, buf_size); - return AVERROR(EINVAL); + "Error getting output packet of size %zu.\n", cx_frame->sz); + return ret; } - return buf_size; + return pkt->size; } /** @@ -417,7 +417,7 @@ static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame, * @return AVERROR(EINVAL) on output size error * @return AVERROR(ENOMEM) on coded frame queue data allocation error */ -static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size, +static int queue_frames(AVCodecContext *avctx, AVPacket *pkt_out, AVFrame *coded_frame) { VP8Context *ctx = avctx->priv_data; @@ -428,9 +428,9 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size, if (ctx->coded_frame_list) { struct FrameListData *cx_frame = ctx->coded_frame_list; /* return the leading frame if we've already begun queueing */ - size = storeframe(avctx, cx_frame, buf, buf_size, coded_frame); + size = storeframe(avctx, cx_frame, pkt_out, coded_frame); if (size < 0) - return AVERROR(EINVAL); + return size; ctx->coded_frame_list = cx_frame->next; free_coded_frame(cx_frame); } @@ -447,9 +447,9 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size, provided a frame for output */ assert(!ctx->coded_frame_list); cx_pktcpy(&cx_frame, pkt); - size = storeframe(avctx, &cx_frame, buf, buf_size, coded_frame); + size = storeframe(avctx, &cx_frame, pkt_out, coded_frame); if (size < 0) - return AVERROR(EINVAL); + return size; } else { struct FrameListData *cx_frame = av_malloc(sizeof(struct FrameListData)); @@ -495,11 +495,10 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size, return size; } -static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size, - void *data) +static int vp8_encode(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *frame, int *got_packet) { VP8Context *ctx = avctx->priv_data; - AVFrame *frame = data; struct vpx_image *rawimg = NULL; int64_t timestamp = 0; int res, coded_size; @@ -521,7 +520,7 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size, log_encoder_error(avctx, "Error encoding frame"); return AVERROR_INVALIDDATA; } - coded_size = queue_frames(avctx, buf, buf_size, avctx->coded_frame); + coded_size = queue_frames(avctx, pkt, avctx->coded_frame); 
if (!frame && avctx->flags & CODEC_FLAG_PASS1) { unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz); @@ -535,7 +534,9 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size, av_base64_encode(avctx->stats_out, b64_size, ctx->twopass_stats.buf, ctx->twopass_stats.sz); } - return coded_size; + + *got_packet = !!coded_size; + return 0; } #define OFFSET(x) offsetof(VP8Context, x) @@ -598,7 +599,7 @@ AVCodec ff_libvpx_encoder = { .id = CODEC_ID_VP8, .priv_data_size = sizeof(VP8Context), .init = vp8_init, - .encode = vp8_encode, + .encode2 = vp8_encode, .close = vp8_free, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, diff --git a/libavcodec/libxavs.c b/libavcodec/libxavs.c index d7027d1092..0fc69300b5 100644 --- a/libavcodec/libxavs.c +++ b/libavcodec/libxavs.c @@ -37,6 +37,7 @@ #define XAVS_PART_B8X8 0x100 /* Analyze b16x8, b*/ typedef struct XavsContext { + AVClass *class; xavs_param_t params; xavs_t *enc; xavs_picture_t pic; @@ -53,6 +54,9 @@ typedef struct XavsContext { int fast_pskip; int mbtree; int mixed_refs; + + int64_t *pts_buffer; + int out_frame_count; } XavsContext; static void XAVS_log(void *p, int level, const char *fmt, va_list args) @@ -70,13 +74,24 @@ static void XAVS_log(void *p, int level, const char *fmt, va_list args) av_vlog(p, level_map[level], fmt, args); } -static int encode_nals(AVCodecContext *ctx, uint8_t *buf, - int size, xavs_nal_t *nals, - int nnal, int skip_sei) +static int encode_nals(AVCodecContext *ctx, AVPacket *pkt, + xavs_nal_t *nals, int nnal) { XavsContext *x4 = ctx->priv_data; - uint8_t *p = buf; - int i, s; + uint8_t *p; + int i, s, ret, size = x4->sei_size + FF_MIN_BUFFER_SIZE; + + if (!nnal) + return 0; + + for (i = 0; i < nnal; i++) + size += nals[i].i_payload; + + if ((ret = ff_alloc_packet(pkt, size)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", size); + return ret; + } + p = pkt->data; /* Write the SEI as part of the first frame. */ if (x4->sei_size > 0 && nnal > 0) { @@ -86,30 +101,22 @@ static int encode_nals(AVCodecContext *ctx, uint8_t *buf, } for (i = 0; i < nnal; i++) { - /* Don't put the SEI in extradata. */ - if (skip_sei && nals[i].i_type == NAL_SEI) { - x4->sei = av_malloc( 5 + nals[i].i_payload * 4 / 3 ); - if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nals + i) < 0) - return -1; - - continue; - } s = xavs_nal_encode(p, &size, 1, nals + i); if (s < 0) return -1; p += s; } + pkt->size = p - pkt->data; - return p - buf; + return 1; } -static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf, - int bufsize, void *data) +static int XAVS_frame(AVCodecContext *ctx, AVPacket *pkt, + const AVFrame *frame, int *got_packet) { XavsContext *x4 = ctx->priv_data; - AVFrame *frame = data; xavs_nal_t *nal; - int nnal, i; + int nnal, i, ret; xavs_picture_t pic_out; x4->pic.img.i_csp = XAVS_CSP_I420; @@ -123,29 +130,44 @@ static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf, x4->pic.i_pts = frame->pts; x4->pic.i_type = XAVS_TYPE_AUTO; + x4->pts_buffer[ctx->frame_number % (ctx->max_b_frames+1)] = frame->pts; } if (xavs_encoder_encode(x4->enc, &nal, &nnal, frame? 
&x4->pic: NULL, &pic_out) < 0) return -1; - bufsize = encode_nals(ctx, buf, bufsize, nal, nnal, 0); + ret = encode_nals(ctx, pkt, nal, nnal); - if (bufsize < 0) + if (ret < 0) return -1; - if (!bufsize && !frame && !(x4->end_of_stream)){ - buf[bufsize] = 0x0; - buf[bufsize+1] = 0x0; - buf[bufsize+2] = 0x01; - buf[bufsize+3] = 0xb1; - bufsize += 4; - x4->end_of_stream = END_OF_STREAM; - return bufsize; + if (!ret) { + if (!frame && !(x4->end_of_stream)) { + if ((ret = ff_alloc_packet(pkt, 4)) < 0) + return ret; + + pkt->data[0] = 0x0; + pkt->data[1] = 0x0; + pkt->data[2] = 0x01; + pkt->data[3] = 0xb1; + pkt->dts = 2*x4->pts_buffer[(x4->out_frame_count-1)%(ctx->max_b_frames+1)] - + x4->pts_buffer[(x4->out_frame_count-2)%(ctx->max_b_frames+1)]; + x4->end_of_stream = END_OF_STREAM; + *got_packet = 1; + } + return 0; } - /* FIXME: libxavs now provides DTS */ - /* but AVFrame doesn't have a field for it. */ + x4->out_pic.pts = pic_out.i_pts; + pkt->pts = pic_out.i_pts; + if (ctx->has_b_frames) { + if (!x4->out_frame_count) + pkt->dts = pkt->pts - (x4->pts_buffer[1] - x4->pts_buffer[0]); + else + pkt->dts = x4->pts_buffer[(x4->out_frame_count-1)%(ctx->max_b_frames+1)]; + } else + pkt->dts = pkt->pts; switch (pic_out.i_type) { case XAVS_TYPE_IDR: @@ -163,11 +185,16 @@ static int XAVS_frame(AVCodecContext *ctx, uint8_t *buf, /* There is no IDR frame in AVS JiZhun */ /* Sequence header is used as a flag */ - x4->out_pic.key_frame = pic_out.i_type == XAVS_TYPE_I; + if (pic_out.i_type == XAVS_TYPE_I) { + x4->out_pic.key_frame = 1; + pkt->flags |= AV_PKT_FLAG_KEY; + } x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA; - return bufsize; + x4->out_frame_count++; + *got_packet = ret; + return 0; } static av_cold int XAVS_close(AVCodecContext *avctx) @@ -176,6 +203,7 @@ static av_cold int XAVS_close(AVCodecContext *avctx) av_freep(&avctx->extradata); av_free(x4->sei); + av_freep(&x4->pts_buffer); if (x4->enc) xavs_encoder_close(x4->enc); @@ -324,17 +352,35 @@ static av_cold int XAVS_init(AVCodecContext *avctx) if (!x4->enc) return -1; + if (!(x4->pts_buffer = av_mallocz((avctx->max_b_frames+1) * sizeof(*x4->pts_buffer)))) + return AVERROR(ENOMEM); + avctx->coded_frame = &x4->out_pic; /* TAG: Do we have GLOBAL HEADER in AVS */ /* We Have PPS and SPS in AVS */ if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) { xavs_nal_t *nal; - int nnal, s; + int nnal, s, i, size; + uint8_t *p; s = xavs_encoder_headers(x4->enc, &nal, &nnal); - avctx->extradata = av_malloc(s); - avctx->extradata_size = encode_nals(avctx, avctx->extradata, s, nal, nnal, 1); + avctx->extradata = p = av_malloc(s); + for (i = 0; i < nnal; i++) { + /* Don't put the SEI in extradata. 
*/ + if (nal[i].i_type == NAL_SEI) { + x4->sei = av_malloc( 5 + nal[i].i_payload * 4 / 3 ); + if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nal + i) < 0) + return -1; + + continue; + } + size = xavs_nal_encode(p, &s, 1, nal + i); + if (size < 0) + return -1; + p += size; + } + avctx->extradata_size = p - avctx->extradata; } return 0; } @@ -376,7 +422,7 @@ AVCodec ff_libxavs_encoder = { .id = CODEC_ID_CAVS, .priv_data_size = sizeof(XavsContext), .init = XAVS_init, - .encode = XAVS_frame, + .encode2 = XAVS_frame, .close = XAVS_close, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS, .pix_fmts = (const enum PixelFormat[]) { PIX_FMT_YUV420P, PIX_FMT_NONE }, diff --git a/libavcodec/libxvidff.c b/libavcodec/libxvidff.c index 1501b44135..a77fafc7ec 100644 --- a/libavcodec/libxvidff.c +++ b/libavcodec/libxvidff.c @@ -33,6 +33,7 @@ #include "libavutil/intreadwrite.h" #include "libavutil/mathematics.h" #include "libxvid_internal.h" +#include "mpegvideo.h" /** * Buffer management macros. @@ -71,7 +72,7 @@ struct xvid_ff_pass1 { }; /* Prototypes - See function implementation for details */ -int xvid_strip_vol_header(AVCodecContext *avctx, unsigned char *frame, unsigned int header_len, unsigned int frame_len); +int xvid_strip_vol_header(AVCodecContext *avctx, AVPacket *pkt, unsigned int header_len, unsigned int frame_len); int xvid_ff_2pass(void *ref, int opt, void *p1, void *p2); void xvid_correct_framerate(AVCodecContext *avctx); @@ -370,17 +371,25 @@ static av_cold int xvid_encode_init(AVCodecContext *avctx) { * @param data Pointer to AVFrame of unencoded frame * @return Returns 0 on success, -1 on failure */ -static int xvid_encode_frame(AVCodecContext *avctx, - unsigned char *frame, int buf_size, void *data) { - int xerr, i; +static int xvid_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *picture, int *got_packet) +{ + int xerr, i, ret, user_packet = !!pkt->data; char *tmp; struct xvid_context *x = avctx->priv_data; - AVFrame *picture = data; AVFrame *p = &x->encoded_picture; + int mb_width = (avctx->width + 15) / 16; + int mb_height = (avctx->height + 15) / 16; xvid_enc_frame_t xvid_enc_frame; xvid_enc_stats_t xvid_enc_stats; + if (!user_packet && + (ret = av_new_packet(pkt, mb_width*mb_height*MAX_MB_BYTES + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + /* Start setting up the frame */ memset(&xvid_enc_frame, 0, sizeof(xvid_enc_frame)); xvid_enc_frame.version = XVID_VERSION; @@ -389,8 +398,8 @@ static int xvid_encode_frame(AVCodecContext *avctx, *p = *picture; /* Let Xvid know where to put the frame. 
*/ - xvid_enc_frame.bitstream = frame; - xvid_enc_frame.length = buf_size; + xvid_enc_frame.bitstream = pkt->data; + xvid_enc_frame.length = pkt->size; /* Initialize input image fields */ if( avctx->pix_fmt != PIX_FMT_YUV420P ) { @@ -450,7 +459,9 @@ static int xvid_encode_frame(AVCodecContext *avctx, } } - if( 0 <= xerr ) { + if (xerr > 0) { + *got_packet = 1; + p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA; if( xvid_enc_stats.type == XVID_TYPE_PVOP ) p->pict_type = AV_PICTURE_TYPE_P; @@ -462,14 +473,21 @@ static int xvid_encode_frame(AVCodecContext *avctx, p->pict_type = AV_PICTURE_TYPE_I; if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) { p->key_frame = 1; + pkt->flags |= AV_PKT_FLAG_KEY; if( x->quicktime_format ) - return xvid_strip_vol_header(avctx, frame, + return xvid_strip_vol_header(avctx, pkt, xvid_enc_stats.hlength, xerr); } else p->key_frame = 0; - return xerr; + pkt->size = xerr; + + return 0; } else { + if (!user_packet) + av_free_packet(pkt); + if (!xerr) + return 0; av_log(avctx, AV_LOG_ERROR, "Xvid: Encoding Error Occurred: %i\n", xerr); return -1; } @@ -514,16 +532,16 @@ static av_cold int xvid_encode_close(AVCodecContext *avctx) { * @return Returns new length of frame data */ int xvid_strip_vol_header(AVCodecContext *avctx, - unsigned char *frame, + AVPacket *pkt, unsigned int header_len, unsigned int frame_len) { int vo_len = 0, i; for( i = 0; i < header_len - 3; i++ ) { - if( frame[i] == 0x00 && - frame[i+1] == 0x00 && - frame[i+2] == 0x01 && - frame[i+3] == 0xB6 ) { + if( pkt->data[i] == 0x00 && + pkt->data[i+1] == 0x00 && + pkt->data[i+2] == 0x01 && + pkt->data[i+3] == 0xB6 ) { vo_len = i; break; } @@ -533,15 +551,15 @@ int xvid_strip_vol_header(AVCodecContext *avctx, /* We need to store the header, so extract it */ if( avctx->extradata == NULL ) { avctx->extradata = av_malloc(vo_len); - memcpy(avctx->extradata, frame, vo_len); + memcpy(avctx->extradata, pkt->data, vo_len); avctx->extradata_size = vo_len; } /* Less dangerous now, memmove properly copies the two chunks of overlapping data */ - memmove(frame, &frame[vo_len], frame_len - vo_len); - return frame_len - vo_len; - } else - return frame_len; + memmove(pkt->data, &pkt->data[vo_len], frame_len - vo_len); + pkt->size = frame_len - vo_len; + } + return 0; } /** @@ -777,7 +795,7 @@ AVCodec ff_libxvid_encoder = { .id = CODEC_ID_MPEG4, .priv_data_size = sizeof(struct xvid_context), .init = xvid_encode_init, - .encode = xvid_encode_frame, + .encode2 = xvid_encode_frame, .close = xvid_encode_close, .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .long_name= NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"), diff --git a/libavcodec/ljpegenc.c b/libavcodec/ljpegenc.c index 1514b018ad..00d34ecc9c 100644 --- a/libavcodec/ljpegenc.c +++ b/libavcodec/ljpegenc.c @@ -32,21 +32,37 @@ #include "avcodec.h" #include "dsputil.h" +#include "internal.h" #include "mpegvideo.h" #include "mjpeg.h" #include "mjpegenc.h" -static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ MpegEncContext * const s = avctx->priv_data; MJpegContext * const m = s->mjpeg_ctx; - AVFrame *pict = data; const int width= s->width; const int height= s->height; AVFrame * const p= (AVFrame*)&s->current_picture; const int predictor= avctx->prediction_method+1; + const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0]; + const int mb_height = (height + 
s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0]; + int ret, max_pkt_size = FF_MIN_BUFFER_SIZE; + + if (avctx->pix_fmt == PIX_FMT_BGRA) + max_pkt_size += width * height * 3 * 4; + else { + max_pkt_size += mb_width * mb_height * 3 * 4 + * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]; + } + if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", max_pkt_size); + return ret; + } - init_put_bits(&s->pb, buf, buf_size); + init_put_bits(&s->pb, pkt->data, pkt->size); *p = *pict; p->pict_type= AV_PICTURE_TYPE_I; @@ -112,8 +128,6 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in } }else{ int mb_x, mb_y, i; - const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0]; - const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0]; for(mb_y = 0; mb_y < mb_height; mb_y++) { if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){ @@ -189,7 +203,11 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in s->picture_number++; flush_put_bits(&s->pb); - return put_bits_ptr(&s->pb) - s->pb.buf; + pkt->size = put_bits_ptr(&s->pb) - s->pb.buf; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; // return (put_bits_count(&f->pb)+7)/8; } @@ -200,7 +218,7 @@ AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need t .id = CODEC_ID_LJPEG, .priv_data_size = sizeof(MpegEncContext), .init = ff_MPV_encode_init, - .encode = encode_picture_lossless, + .encode2 = encode_picture_lossless, .close = ff_MPV_encode_end, .long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"), }; diff --git a/libavcodec/qtrleenc.c b/libavcodec/qtrleenc.c index da520bc7eb..6ca4a3e818 100644 --- a/libavcodec/qtrleenc.c +++ b/libavcodec/qtrleenc.c @@ -25,6 +25,7 @@ #include "libavutil/imgutils.h" #include "avcodec.h" #include "bytestream.h" +#include "internal.h" /** Maximum RLE code for bulk copy */ #define MAX_RLE_BULK 127 @@ -102,7 +103,7 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx) return -1; } - s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size /* image base material */ + s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size*2 /* image base material */ + 15 /* header + footer */ + s->avctx->height*2 /* skip code+rle end */ + s->logical_width/MAX_RLE_BULK + 1 /* rle codes */; @@ -113,7 +114,7 @@ static av_cold int qtrle_encode_init(AVCodecContext *avctx) /** * Compute the best RLE sequence for a line */ -static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t **buf) +static void qtrle_encode_line(QtrleEncContext *s, const AVFrame *p, int line, uint8_t **buf) { int width=s->logical_width; int i; @@ -259,7 +260,7 @@ static void qtrle_encode_line(QtrleEncContext *s, AVFrame *p, int line, uint8_t } /** Encode frame including header */ -static int encode_frame(QtrleEncContext *s, AVFrame *p, uint8_t *buf) +static int encode_frame(QtrleEncContext *s, const AVFrame *p, uint8_t *buf) { int i; int start_line = 0; @@ -300,19 +301,19 @@ static int encode_frame(QtrleEncContext *s, AVFrame *p, uint8_t *buf) return buf - orig_buf; } -static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data) +static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) { QtrleEncContext * const s = avctx->priv_data; - AVFrame *pict = data; AVFrame * const p = 
&s->frame; - int chunksize; + int ret; *p = *pict; - if (buf_size < s->max_buf_size) { + if ((ret = ff_alloc_packet(pkt, s->max_buf_size)) < 0) { /* Upper bound check for compressed data */ - av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->max_buf_size); - return -1; + av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", s->max_buf_size); + return ret; } if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) { @@ -325,11 +326,16 @@ static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, p->key_frame = 0; } - chunksize = encode_frame(s, pict, buf); + pkt->size = encode_frame(s, pict, pkt->data); /* save the current frame */ av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height); - return chunksize; + + if (p->key_frame) + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } static av_cold int qtrle_encode_end(AVCodecContext *avctx) @@ -349,7 +355,7 @@ AVCodec ff_qtrle_encoder = { .id = CODEC_ID_QTRLE, .priv_data_size = sizeof(QtrleEncContext), .init = qtrle_encode_init, - .encode = qtrle_encode_frame, + .encode2 = qtrle_encode_frame, .close = qtrle_encode_end, .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE}, .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"), diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c index 4c118fe986..2bcbd7e721 100644 --- a/libavcodec/snowenc.c +++ b/libavcodec/snowenc.c @@ -1600,17 +1600,25 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){ } } -static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ +static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) +{ SnowContext *s = avctx->priv_data; RangeCoder * const c= &s->c; - AVFrame *pict = data; + AVFrame *pic = &s->new_picture; const int width= s->avctx->width; const int height= s->avctx->height; - int level, orientation, plane_index, i, y; + int level, orientation, plane_index, i, y, ret; uint8_t rc_header_bak[sizeof(s->header_state)]; uint8_t rc_block_bak[sizeof(s->block_state)]; - ff_init_range_encoder(c, buf, buf_size); + if (!pkt->data && + (ret = av_new_packet(pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + + ff_init_range_encoder(c, pkt->data, pkt->size); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); for(i=0; i<3; i++){ @@ -1624,27 +1632,25 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, s->m.picture_number= avctx->frame_number; if(avctx->flags&CODEC_FLAG_PASS2){ - s->m.pict_type = - pict->pict_type= s->m.rc_context.entry[avctx->frame_number].new_pict_type; - s->keyframe= pict->pict_type==AV_PICTURE_TYPE_I; + s->m.pict_type = pic->pict_type = s->m.rc_context.entry[avctx->frame_number].new_pict_type; + s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I; if(!(avctx->flags&CODEC_FLAG_QSCALE)) { - pict->quality= ff_rate_estimate_qscale(&s->m, 0); - if (pict->quality < 0) + pic->quality = ff_rate_estimate_qscale(&s->m, 0); + if (pic->quality < 0) return -1; } }else{ s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0; - s->m.pict_type= - pict->pict_type= s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; + s->m.pict_type = pic->pict_type = s->keyframe ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; } if(s->pass1_rc && avctx->frame_number == 0) - pict->quality= 2*FF_QP2LAMBDA; - if(pict->quality){ - s->qlog= qscale2qlog(pict->quality); - s->lambda = pict->quality * 3/2; + pic->quality = 2*FF_QP2LAMBDA; + if (pic->quality) { + s->qlog = qscale2qlog(pic->quality); + s->lambda = pic->quality * 3/2; } - if(s->qlog < 0 || (!pict->quality && (avctx->flags & CODEC_FLAG_QSCALE))){ + if (s->qlog < 0 || (!pic->quality && (avctx->flags & CODEC_FLAG_QSCALE))) { s->qlog= LOSSLESS_QLOG; s->lambda = 0; }//else keep previous frame's qlog until after motion estimation @@ -1654,7 +1660,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, s->m.current_picture_ptr= &s->m.current_picture; s->m.last_picture.f.pts = s->m.current_picture.f.pts; s->m.current_picture.f.pts = pict->pts; - if(pict->pict_type == AV_PICTURE_TYPE_P){ + if(pic->pict_type == AV_PICTURE_TYPE_P){ int block_width = (width +15)>>4; int block_height= (height+15)>>4; int stride= s->current_picture.linesize[0]; @@ -1679,7 +1685,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, s->m.mb_stride= s->m.mb_width+1; s->m.b8_stride= 2*s->m.mb_width+1; s->m.f_code=1; - s->m.pict_type= pict->pict_type; + s->m.pict_type = pic->pict_type; s->m.me_method= s->avctx->me_method; s->m.me.scene_change_score=0; s->m.flags= s->avctx->flags; @@ -1703,13 +1709,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, redo_frame: - if(pict->pict_type == AV_PICTURE_TYPE_I) + if (pic->pict_type == AV_PICTURE_TYPE_I) s->spatial_decomposition_count= 5; else s->spatial_decomposition_count= 5; - s->m.pict_type = pict->pict_type; - s->qbias= pict->pict_type == AV_PICTURE_TYPE_P ? 2 : 0; + s->m.pict_type = pic->pict_type; + s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0; ff_snow_common_init_after_header(avctx); @@ -1742,12 +1748,12 @@ redo_frame: predict_plane(s, s->spatial_idwt_buffer, plane_index, 0); if( plane_index==0 - && pict->pict_type == AV_PICTURE_TYPE_P + && pic->pict_type == AV_PICTURE_TYPE_P && !(avctx->flags&CODEC_FLAG_PASS2) && s->m.me.scene_change_score > s->avctx->scenechange_threshold){ - ff_init_range_encoder(c, buf, buf_size); + ff_init_range_encoder(c, pkt->data, pkt->size); ff_build_rac_states(c, 0.05*(1LL<<32), 256-8); - pict->pict_type= AV_PICTURE_TYPE_I; + pic->pict_type= AV_PICTURE_TYPE_I; s->keyframe=1; s->current_picture.key_frame=1; goto redo_frame; @@ -1773,12 +1779,12 @@ redo_frame: ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count); if(s->pass1_rc && plane_index==0){ - int delta_qlog = ratecontrol_1pass(s, pict); + int delta_qlog = ratecontrol_1pass(s, pic); if (delta_qlog <= INT_MIN) return -1; if(delta_qlog){ //reordering qlog in the bitstream would eliminate this reset - ff_init_range_encoder(c, buf, buf_size); + ff_init_range_encoder(c, pkt->data, pkt->size); memcpy(s->header_state, rc_header_bak, sizeof(s->header_state)); memcpy(s->block_state, rc_block_bak, sizeof(s->block_state)); encode_header(s); @@ -1793,7 +1799,7 @@ redo_frame: if(!QUANTIZE2) quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias); if(orientation==0) - decorrelate(s, b, b->ibuf, b->stride, pict->pict_type == AV_PICTURE_TYPE_P, 0); + decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0); encode_subband(s, b, b->ibuf, b->parent ? 
b->parent->ibuf : NULL, b->stride, orientation); assert(b->parent==NULL || b->parent->stride == b->stride*2); if(orientation==0) @@ -1820,7 +1826,7 @@ redo_frame: predict_plane(s, s->spatial_idwt_buffer, plane_index, 1); }else{ //ME/MC only - if(pict->pict_type == AV_PICTURE_TYPE_I){ + if(pic->pict_type == AV_PICTURE_TYPE_I){ for(y=0; y<h; y++){ for(x=0; x<w; x++){ s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]= @@ -1859,7 +1865,7 @@ redo_frame: s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits; s->m.current_picture.f.display_picture_number = s->m.current_picture.f.coded_picture_number = avctx->frame_number; - s->m.current_picture.f.quality = pict->quality; + s->m.current_picture.f.quality = pic->quality; s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start); if(s->pass1_rc) if (ff_rate_estimate_qscale(&s->m, 0) < 0) @@ -1874,7 +1880,12 @@ redo_frame: emms_c(); - return ff_rac_terminate(c); + pkt->size = ff_rac_terminate(c); + if (avctx->coded_frame->key_frame) + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; + + return 0; } static av_cold int encode_end(AVCodecContext *avctx) @@ -1909,7 +1920,7 @@ AVCodec ff_snow_encoder = { .id = CODEC_ID_SNOW, .priv_data_size = sizeof(SnowContext), .init = encode_init, - .encode = encode_frame, + .encode2 = encode_frame, .close = encode_end, .long_name = NULL_IF_CONFIG_SMALL("Snow"), .priv_class = &snowenc_class, diff --git a/libavcodec/tiffenc.c b/libavcodec/tiffenc.c index 85976f41c0..a0fecf7b0f 100644 --- a/libavcodec/tiffenc.c +++ b/libavcodec/tiffenc.c @@ -202,15 +202,13 @@ static void pack_yuv(TiffEncoderContext * s, uint8_t * dst, int lnum) } } -static int encode_frame(AVCodecContext * avctx, unsigned char *buf, - int buf_size, void *data) +static int encode_frame(AVCodecContext * avctx, AVPacket *pkt, + const AVFrame *pict, int *got_packet) { TiffEncoderContext *s = avctx->priv_data; - AVFrame *pict = data; AVFrame *const p = (AVFrame *) & s->picture; int i; - int n; - uint8_t *ptr = buf; + uint8_t *ptr; uint8_t *offset; uint32_t strips; uint32_t *strip_sizes = NULL; @@ -224,9 +222,6 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf, int shift_h, shift_v; s->avctx = avctx; - s->buf_start = buf; - s->buf = &ptr; - s->buf_size = buf_size; *p = *pict; p->pict_type = AV_PICTURE_TYPE_I; @@ -308,6 +303,17 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf, strips = (s->height - 1) / s->rps + 1; + if (!pkt->data && + (ret = av_new_packet(pkt, avctx->width * avctx->height * s->bpp * 2 + + avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); + return ret; + } + ptr = pkt->data; + s->buf_start = pkt->data; + s->buf = &ptr; + s->buf_size = pkt->size; + if (check_size(s, 8)) goto fail; @@ -339,7 +345,7 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf, zlen = bytes_per_row * s->rps; zbuf = av_malloc(zlen); - strip_offsets[0] = ptr - buf; + strip_offsets[0] = ptr - pkt->data; zn = 0; for (j = 0; j < s->rps; j++) { if (is_yuv){ @@ -352,14 +358,14 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf, p->data[0] + j * p->linesize[0], bytes_per_row); zn += bytes_per_row; } - n = encode_strip(s, zbuf, ptr, zn, s->compr); + ret = encode_strip(s, zbuf, ptr, zn, s->compr); av_free(zbuf); - if (n<0) { + if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n"); goto fail; } - ptr += n; - strip_sizes[0] = ptr - buf - strip_offsets[0]; + ptr += ret; + 
strip_sizes[0] = ptr - pkt->data - strip_offsets[0]; } else #endif { @@ -371,24 +377,23 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf, ff_lzw_encode_init(s->lzws, ptr, s->buf_size - (*s->buf - s->buf_start), 12, FF_LZW_TIFF, put_bits); } - strip_offsets[i / s->rps] = ptr - buf; + strip_offsets[i / s->rps] = ptr - pkt->data; } if (is_yuv){ pack_yuv(s, yuv_line, i); - n = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr); + ret = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr); i += s->subsampling[1] - 1; } else - n = encode_strip(s, p->data[0] + i * p->linesize[0], + ret = encode_strip(s, p->data[0] + i * p->linesize[0], ptr, bytes_per_row, s->compr); - if (n < 0) { + if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n"); goto fail; } - strip_sizes[i / s->rps] += n; - ptr += n; + strip_sizes[i / s->rps] += ret; + ptr += ret; if(s->compr == TIFF_LZW && (i==s->height-1 || i%s->rps == s->rps-1)){ - int ret; ret = ff_lzw_encode_flush(s->lzws, flush_put_bits); strip_sizes[(i / s->rps )] += ret ; ptr += ret; @@ -440,15 +445,19 @@ static int encode_frame(AVCodecContext * avctx, unsigned char *buf, add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling); add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw); } - bytestream_put_le32(&offset, ptr - buf); // write offset to dir + bytestream_put_le32(&offset, ptr - pkt->data); // write offset to dir - if (check_size(s, 6 + s->num_entries * 12)) + if (check_size(s, 6 + s->num_entries * 12)) { + ret = AVERROR(EINVAL); goto fail; + } bytestream_put_le16(&ptr, s->num_entries); // write tag count bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12); bytestream_put_le32(&ptr, 0); - ret = ptr - buf; + pkt->size = ptr - pkt->data; + pkt->flags |= AV_PKT_FLAG_KEY; + *got_packet = 1; fail: av_free(strip_sizes); @@ -483,7 +492,7 @@ AVCodec ff_tiff_encoder = { .type = AVMEDIA_TYPE_VIDEO, .id = CODEC_ID_TIFF, .priv_data_size = sizeof(TiffEncoderContext), - .encode = encode_frame, + .encode2 = encode_frame, .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE, diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c index 01d9f1ee8a..985823dc25 100644 --- a/libavcodec/truemotion2.c +++ b/libavcodec/truemotion2.c @@ -135,7 +135,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code) huff.val_bits, huff.max_bits); return -1; } - if((huff.nodes < 0) || (huff.nodes > 0x10000)) { + if((huff.nodes <= 0) || (huff.nodes > 0x10000)) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes); return -1; } diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 7a4e01c833..7bd1059164 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -1188,9 +1188,6 @@ int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf } #endif -#define MAX_CODED_FRAME_SIZE(width, height)\ - (9*(width)*(height) + FF_MIN_BUFFER_SIZE) - int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, @@ -1209,44 +1206,15 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, if (av_image_check_size(avctx->width, avctx->height, 0, avctx)) return AVERROR(EINVAL); - if (avctx->codec->encode2) { - *got_packet_ptr = 0; - ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); - if (!ret) { - if (!*got_packet_ptr) - avpkt->size = 0; - else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) - avpkt->pts = 
avpkt->dts = frame->pts; - } - } else { - /* for compatibility with encoders not supporting encode2(), we need to - allocate a packet buffer if the user has not provided one or check - the size otherwise */ - int buf_size = avpkt->size; - - if (!user_packet) - buf_size = MAX_CODED_FRAME_SIZE(avctx->width, avctx->height); - - if ((ret = ff_alloc_packet(avpkt, buf_size))) - return ret; - - /* encode the frame */ - ret = avctx->codec->encode(avctx, avpkt->data, avpkt->size, frame); - if (ret >= 0) { - if (!ret) { - /* no output. if the packet data was allocated by libavcodec, - free it */ - if (!user_packet) - av_freep(&avpkt->data); - } else if (avctx->coded_frame) { - avpkt->pts = avctx->coded_frame->pts; - avpkt->flags |= AV_PKT_FLAG_KEY*!!avctx->coded_frame->key_frame; - } + av_assert0(avctx->codec->encode2); - avpkt->size = ret; - *got_packet_ptr = (ret > 0); - ret = 0; - } + *got_packet_ptr = 0; + ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); + if (!ret) { + if (!*got_packet_ptr) + avpkt->size = 0; + else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) + avpkt->pts = avpkt->dts = frame->pts; } if (!ret) diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c index a9d152fde3..09d22beef6 100644 --- a/libavcodec/wavpack.c +++ b/libavcodec/wavpack.c @@ -813,8 +813,8 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, s->hybrid = s->frame_flags & WV_HYBRID_MODE; s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE; s->post_shift = bpp * 8 - orig_bpp + ((s->frame_flags >> 13) & 0x1f); - s->hybrid_maxclip = (( 1LL << (orig_bpp - 1)) - 1) >> s->post_shift; - s->hybrid_minclip = ((-1LL << (orig_bpp - 1))) >> s->post_shift; + s->hybrid_maxclip = (( 1LL << (orig_bpp - 1)) - 1); + s->hybrid_minclip = ((-1LL << (orig_bpp - 1))); s->CRC = AV_RL32(buf); buf += 4; if (wc->mkv_mode) buf += 4; //skip block size; |
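
Nearly every hunk above applies the same encode2() conversion recipe: the callback now receives an AVPacket plus a got_packet flag instead of a raw buf/buf_size pair, sizes the packet itself with ff_alloc_packet() (or av_new_packet()) against a worst-case bound, fills in pkt->size, pkt->flags and *got_packet, and returns only an error code. A minimal sketch of that shape follows; it is not code from this tree, foo_encode_frame is a hypothetical intra-only encoder and the size bound is illustrative (real encoders derive it from bpp and macroblock counts, as the ljpegenc and qtrleenc hunks do).

#include "avcodec.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"

static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    /* worst-case output bound plus the usual headroom */
    int ret, size = avctx->width * avctx->height * 3 + FF_MIN_BUFFER_SIZE;

    /* ff_alloc_packet() reuses the caller's buffer when it is large enough,
       otherwise it allocates one; the old encode() callback instead had to
       trust a fixed buf/buf_size pair handed down by avcodec_encode_video() */
    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", size);
        return ret;
    }

    /* real bitstream writing from 'pict' would go here; a dummy 4-byte
       payload stands in so the sketch stays self-contained */
    AV_WL32(pkt->data, avctx->frame_number);
    pkt->size = 4;

    pkt->flags |= AV_PKT_FLAG_KEY;  /* intra-only: every packet is a keyframe */
    *got_packet = 1;                /* output is reported through this flag... */
    return 0;                       /* ...the return value is only an error code */
}

On the registration side the AVCodec entry switches from .encode to .encode2, and encoders with delayed output (libvpx, libxavs in the hunks above) keep CODEC_CAP_DELAY so they can be flushed with a NULL frame.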
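
The libxavs.c changes do more than swap the callback: since encode2() output carries timestamps on the packet, the encoder now records the pts of every submitted frame in a ring buffer of max_b_frames + 1 entries and derives each packet's dts from it, with the very first packet's dts extrapolated one frame step backwards. Below is a standalone sketch of that bookkeeping with hypothetical names (DtsState, dts_push, dts_next); like the patch, it assumes output order lags input order by at most max_b_frames and that at least two frames have been submitted before the first reordered packet appears.

#include <stdint.h>

typedef struct DtsState {
    int64_t *pts_buffer;   /* max_b_frames + 1 entries */
    int      max_b_frames;
    int      in_count;     /* frames handed to the encoder    */
    int      out_count;    /* packets produced by the encoder */
} DtsState;

static void dts_push(DtsState *s, int64_t pts)
{
    s->pts_buffer[s->in_count++ % (s->max_b_frames + 1)] = pts;
}

static int64_t dts_next(DtsState *s, int64_t out_pts, int has_b_frames)
{
    int n = s->max_b_frames + 1;
    int64_t dts;

    if (!has_b_frames)
        dts = out_pts;                 /* no reordering: dts equals pts */
    else if (!s->out_count)            /* first packet: step back by one frame duration */
        dts = out_pts - (s->pts_buffer[1] - s->pts_buffer[0]);
    else
        dts = s->pts_buffer[(s->out_count - 1) % n];
    s->out_count++;
    return dts;
}

The end-of-stream packet in the hunk gets the mirror-image treatment: its dts is extrapolated forward from the last two buffered values (the 2*pts[n-1] - pts[n-2] expression).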
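
xvid_strip_vol_header() keeps its logic but now edits the packet in place: it scans the reported header bytes for the VOP start code 00 00 01 B6, stores everything before it (the VOL/VOS header) as extradata on the first frame, and memmoves the payload down so the packet begins at the VOP. A compressed illustration of that start-code search and in-place strip; strip_vol_header is a hypothetical helper, not the tree's function, and the extradata/QuickTime handling is left out. header_out must have room for header_len bytes.

#include <stdint.h>
#include <string.h>

static int strip_vol_header(uint8_t *data, int frame_len, int header_len,
                            uint8_t *header_out, int *header_out_len)
{
    int i, vo_len = 0;

    /* look for the VOP start code inside the reported header area */
    for (i = 0; i + 3 < header_len; i++) {
        if (data[i] == 0x00 && data[i + 1] == 0x00 &&
            data[i + 2] == 0x01 && data[i + 3] == 0xB6) {
            vo_len = i;
            break;
        }
    }
    if (!vo_len)
        return frame_len;              /* no VOP start code: nothing to strip */

    memcpy(header_out, data, vo_len);  /* keep the header, e.g. as extradata */
    *header_out_len = vo_len;
    /* source and destination overlap, so memmove (not memcpy) is required */
    memmove(data, data + vo_len, frame_len - vo_len);
    return frame_len - vo_len;         /* new payload size */
}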
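
With the last video encoders converted, utils.c drops the encode() compatibility path together with its MAX_CODED_FRAME_SIZE heuristic and simply asserts that encode2 is present. From the caller's side nothing changes; here is a small usage sketch of avcodec_encode_video2() under the new regime, with the write-out replaced by a printf and error handling trimmed.

#include <stdio.h>
#include <inttypes.h>
#include <libavcodec/avcodec.h>

static int encode_one(AVCodecContext *enc, const AVFrame *frame)
{
    AVPacket pkt;
    int got_packet = 0, ret;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* NULL data + size 0: let the encoder allocate the packet */
    pkt.size = 0;

    ret = avcodec_encode_video2(enc, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret;

    if (got_packet) {
        printf("packet: %d bytes, pts %"PRId64", key=%d\n",
               pkt.size, pkt.pts, !!(pkt.flags & AV_PKT_FLAG_KEY));
        av_free_packet(&pkt);
    }
    return 0;
}

Encoders flagged with CODEC_CAP_DELAY are drained the same way: keep calling with frame == NULL until got_packet comes back 0.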