author     anatoly <anatoly.nenashev@ovsoft.ru>       2011-03-27 22:16:25 +0200
committer  Anton Khirnov <anton@khirnov.net>          2011-03-30 07:18:34 +0200
commit     e0e3b8b297bae5144f23fd4b46a1309857040b63 (patch)
tree       93c5bd8d69c498c14811c0e144fe578622c4d6e9
parent     f16055eedf7999bb0c754dfde47d8c726b94c847 (diff)
download   ffmpeg-e0e3b8b297bae5144f23fd4b46a1309857040b63.tar.gz
Add support for picture_ptr field in MJpegDecodeContext
Signed-off-by: Anton Khirnov <anton@khirnov.net>
-rw-r--r--   libavcodec/jpeglsdec.c | 18
-rw-r--r--   libavcodec/mjpegbdec.c |  2
-rw-r--r--   libavcodec/mjpegdec.c  | 36
-rw-r--r--   libavcodec/mjpegdec.h  |  1
4 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/libavcodec/jpeglsdec.c b/libavcodec/jpeglsdec.c
index 2ee719b374..adaa0a3d96 100644
--- a/libavcodec/jpeglsdec.c
+++ b/libavcodec/jpeglsdec.c
@@ -262,9 +262,9 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
     JLSState *state;
     int off = 0, stride = 1, width, shift;
 
-    zero = av_mallocz(s->picture.linesize[0]);
+    zero = av_mallocz(s->picture_ptr->linesize[0]);
     last = zero;
-    cur = s->picture.data[0];
+    cur = s->picture_ptr->data[0];
 
     state = av_mallocz(sizeof(JLSState));
     /* initialize JPEG-LS state from JPEG parameters */
@@ -299,7 +299,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
                 t = *((uint16_t*)last);
             }
             last = cur;
-            cur += s->picture.linesize[0];
+            cur += s->picture_ptr->linesize[0];
 
             if (s->restart_interval && !--s->restart_count) {
                 align_get_bits(&s->gb);
@@ -309,7 +309,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
     } else if(ilv == 1) { /* line interleaving */
         int j;
         int Rc[3] = {0, 0, 0};
-        memset(cur, 0, s->picture.linesize[0]);
+        memset(cur, 0, s->picture_ptr->linesize[0]);
         width = s->width * 3;
         for(i = 0; i < s->height; i++) {
             for(j = 0; j < 3; j++) {
@@ -322,7 +322,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
                 }
             }
             last = cur;
-            cur += s->picture.linesize[0];
+            cur += s->picture_ptr->linesize[0];
         }
     } else if(ilv == 2) { /* sample interleaving */
         av_log(s->avctx, AV_LOG_ERROR, "Sample interleaved images are not supported.\n");
@@ -337,22 +337,22 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
         w = s->width * s->nb_components;
 
         if(s->bits <= 8){
-            uint8_t *src = s->picture.data[0];
+            uint8_t *src = s->picture_ptr->data[0];
 
             for(i = 0; i < s->height; i++){
                 for(x = off; x < w; x+= stride){
                     src[x] <<= shift;
                 }
-                src += s->picture.linesize[0];
+                src += s->picture_ptr->linesize[0];
             }
         }else{
-            uint16_t *src = (uint16_t*) s->picture.data[0];
+            uint16_t *src = (uint16_t*) s->picture_ptr->data[0];
 
             for(i = 0; i < s->height; i++){
                 for(x = 0; x < w; x++){
                     src[x] <<= shift;
                 }
-                src += s->picture.linesize[0]/2;
+                src += s->picture_ptr->linesize[0]/2;
             }
         }
     }
diff --git a/libavcodec/mjpegbdec.c b/libavcodec/mjpegbdec.c
index f3366ffd6f..837304ea74 100644
--- a/libavcodec/mjpegbdec.c
+++ b/libavcodec/mjpegbdec.c
@@ -129,7 +129,7 @@ read_header:
 
     //XXX FIXME factorize, this looks very similar to the EOI code
 
-    *picture= s->picture;
+    *picture= *s->picture_ptr;
     *data_size = sizeof(AVFrame);
 
     if(!s->lossless){
diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c
index da83cdfe2e..6f29e466eb 100644
--- a/libavcodec/mjpegdec.c
+++ b/libavcodec/mjpegdec.c
@@ -81,6 +81,9 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
 {
     MJpegDecodeContext *s = avctx->priv_data;
 
+    if (!s->picture_ptr)
+        s->picture_ptr = &s->picture;
+
     s->avctx = avctx;
     dsputil_init(&s->dsp, avctx);
     ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
@@ -282,8 +285,8 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
         s->height < ((s->org_height * 3) / 4)) {
         s->interlaced = 1;
         s->bottom_field = s->interlace_polarity;
-        s->picture.interlaced_frame = 1;
-        s->picture.top_field_first = !s->interlace_polarity;
+        s->picture_ptr->interlaced_frame = 1;
+        s->picture_ptr->top_field_first = !s->interlace_polarity;
         height *= 2;
     }
 
@@ -342,20 +345,19 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
            s->avctx->pix_fmt = PIX_FMT_GRAY16;
     }
 
-    if(s->picture.data[0])
-        s->avctx->release_buffer(s->avctx, &s->picture);
+    if(s->picture_ptr->data[0])
+        s->avctx->release_buffer(s->avctx, s->picture_ptr);
 
-    s->picture.reference= 0;
-    if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
+    if(s->avctx->get_buffer(s->avctx, s->picture_ptr) < 0){
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
-    s->picture.pict_type= FF_I_TYPE;
-    s->picture.key_frame= 1;
+    s->picture_ptr->pict_type= FF_I_TYPE;
+    s->picture_ptr->key_frame= 1;
     s->got_picture = 1;
 
     for(i=0; i<3; i++){
-        s->linesize[i]= s->picture.linesize[i] << s->interlaced;
+        s->linesize[i]= s->picture_ptr->linesize[i] << s->interlaced;
     }
 
 //    printf("%d %d %d %d %d %d\n", s->width, s->height, s->linesize[0], s->linesize[1], s->interlaced, s->avctx->height);
@@ -635,7 +637,7 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point
     }
     for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
         const int modified_predictor= mb_y ? predictor : 1;
-        uint8_t *ptr = s->picture.data[0] + (linesize * mb_y);
+        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
 
         if (s->interlaced && s->bottom_field)
             ptr += linesize >> 1;
@@ -712,7 +714,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
                     for(j=0; j<n; j++) {
                         int pred;
 
-                        ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                        ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                         if(y==0 && mb_y==0){
                             if(x==0 && mb_x==0){
                                 pred= 128 << point_transform;
@@ -752,7 +754,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
                     for(j=0; j<n; j++) {
                         int pred;
 
-                        ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                        ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                         PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
                         *ptr= pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform);
                         if (++x == h) {
@@ -804,7 +806,7 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, i
     }
     for(i=0; i < nb_components; i++) {
         int c = s->comp_index[i];
-        data[c] = s->picture.data[c];
+        data[c] = s->picture_ptr->data[c];
         reference_data[c] = reference ? reference->data[c] : NULL;
         linesize[c]=s->linesize[c];
         s->coefs_finished[c] |= 1;
@@ -889,7 +891,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int s
     int mb_x, mb_y;
     int EOBRUN = 0;
     int c = s->comp_index[0];
-    uint8_t* data = s->picture.data[c];
+    uint8_t* data = s->picture_ptr->data[c];
     const uint8_t *reference_data = reference ? reference->data[c] : NULL;
     int linesize = s->linesize[c];
     int last_scan = 0;
@@ -1521,7 +1523,7 @@ eoi_parser:
                 if (s->bottom_field == !s->interlace_polarity)
                     goto not_the_end;
             }
-            *picture = s->picture;
+            *picture = *s->picture_ptr;
             *data_size = sizeof(AVFrame);
 
             if(!s->lossless){
@@ -1593,8 +1595,8 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
     MJpegDecodeContext *s = avctx->priv_data;
     int i, j;
 
-    if (s->picture.data[0])
-        avctx->release_buffer(avctx, &s->picture);
+    if (s->picture_ptr && s->picture_ptr->data[0])
+        avctx->release_buffer(avctx, s->picture_ptr);
 
     av_free(s->buffer);
     av_free(s->qscale_table);
diff --git a/libavcodec/mjpegdec.h b/libavcodec/mjpegdec.h
index 72fe6fa088..52c256ee2f 100644
--- a/libavcodec/mjpegdec.h
+++ b/libavcodec/mjpegdec.h
@@ -81,6 +81,7 @@ typedef struct MJpegDecodeContext {
     int quant_index[4];   /* quant table index for each component */
     int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */
     AVFrame picture; /* picture structure */
+    AVFrame *picture_ptr; /* pointer to picture structure */
     int got_picture; ///< we found a SOF and picture is valid, too.
     int linesize[MAX_COMPONENTS];                   ///< linesize << interlaced
    int8_t *qscale_table;
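Illustrative note, not part of the commit: the new picture_ptr field lets a decoder that embeds MJpegDecodeContext point the MJPEG core at a frame it owns, while plain MJPEG decoding keeps the old behaviour because ff_mjpeg_decode_init() now falls back to &s->picture when the pointer is unset. The sketch below shows one way such a wrapper could use the hook; WrapperContext, its members and wrapper_decode_init() are hypothetical names, only MJpegDecodeContext, AVFrame, picture_ptr and ff_mjpeg_decode_init() come from this patch.

/* Sketch of a hypothetical wrapper decoder using the new picture_ptr hook.
 * WrapperContext and wrapper_decode_init() are invented for illustration;
 * only MJpegDecodeContext, AVFrame, picture_ptr and ff_mjpeg_decode_init()
 * come from the patch above. */
#include "avcodec.h"
#include "mjpegdec.h"

typedef struct WrapperContext {
    MJpegDecodeContext jpg; /* must be the first member: ff_mjpeg_decode_init()
                               reads avctx->priv_data as MJpegDecodeContext */
    AVFrame frame;          /* frame owned by the wrapper decoder */
} WrapperContext;

static int wrapper_decode_init(AVCodecContext *avctx)
{
    WrapperContext *w = avctx->priv_data;

    /* Redirect the MJPEG core to the wrapper's own frame before init.
     * If picture_ptr were left NULL, ff_mjpeg_decode_init() would fall
     * back to &s->picture, preserving the old behaviour. */
    w->jpg.picture_ptr = &w->frame;

    return ff_mjpeg_decode_init(avctx);
}

An AVCodec entry for such a wrapper would set .priv_data_size = sizeof(WrapperContext) so that the embedded MJPEG context and the wrapper-owned frame are allocated together.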