author    | Michael Niedermayer <michaelni@gmx.at> | 2013-12-06 12:21:31 +0100
committer | Michael Niedermayer <michaelni@gmx.at> | 2013-12-06 13:30:23 +0100
commit    | 93947d88f2abdf17d374a2d83cdb051ef9bafb2c (patch)
tree      | 4e1f967ed1e56ae70d3df7eaa7de32c20f905db6
parent    | d756b2b530ca742ac930ca6e9d1ba86ec8af86c9 (diff)
parent    | 24abd806ea0cfb0d988d2f0044eac79cff12918c (diff)
download  | ffmpeg-93947d88f2abdf17d374a2d83cdb051ef9bafb2c.tar.gz
Merge commit '24abd806ea0cfb0d988d2f0044eac79cff12918c'
* commit '24abd806ea0cfb0d988d2f0044eac79cff12918c':
ljpegenc: deMpegEncContextize
Conflicts:
libavcodec/ljpegenc.c
libavcodec/mpegvideo.h
libavcodec/mpegvideo_enc.c
tests/ref/vsynth/vsynth1-ljpeg
tests/ref/vsynth/vsynth2-ljpeg
Merged-by: Michael Niedermayer <michaelni@gmx.at>
-rw-r--r-- | libavcodec/ljpegenc.c      | 199
-rw-r--r-- | libavcodec/mjpegenc.c      |  23
-rw-r--r-- | libavcodec/mpegvideo.h     |   2
-rw-r--r-- | libavcodec/mpegvideo_enc.c |  41
4 files changed, 153 insertions, 112 deletions
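
The change below gives the LJPEG encoder its own private LJpegEncContext instead of reusing the much larger MpegEncContext; the public libavcodec interface is unchanged. For context only, here is a minimal sketch of how the encoder is driven through the API of this release series (the helper name and the fixed BGR24 parameters are invented for illustration; error handling is abbreviated):

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Hypothetical helper: encode one BGR24 frame with the "ljpeg" encoder.
 * Sketch only; real code must check every return value. */
static int encode_one_ljpeg_frame(int width, int height)
{
    AVCodec *codec;
    AVCodecContext *ctx;
    AVFrame *frame;
    AVPacket pkt;
    int got_packet = 0, ret;

    avcodec_register_all();
    codec = avcodec_find_encoder(AV_CODEC_ID_LJPEG);
    if (!codec)
        return -1;                      /* encoder disabled at configure time */

    ctx            = avcodec_alloc_context3(codec);
    ctx->width     = width;
    ctx->height    = height;
    ctx->pix_fmt   = AV_PIX_FMT_BGR24;  /* one of the formats listed in ff_ljpeg_encoder */
    ctx->time_base = (AVRational){ 1, 25 };
    if (avcodec_open2(ctx, codec, NULL) < 0)
        return -1;

    frame         = av_frame_alloc();
    frame->format = ctx->pix_fmt;
    frame->width  = ctx->width;
    frame->height = ctx->height;
    av_frame_get_buffer(frame, 32);     /* fill frame->data[0] with pixels here */

    av_init_packet(&pkt);
    pkt.data = NULL;                    /* let the encoder allocate the packet */
    pkt.size = 0;

    ret = avcodec_encode_video2(ctx, &pkt, frame, &got_packet);
    if (ret >= 0 && got_packet) {
        /* write pkt.data / pkt.size somewhere, then release the packet */
        av_free_packet(&pkt);
    }

    av_frame_free(&frame);
    avcodec_close(ctx);
    av_free(ctx);
    return ret;
}

Whether avctx->priv_data points at a full MpegEncContext (before this merge) or at the new LJpegEncContext (after it) is invisible at this level.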
diff --git a/libavcodec/ljpegenc.c b/libavcodec/ljpegenc.c
index 99fee70aec..c5d99911d6 100644
--- a/libavcodec/ljpegenc.c
+++ b/libavcodec/ljpegenc.c
@@ -30,66 +30,71 @@
  * lossless JPEG encoder.
  */
 
+#include "libavutil/frame.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+
 #include "avcodec.h"
+#include "dsputil.h"
 #include "internal.h"
 #include "mpegvideo.h"
 #include "mjpeg.h"
 #include "mjpegenc.h"
 
+typedef struct LJpegEncContext {
+    DSPContext dsp;
+    ScanTable scantable;
+    uint16_t matrix[64];
+
+    int vsample[3];
+    int hsample[3];
+
+    uint16_t huff_code_dc_luminance[12];
+    uint16_t huff_code_dc_chrominance[12];
+    uint8_t  huff_size_dc_luminance[12];
+    uint8_t  huff_size_dc_chrominance[12];
+
+    uint16_t (*scratch)[4];
+} LJpegEncContext;
+
 static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *pict, int *got_packet)
 {
-    MpegEncContext * const s = avctx->priv_data;
-    MJpegContext * const m = s->mjpeg_ctx;
-    const int width= s->width;
-    const int height= s->height;
-    AVFrame * const p = &s->current_picture.f;
+    LJpegEncContext *s = avctx->priv_data;
+    PutBitContext pb;
+    const int width  = avctx->width;
+    const int height = avctx->height;
     const int predictor= avctx->prediction_method+1;
-    const int mb_width  = (width  + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
-    const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
-    int ret, max_pkt_size = FF_MIN_BUFFER_SIZE;
+    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
+    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
+    int max_pkt_size = FF_MIN_BUFFER_SIZE;
+    int ret, header_bits;
 
-    if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
+    if(   avctx->pix_fmt == AV_PIX_FMT_BGR0
+       || avctx->pix_fmt == AV_PIX_FMT_BGRA
+       || avctx->pix_fmt == AV_PIX_FMT_BGR24)
         max_pkt_size += width * height * 3 * 4;
     else {
         max_pkt_size += mb_width * mb_height * 3 * 4
-                        * s->mjpeg_hsample[0] * s->mjpeg_vsample[0];
-    }
-
-    if (!s->rd_scratchpad) {
-        int alloc_size = FFALIGN(FFABS(pict->linesize[0]) + 64, 32);
-        s->me.scratchpad =
-        s->rd_scratchpad = av_mallocz(alloc_size * 4 * 16 * 2);
-        if (!s->rd_scratchpad) {
-            av_log(avctx, AV_LOG_ERROR, "failed to allocate context scratch buffers.\n");
-            return AVERROR(ENOMEM);
-        }
+                        * s->hsample[0] * s->vsample[0];
     }
 
     if ((ret = ff_alloc_packet2(avctx, pkt, max_pkt_size)) < 0)
         return ret;
 
-    init_put_bits(&s->pb, pkt->data, pkt->size);
+    init_put_bits(&pb, pkt->data, pkt->size);
 
-    av_frame_unref(p);
-    ret = av_frame_ref(p, pict);
-    if (ret < 0)
-        return ret;
-    p->pict_type= AV_PICTURE_TYPE_I;
-    p->key_frame= 1;
-
-    ff_mjpeg_encode_picture_header(avctx, &s->pb, &s->intra_scantable,
-                                   s->intra_matrix);
+    ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable,
+                                   s->matrix);
 
-    s->header_bits= put_bits_count(&s->pb);
+    header_bits = put_bits_count(&pb);
 
-    if(avctx->pix_fmt == AV_PIX_FMT_BGR0
+    if(   avctx->pix_fmt == AV_PIX_FMT_BGR0
        || avctx->pix_fmt == AV_PIX_FMT_BGRA
        || avctx->pix_fmt == AV_PIX_FMT_BGR24){
         int x, y, i;
-        const int linesize= p->linesize[0];
-        uint16_t (*buffer)[4]= (void *) s->rd_scratchpad;
+        const int linesize = pict->linesize[0];
+        uint16_t (*buffer)[4] = s->scratch;
         int left[3], top[3], topleft[3];
 
         for(i=0; i<3; i++){
@@ -98,10 +103,10 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
 
         for(y = 0; y < height; y++) {
             const int modified_predictor= y ? predictor : 1;
-            uint8_t *ptr = p->data[0] + (linesize * y);
+            uint8_t *ptr = pict->data[0] + (linesize * y);
 
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){
-                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+            if(pb.buf_end - pb.buf - (put_bits_count(&pb) >> 3) < width * 3 * 4) {
+                av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
                 return -1;
             }
@@ -132,9 +137,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                     diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100;
 
                     if(i==0)
-                        ff_mjpeg_encode_dc(&s->pb, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                        ff_mjpeg_encode_dc(&pb, diff, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                     else
-                        ff_mjpeg_encode_dc(&s->pb, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                        ff_mjpeg_encode_dc(&pb, diff, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                 }
             }
         }
@@ -142,8 +147,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
         int mb_x, mb_y, i;
 
         for(mb_y = 0; mb_y < mb_height; mb_y++) {
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){
-                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+            if (pb.buf_end - pb.buf - (put_bits_count(&pb) >> 3) <
+                mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
+                av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
                 return -1;
             }
             for(mb_x = 0; mb_x < mb_width; mb_x++) {
@@ -151,15 +157,15 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                 for(i=0;i<3;i++) {
                     uint8_t *ptr;
                     int x, y, h, v, linesize;
-                    h = s->mjpeg_hsample[i];
-                    v = s->mjpeg_vsample[i];
-                    linesize= p->linesize[i];
+                    h = s->hsample[i];
+                    v = s->vsample[i];
+                    linesize = pict->linesize[i];
 
                     for(y=0; y<v; y++){
                         for(x=0; x<h; x++){
                             int pred;
 
-                            ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                            ptr = pict->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                             if(y==0 && mb_y==0){
                                 if(x==0 && mb_x==0){
                                     pred= 128;
@@ -175,9 +181,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                             }
 
                             if(i==0)
-                                ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                                ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                             else
-                                ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                                ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                         }
                     }
                 }
@@ -185,8 +191,8 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                 for(i=0;i<3;i++) {
                     uint8_t *ptr;
                     int x, y, h, v, linesize;
-                    h = s->mjpeg_hsample[i];
-                    v = s->mjpeg_vsample[i];
+                    h = s->hsample[i];
+                    v = s->vsample[i];
                     linesize = pict->linesize[i];
 
                     for(y=0; y<v; y++){
@@ -197,9 +203,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                             PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
 
                             if(i==0)
-                                ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                                ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                             else
-                                ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                                ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                         }
                     }
                 }
@@ -209,14 +215,12 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
     }
     emms_c();
 
-    av_assert0(s->esc_pos == s->header_bits >> 3);
-    ff_mjpeg_escape_FF(&s->pb, s->esc_pos);
-    ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
-    s->picture_number++;
+    ff_mjpeg_escape_FF(&pb, header_bits >> 3);
+    ff_mjpeg_encode_picture_trailer(&pb, header_bits);
 
-    flush_put_bits(&s->pb);
-    pkt->size   = put_bits_ptr(&s->pb) - s->pb.buf;
+    flush_put_bits(&pb);
+    pkt->size   = put_bits_ptr(&pb) - pb.buf;
     pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;
@@ -224,19 +228,88 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
 //    return (put_bits_count(&f->pb)+7)/8;
 }
 
+static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
+{
+    LJpegEncContext *s = avctx->priv_data;
+
+    av_frame_free(&avctx->coded_frame);
+    av_freep(&s->scratch);
+
+    return 0;
+}
+
+static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
+{
+    LJpegEncContext *s = avctx->priv_data;
+    int chroma_v_shift, chroma_h_shift;
+
+    if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
+         avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
+         avctx->pix_fmt == AV_PIX_FMT_YUV444P) &&
+        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Limited range YUV is non-standard, set strict_std_compliance to "
+               "at least unofficial to use it.\n");
+        return AVERROR(EINVAL);
+    }
+
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
+
+    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
+
+    ff_dsputil_init(&s->dsp, avctx);
+    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
+
+    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
+                                     &chroma_v_shift);
+
+    if (   avctx->pix_fmt == AV_PIX_FMT_BGR0
+        || avctx->pix_fmt == AV_PIX_FMT_BGRA
+        || avctx->pix_fmt == AV_PIX_FMT_BGR24) {
+        s->vsample[0] = s->hsample[0] =
+        s->vsample[1] = s->hsample[1] =
+        s->vsample[2] = s->hsample[2] = 1;
+    } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
+        s->vsample[0] = s->vsample[1] = s->vsample[2] = 2;
+        s->hsample[0] = s->hsample[1] = s->hsample[2] = 1;
+    } else {
+        s->vsample[0] = 2;
+        s->vsample[1] = 2 >> chroma_v_shift;
+        s->vsample[2] = 2 >> chroma_v_shift;
+        s->hsample[0] = 2;
+        s->hsample[1] = 2 >> chroma_h_shift;
+        s->hsample[2] = 2 >> chroma_h_shift;
+    }
+
+    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
+                                 s->huff_code_dc_luminance,
+                                 avpriv_mjpeg_bits_dc_luminance,
+                                 avpriv_mjpeg_val_dc);
+    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
+                                 s->huff_code_dc_chrominance,
+                                 avpriv_mjpeg_bits_dc_chrominance,
+                                 avpriv_mjpeg_val_dc);
+
+    return 0;
+}
 
-AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need them
+AVCodec ff_ljpeg_encoder = {
     .name           = "ljpeg",
     .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_LJPEG,
-    .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
+    .priv_data_size = sizeof(LJpegEncContext),
+    .init           = ljpeg_encode_init,
     .encode2        = encode_picture_lossless,
-    .close          = ff_MPV_encode_end,
+    .close          = ljpeg_encode_close,
     .pix_fmts       = (const enum AVPixelFormat[]){
-        AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
+        AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
         AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
-        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+        AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
         AV_PIX_FMT_NONE},
 };
diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c
index 423a2f5c23..a25af70722 100644
--- a/libavcodec/mjpegenc.c
+++ b/libavcodec/mjpegenc.c
@@ -212,15 +212,13 @@ void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb,
     const int lossless = avctx->codec_id != AV_CODEC_ID_MJPEG;
     int hsample[3], vsample[3];
     int i;
-    MpegEncContext *s = avctx->priv_data;
-    av_assert0(avctx->codec->priv_data_size == sizeof(MpegEncContext));
 
     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
 
     if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
         (avctx->pix_fmt == AV_PIX_FMT_BGR0
-         || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
-         || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
+         || avctx->pix_fmt == AV_PIX_FMT_BGRA
+         || avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
         vsample[0] = hsample[0] =
         vsample[1] = hsample[1] =
         vsample[2] = hsample[2] = 1;
@@ -319,9 +317,14 @@ void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb,
     put_bits(pb, 8, 0); /* Ah/Al (not used) */
 
 end:
-    s->esc_pos = put_bits_count(pb) >> 3;
-    for(i=1; i<s->slice_context_count; i++)
-        s->thread_context[i]->esc_pos = 0;
+    if (avctx->codec->priv_data_size == sizeof(MpegEncContext)) {
+        MpegEncContext *s = avctx->priv_data;
+        av_assert0(avctx->codec->priv_data_size == sizeof(MpegEncContext));
+
+        s->esc_pos = put_bits_count(pb) >> 3;
+        for(i=1; i<s->slice_context_count; i++)
+            s->thread_context[i]->esc_pos = 0;
+    }
 }
@@ -530,6 +533,9 @@ static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
     MpegEncContext *s = avctx->priv_data;
     AVFrame pic = *pic_arg;
     int i;
+    int chroma_h_shift, chroma_v_shift;
+
+    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
 
     //CODEC_FLAG_EMU_EDGE have to be cleared
     if(s->avctx->flags & CODEC_FLAG_EMU_EDGE)
@@ -537,7 +543,8 @@ static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
 
     //picture should be flipped upside-down
     for(i=0; i < 3; i++) {
-        pic.data[i] += (pic.linesize[i] * (s->mjpeg_vsample[i] * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
+        int vsample = i ? 2 >> chroma_v_shift : 2;
+        pic.data[i] += (pic.linesize[i] * (vsample * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
         pic.linesize[i] *= -1;
     }
     return ff_MPV_encode_picture(avctx, pkt, &pic, got_packet);
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 1d2b29145c..65c4f9e0d5 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -625,8 +625,6 @@ typedef struct MpegEncContext {
 
     /* MJPEG specific */
     struct MJpegContext *mjpeg_ctx;
-    int mjpeg_vsample[3];       ///< vertical sampling factors, default = {2, 1, 1}
-    int mjpeg_hsample[3];       ///< horizontal sampling factors, default = {2, 1, 1}
     int esc_pos;
 
     /* MSMPEG4 specific */
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index dcbdc036da..533e9e1357 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -243,7 +243,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
     int i, ret;
-    int chroma_h_shift, chroma_v_shift;
 
     MPV_encode_defaults(s);
 
@@ -256,21 +255,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
             return -1;
         }
         break;
-    case AV_CODEC_ID_LJPEG:
-        if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
-            avctx->pix_fmt != AV_PIX_FMT_BGR0     &&
-            avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
-            avctx->pix_fmt != AV_PIX_FMT_BGR24    &&
-            ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
-              avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
-              avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
-             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
-            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
-            return -1;
-        }
-        break;
     case AV_CODEC_ID_MJPEG:
     case AV_CODEC_ID_AMV:
         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
@@ -657,8 +641,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
     av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
 
-    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
-
     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
         s->avctx->time_base.den > (1 << 16) - 1) {
         av_log(avctx, AV_LOG_ERROR,
@@ -682,30 +664,11 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
         avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
         s->rtp_mode  = 1;
         break;
-    case AV_CODEC_ID_LJPEG:
     case AV_CODEC_ID_MJPEG:
     case AV_CODEC_ID_AMV:
         s->out_format = FMT_MJPEG;
         s->intra_only = 1; /* force intra only for jpeg */
-        if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
-            (avctx->pix_fmt == AV_PIX_FMT_BGR0
-             || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
-             || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
-            s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
-            s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
-            s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
-        } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
-            s->mjpeg_vsample[0] = s->mjpeg_vsample[1] = s->mjpeg_vsample[2] = 2;
-            s->mjpeg_hsample[0] = s->mjpeg_hsample[1] = s->mjpeg_hsample[2] = 1;
-        } else {
-            s->mjpeg_vsample[0] = 2;
-            s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
-            s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
-            s->mjpeg_hsample[0] = 2;
-            s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
-            s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
-        }
-        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
+        if (!CONFIG_MJPEG_ENCODER ||
            ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay = 0;
@@ -954,7 +917,7 @@ av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
     ff_rate_control_uninit(s);
 
     ff_MPV_common_end(s);
-    if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
+    if (CONFIG_MJPEG_ENCODER &&
         s->out_format == FMT_MJPEG)
         ff_mjpeg_encode_close(s);
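
A note on the PREDICT step used in the loops above: it selects one of the standard lossless-JPEG predictors from the left, top and top-left neighbours, with the predictor index taken from avctx->prediction_method + 1. The following stand-alone sketch shows those predictors as defined by ITU-T T.81, Table H.1 (the function name is invented here; FFmpeg implements this as a macro in its mjpeg headers, which may differ in detail):

#include <stdint.h>

/* Lossless JPEG predictors 1..7 (ITU-T T.81, Table H.1).
 * left/top/topleft are the already-coded neighbouring samples. */
static inline int ljpeg_predict(int topleft, int top, int left, int predictor)
{
    switch (predictor) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top  - topleft) >> 1);
    case 6:  return top  + ((left - topleft) >> 1);
    case 7:
    default: return (left + top) >> 1;
    }
}

The unchanged surrounding code keeps the existing border handling: the first sample of a YUV frame is predicted as 128, the RGB path drops to predictor 1 on the first row via modified_predictor, and each RGB residual is wrapped into a 9-bit signed range with ((left[i] - pred + 0x100) & 0x1FF) - 0x100 before ff_mjpeg_encode_dc() writes it.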