author     Diego Biurrun <diego@biurrun.de>    2007-02-07 01:48:09 +0000
committer  Diego Biurrun <diego@biurrun.de>    2007-02-07 01:48:09 +0000
commit     71e445fca3ccc1285068386bf9858d180b0fecfc
tree       230f6ba62bf0a9af096cad7c2a7c332b066e2965
parent     917fa192c72e226ffaddefbe6661ec3c405156cf
download   ffmpeg-71e445fca3ccc1285068386bf9858d180b0fecfc.tar.gz
Replace deprecated PIX_FMT names by the newer variants.
Originally committed as revision 7867 to svn://svn.ffmpeg.org/ffmpeg/trunk
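The renames are purely mechanical: PIX_FMT_YUV422 becomes PIX_FMT_YUYV422, PIX_FMT_UYVY411 becomes PIX_FMT_UYYVYY411, and PIX_FMT_RGBA32 becomes PIX_FMT_RGB32. As a rough illustration of what a caller outside the tree would change, the sketch below maps bit depths to the new names; pick_pixfmt() is a hypothetical helper, not code from this commit, and at least the PIX_FMT_YUV422 spelling remains usable through the compatibility #define visible in the avutil.h context further down.

    /* Hypothetical helper, for illustration only -- not part of this commit. */
    #include "avcodec.h"   /* brings in avutil.h and enum PixelFormat */

    static enum PixelFormat pick_pixfmt(int bpp)
    {
        switch (bpp) {
        case 12: return PIX_FMT_UYYVYY411; /* was PIX_FMT_UYVY411 */
        case 16: return PIX_FMT_YUYV422;   /* was PIX_FMT_YUV422  */
        case 32: return PIX_FMT_RGB32;     /* was PIX_FMT_RGBA32  */
        default: return PIX_FMT_YUV420P;   /* name unchanged      */
        }
    }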
-rw-r--r--   ffplay.c                  |  2
-rw-r--r--   libavcodec/8bps.c         | 10
-rw-r--r--   libavcodec/cscd.c         |  2
-rw-r--r--   libavcodec/ffv1.c         |  6
-rw-r--r--   libavcodec/huffyuv.c      |  4
-rw-r--r--   libavcodec/imgconvert.c   | 70
-rw-r--r--   libavcodec/loco.c         |  2
-rw-r--r--   libavcodec/mjpeg.c        |  6
-rw-r--r--   libavcodec/png.c          |  6
-rw-r--r--   libavcodec/pnm.c          | 10
-rw-r--r--   libavcodec/qtrle.c        |  4
-rw-r--r--   libavcodec/raw.c          |  6
-rw-r--r--   libavcodec/snow.c         |  2
-rw-r--r--   libavcodec/targa.c        |  2
-rw-r--r--   libavcodec/truemotion1.c  |  2
-rw-r--r--   libavcodec/tscc.c         |  2
-rw-r--r--   libavcodec/utils.c        |  4
-rw-r--r--   libavformat/dc1394.c      |  2
-rw-r--r--   libavformat/grab.c        |  4
-rw-r--r--   libavformat/sgi.c         | 12
-rw-r--r--   libavformat/v4l2.c        |  4
-rw-r--r--   libavformat/x11grab.c     |  2
-rw-r--r--   libavutil/avutil.h        | 13
-rw-r--r--   vhook/imlib2.c            |  6
-rw-r--r--   vhook/watermark.c         | 36

25 files changed, 105 insertions(+), 114 deletions(-)
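Most of the per-file changes follow the same pattern: a decoder inspects the stream's bit depth and picks an output pixel format, and only the spelling of that format changes. A condensed sketch of the pattern, already using the new names, is shown here; it is distilled from the cscd/tscc/raw-style init code in the diff below and is not a verbatim copy of any single file.

    /* Condensed illustration of the depth -> pix_fmt mapping used by several
     * of the decoders touched below (cscd, tscc, raw, ...). */
    #include "avcodec.h"

    static int set_pix_fmt(AVCodecContext *avctx, int bpp)
    {
        switch (bpp) {
        case 8:  avctx->pix_fmt = PIX_FMT_PAL8;   break;
        case 16: avctx->pix_fmt = PIX_FMT_RGB555; break;
        case 24: avctx->pix_fmt = PIX_FMT_BGR24;  break;
        case 32: avctx->pix_fmt = PIX_FMT_RGB32;  break; /* was PIX_FMT_RGBA32 */
        default: return -1;
        }
        return 0;
    }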
@@ -1174,7 +1174,7 @@ static void alloc_picture(void *opaque) case PIX_FMT_YUV420P: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: - case PIX_FMT_YUV422: + case PIX_FMT_YUYV422: case PIX_FMT_YUV410P: case PIX_FMT_YUV411P: is_yuv = 1; diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c index 297465043f..3d4eb05b36 100644 --- a/libavcodec/8bps.c +++ b/libavcodec/8bps.c @@ -27,8 +27,8 @@ * http://www.pcisys.net/~melanson/codecs/ * * Supports: PAL8 (RGB 8bpp, paletted) - * : BGR24 (RGB 24bpp) (can also output it as RGBA32) - * : RGBA32 (RGB 32bpp, 4th plane is probably alpha and it's ignored) + * : BGR24 (RGB 24bpp) (can also output it as RGB32) + * : RGB32 (RGB 32bpp, 4th plane is probably alpha and it's ignored) * */ @@ -39,7 +39,7 @@ #include "avcodec.h" -static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGBA32, -1}; +static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGB32, -1}; /* * Decoder context @@ -89,7 +89,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 if (planes == 4) planes--; - px_inc = planes + (avctx->pix_fmt == PIX_FMT_RGBA32); + px_inc = planes + (avctx->pix_fmt == PIX_FMT_RGB32); for (p = 0; p < planes; p++) { /* Lines length pointer for this plane */ @@ -181,7 +181,7 @@ static int decode_init(AVCodecContext *avctx) c->planemap[2] = 0; // 3rd plane is blue break; case 32: - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; c->planes = 4; #ifdef WORDS_BIGENDIAN c->planemap[0] = 1; // 1st plane is red diff --git a/libavcodec/cscd.c b/libavcodec/cscd.c index d8733d6dd7..2e7d05c402 100644 --- a/libavcodec/cscd.c +++ b/libavcodec/cscd.c @@ -222,7 +222,7 @@ static int decode_init(AVCodecContext *avctx) { switch (avctx->bits_per_sample) { case 16: avctx->pix_fmt = PIX_FMT_RGB555; break; case 24: avctx->pix_fmt = PIX_FMT_BGR24; break; - case 32: avctx->pix_fmt = PIX_FMT_RGBA32; break; + case 32: avctx->pix_fmt = PIX_FMT_RGB32; break; default: av_log(avctx, AV_LOG_ERROR, "CamStudio codec error: invalid depth %i bpp\n", diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c index 1ca18a4e87..45f408c876 100644 --- a/libavcodec/ffv1.c +++ b/libavcodec/ffv1.c @@ -600,7 +600,7 @@ static int encode_init(AVCodecContext *avctx) case PIX_FMT_YUV410P: s->colorspace= 0; break; - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: s->colorspace= 1; break; default: @@ -895,7 +895,7 @@ static int read_header(FFV1Context *f){ av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n"); return -1; } - f->avctx->pix_fmt= PIX_FMT_RGBA32; + f->avctx->pix_fmt= PIX_FMT_RGB32; }else{ av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n"); return -1; @@ -1035,6 +1035,6 @@ AVCodec ffv1_encoder = { encode_init, encode_frame, common_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGBA32, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, -1}, }; #endif diff --git a/libavcodec/huffyuv.c b/libavcodec/huffyuv.c index 0aefd6d724..bf36eae062 100644 --- a/libavcodec/huffyuv.c +++ b/libavcodec/huffyuv.c @@ -452,7 +452,7 @@ s->bgr32=1; break; case 16: if(s->yuy2){ - avctx->pix_fmt = PIX_FMT_YUV422; + avctx->pix_fmt = PIX_FMT_YUYV422; }else{ avctx->pix_fmt = PIX_FMT_YUV422P; } @@ -460,7 +460,7 @@ s->bgr32=1; case 24: case 32: if(s->bgr32){ - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; }else{ avctx->pix_fmt = 
PIX_FMT_BGR24; } diff --git a/libavcodec/imgconvert.c b/libavcodec/imgconvert.c index b2305cd639..0f6542f8a0 100644 --- a/libavcodec/imgconvert.c +++ b/libavcodec/imgconvert.c @@ -91,8 +91,8 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .depth = 8, .x_chroma_shift = 0, .y_chroma_shift = 0, }, - [PIX_FMT_YUV422] = { - .name = "yuv422", + [PIX_FMT_YUYV422] = { + .name = "yuyv422", .nb_channels = 1, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PACKED, @@ -167,8 +167,8 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .depth = 8, .x_chroma_shift = 0, .y_chroma_shift = 0, }, - [PIX_FMT_RGBA32] = { - .name = "rgba32", + [PIX_FMT_RGB32] = { + .name = "rgb32", .nb_channels = 4, .is_alpha = 1, .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, @@ -243,8 +243,8 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { [PIX_FMT_XVMC_MPEG2_IDCT] = { .name = "xvmcidct", }, - [PIX_FMT_UYVY411] = { - .name = "uyvy411", + [PIX_FMT_UYYVYY411] = { + .name = "uyyvyy411", .nb_channels = 1, .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PACKED, @@ -432,7 +432,7 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr, picture->data[2] = NULL; picture->linesize[0] = width * 3; return size * 3; - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: case PIX_FMT_BGR32: case PIX_FMT_RGB32_1: case PIX_FMT_BGR32_1: @@ -447,7 +447,7 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr, case PIX_FMT_BGR565: case PIX_FMT_RGB555: case PIX_FMT_RGB565: - case PIX_FMT_YUV422: + case PIX_FMT_YUYV422: picture->data[0] = ptr; picture->data[1] = NULL; picture->data[2] = NULL; @@ -459,7 +459,7 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr, picture->data[2] = NULL; picture->linesize[0] = width * 2; return size * 2; - case PIX_FMT_UYVY411: + case PIX_FMT_UYYVYY411: picture->data[0] = ptr; picture->data[1] = NULL; picture->data[2] = NULL; @@ -519,14 +519,14 @@ int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height, return -1; if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) { - if (pix_fmt == PIX_FMT_YUV422 || + if (pix_fmt == PIX_FMT_YUYV422 || pix_fmt == PIX_FMT_UYVY422 || pix_fmt == PIX_FMT_BGR565 || pix_fmt == PIX_FMT_BGR555 || pix_fmt == PIX_FMT_RGB565 || pix_fmt == PIX_FMT_RGB555) w = width * 2; - else if (pix_fmt == PIX_FMT_UYVY411) + else if (pix_fmt == PIX_FMT_UYYVYY411) w = width + width/2; else if (pix_fmt == PIX_FMT_PAL8) w = width; @@ -633,7 +633,7 @@ static int avg_bits_per_pixel(int pix_fmt) switch(pf->pixel_type) { case FF_PIXEL_PACKED: switch(pix_fmt) { - case PIX_FMT_YUV422: + case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_RGB565: case PIX_FMT_RGB555: @@ -641,7 +641,7 @@ static int avg_bits_per_pixel(int pix_fmt) case PIX_FMT_BGR555: bits = 16; break; - case PIX_FMT_UYVY411: + case PIX_FMT_UYYVYY411: bits = 12; break; default: @@ -753,7 +753,7 @@ void img_copy(AVPicture *dst, const AVPicture *src, switch(pf->pixel_type) { case FF_PIXEL_PACKED: switch(pix_fmt) { - case PIX_FMT_YUV422: + case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_RGB565: case PIX_FMT_RGB555: @@ -761,7 +761,7 @@ void img_copy(AVPicture *dst, const AVPicture *src, case PIX_FMT_BGR555: bits = 16; break; - case PIX_FMT_UYVY411: + case PIX_FMT_UYYVYY411: bits = 12; break; default: @@ -1931,7 +1931,7 @@ typedef struct ConvertEntry { - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8 - - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32 + - all FF_COLOR_RGB formats with alpha must convert to and from 
PIX_FMT_RGB32 - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from PIX_FMT_RGB24. @@ -1942,7 +1942,7 @@ typedef struct ConvertEntry { */ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_YUV420P] = { - [PIX_FMT_YUV422] = { + [PIX_FMT_YUYV422] = { .convert = yuv420p_to_yuv422, }, [PIX_FMT_RGB555] = { @@ -1957,7 +1957,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_RGB24] = { .convert = yuv420p_to_rgb24 }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = yuv420p_to_rgba32 }, [PIX_FMT_UYVY422] = { @@ -1965,7 +1965,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { }, }, [PIX_FMT_YUV422P] = { - [PIX_FMT_YUV422] = { + [PIX_FMT_YUYV422] = { .convert = yuv422p_to_yuv422, }, [PIX_FMT_UYVY422] = { @@ -1990,7 +1990,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_RGB24] = { .convert = yuvj420p_to_rgb24 }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = yuvj420p_to_rgba32 }, }, @@ -1999,7 +1999,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { .convert = yuvj444p_to_rgb24 }, }, - [PIX_FMT_YUV422] = { + [PIX_FMT_YUYV422] = { [PIX_FMT_YUV420P] = { .convert = yuv422_to_yuv420p, }, @@ -2025,7 +2025,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_RGB555] = { .convert = rgb24_to_rgb555 }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = rgb24_to_rgba32 }, [PIX_FMT_BGR24] = { @@ -2047,7 +2047,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { .convert = rgb24_to_yuvj444p }, }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { [PIX_FMT_RGB24] = { .convert = rgba32_to_rgb24 }, @@ -2071,7 +2071,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { }, }, [PIX_FMT_BGR24] = { - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = bgr24_to_rgba32 }, [PIX_FMT_RGB24] = { @@ -2088,7 +2088,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_RGB24] = { .convert = rgb555_to_rgb24 }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = rgb555_to_rgba32 }, [PIX_FMT_YUV420P] = { @@ -2099,7 +2099,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { }, }, [PIX_FMT_RGB565] = { - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = rgb565_to_rgba32 }, [PIX_FMT_RGB24] = { @@ -2141,7 +2141,7 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_BGR24] = { .convert = gray_to_bgr24 }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = gray_to_rgba32 }, [PIX_FMT_MONOWHITE] = { @@ -2180,11 +2180,11 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = { [PIX_FMT_RGB24] = { .convert = pal8_to_rgb24 }, - [PIX_FMT_RGBA32] = { + [PIX_FMT_RGB32] = { .convert = pal8_to_rgba32 }, }, - [PIX_FMT_UYVY411] = { + [PIX_FMT_UYYVYY411] = { [PIX_FMT_YUV411P] = { .convert = uyvy411_to_yuv411p, }, @@ -2499,16 +2499,16 @@ int img_convert(AVPicture *dst, int dst_pix_fmt, no_chroma_filter: /* try to use an intermediate format */ - if (src_pix_fmt == PIX_FMT_YUV422 || - dst_pix_fmt == PIX_FMT_YUV422) { + if (src_pix_fmt == PIX_FMT_YUYV422 || + dst_pix_fmt == PIX_FMT_YUYV422) { /* specific case: convert to YUV422P first */ int_pix_fmt = PIX_FMT_YUV422P; } else if (src_pix_fmt == PIX_FMT_UYVY422 || dst_pix_fmt == PIX_FMT_UYVY422) { /* specific case: convert to YUV422P first */ int_pix_fmt = PIX_FMT_YUV422P; - } else if (src_pix_fmt == PIX_FMT_UYVY411 || - dst_pix_fmt == PIX_FMT_UYVY411) { + } else if (src_pix_fmt == 
PIX_FMT_UYYVYY411 || + dst_pix_fmt == PIX_FMT_UYYVYY411) { /* specific case: convert to YUV411P first */ int_pix_fmt = PIX_FMT_YUV411P; } else if ((src_pix->color_type == FF_COLOR_GRAY && @@ -2536,7 +2536,7 @@ int img_convert(AVPicture *dst, int dst_pix_fmt, } else { /* the two formats are rgb or gray8 or yuv[j]444p */ if (src_pix->is_alpha && dst_pix->is_alpha) - int_pix_fmt = PIX_FMT_RGBA32; + int_pix_fmt = PIX_FMT_RGB32; else int_pix_fmt = PIX_FMT_RGB24; } @@ -2597,7 +2597,7 @@ int img_get_alpha_info(const AVPicture *src, if (!pf->is_alpha) return 0; switch(pix_fmt) { - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: ret = get_alpha_info_rgba32(src, width, height); break; case PIX_FMT_PAL8: diff --git a/libavcodec/loco.c b/libavcodec/loco.c index b1f99f425b..760699d451 100644 --- a/libavcodec/loco.c +++ b/libavcodec/loco.c @@ -262,7 +262,7 @@ static int decode_init(AVCodecContext *avctx){ avctx->pix_fmt = PIX_FMT_YUV420P; break; case LOCO_CRGBA: case LOCO_RGBA: - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; break; default: av_log(avctx, AV_LOG_INFO, "Unknown colorspace, index = %i\n", l->mode); diff --git a/libavcodec/mjpeg.c b/libavcodec/mjpeg.c index 09ff94658a..1dc66b7423 100644 --- a/libavcodec/mjpeg.c +++ b/libavcodec/mjpeg.c @@ -441,7 +441,7 @@ void mjpeg_picture_header(MpegEncContext *s) } put_bits(&s->pb, 16, 17); - if(lossless && s->avctx->pix_fmt == PIX_FMT_RGBA32) + if(lossless && s->avctx->pix_fmt == PIX_FMT_RGB32) put_bits(&s->pb, 8, 9); /* 9 bits/component RCT */ else put_bits(&s->pb, 8, 8); /* 8 bits/component */ @@ -700,7 +700,7 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in s->header_bits= put_bits_count(&s->pb); - if(avctx->pix_fmt == PIX_FMT_RGBA32){ + if(avctx->pix_fmt == PIX_FMT_RGB32){ int x, y, i; const int linesize= p->linesize[0]; uint16_t (*buffer)[4]= (void *) s->rd_scratchpad; @@ -1209,7 +1209,7 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s) case 0x222222: case 0x111111: if(s->rgb){ - s->avctx->pix_fmt = PIX_FMT_RGBA32; + s->avctx->pix_fmt = PIX_FMT_RGB32; }else if(s->nb_components==3) s->avctx->pix_fmt = s->cs_itu601 ? 
PIX_FMT_YUV444P : PIX_FMT_YUVJ444P; else diff --git a/libavcodec/png.c b/libavcodec/png.c index c7a2bd6e33..c796c34321 100644 --- a/libavcodec/png.c +++ b/libavcodec/png.c @@ -562,7 +562,7 @@ static int decode_frame(AVCodecContext *avctx, avctx->pix_fmt = PIX_FMT_RGB24; } else if (s->bit_depth == 8 && s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) { - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; } else if (s->bit_depth == 8 && s->color_type == PNG_COLOR_TYPE_GRAY) { avctx->pix_fmt = PIX_FMT_GRAY8; @@ -782,7 +782,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, is_progressive = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT); switch(avctx->pix_fmt) { - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: bit_depth = 8; color_type = PNG_COLOR_TYPE_RGB_ALPHA; break; @@ -961,6 +961,6 @@ AVCodec png_encoder = { common_init, encode_frame, NULL, //encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, -1}, }; #endif // CONFIG_PNG_ENCODER diff --git a/libavcodec/pnm.c b/libavcodec/pnm.c index 610bb28be6..4c9c46fee3 100644 --- a/libavcodec/pnm.c +++ b/libavcodec/pnm.c @@ -124,7 +124,7 @@ static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){ } else if (depth == 3) { avctx->pix_fmt = PIX_FMT_RGB24; } else if (depth == 4) { - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; } else { return -1; } @@ -240,7 +240,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, } } break; - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: ptr = p->data[0]; linesize = p->linesize[0]; if(s->bytestream + avctx->width*avctx->height*4 > s->bytestream_end) @@ -389,7 +389,7 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu maxval = 255; tuple_type = "RGB"; break; - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: n = w * 4; depth = 4; maxval = 255; @@ -406,7 +406,7 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu ptr = p->data[0]; linesize = p->linesize[0]; - if (avctx->pix_fmt == PIX_FMT_RGBA32) { + if (avctx->pix_fmt == PIX_FMT_RGB32) { int j; unsigned int v; @@ -601,6 +601,6 @@ AVCodec pam_encoder = { pam_encode_frame, NULL, //encode_end, pnm_decode_frame, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1}, }; #endif // CONFIG_PAM_ENCODER diff --git a/libavcodec/qtrle.c b/libavcodec/qtrle.c index 0ccca28c63..415f08098e 100644 --- a/libavcodec/qtrle.c +++ b/libavcodec/qtrle.c @@ -29,7 +29,7 @@ * The QT RLE decoder has seven modes of operation: * 1, 2, 4, 8, 16, 24, and 32 bits per pixel. For modes 1, 2, 4, and 8 * the decoder outputs PAL8 colorspace data. 16-bit data yields RGB555 - * data. 24-bit data is RGB24 and 32-bit data is RGBA32. + * data. 24-bit data is RGB24 and 32-bit data is RGB32. 
*/ #include <stdio.h> @@ -515,7 +515,7 @@ static int qtrle_decode_init(AVCodecContext *avctx) break; case 32: - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; break; default: diff --git a/libavcodec/raw.c b/libavcodec/raw.c index f4fddf73cc..f494f8a1c9 100644 --- a/libavcodec/raw.c +++ b/libavcodec/raw.c @@ -48,8 +48,8 @@ static const PixelFormatTag pixelFormatTags[] = { { PIX_FMT_GRAY8, MKTAG(' ', ' ', 'Y', '8') }, - { PIX_FMT_YUV422, MKTAG('Y', 'U', 'Y', '2') }, /* Packed formats */ - { PIX_FMT_YUV422, MKTAG('Y', '4', '2', '2') }, + { PIX_FMT_YUYV422, MKTAG('Y', 'U', 'Y', '2') }, /* Packed formats */ + { PIX_FMT_YUYV422, MKTAG('Y', '4', '2', '2') }, { PIX_FMT_UYVY422, MKTAG('U', 'Y', 'V', 'Y') }, { PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') }, { PIX_FMT_RGB555, MKTAG('R', 'G', 'B', 15) }, @@ -99,7 +99,7 @@ static int raw_init_decoder(AVCodecContext *avctx) case 15: avctx->pix_fmt= PIX_FMT_RGB555; break; case 16: avctx->pix_fmt= PIX_FMT_RGB555; break; case 24: avctx->pix_fmt= PIX_FMT_BGR24 ; break; - case 32: avctx->pix_fmt= PIX_FMT_RGBA32; break; + case 32: avctx->pix_fmt= PIX_FMT_RGB32; break; } } diff --git a/libavcodec/snow.c b/libavcodec/snow.c index 229a9d3fcc..270cafc802 100644 --- a/libavcodec/snow.c +++ b/libavcodec/snow.c @@ -3991,7 +3991,7 @@ static int encode_init(AVCodecContext *avctx) // case PIX_FMT_YUV410P: s->colorspace_type= 0; break; -/* case PIX_FMT_RGBA32: +/* case PIX_FMT_RGB32: s->colorspace= 1; break;*/ default: diff --git a/libavcodec/targa.c b/libavcodec/targa.c index 4ad11cc13a..d637bedae5 100644 --- a/libavcodec/targa.c +++ b/libavcodec/targa.c @@ -132,7 +132,7 @@ static int decode_frame(AVCodecContext *avctx, avctx->pix_fmt = PIX_FMT_BGR24; break; case 32: - avctx->pix_fmt = PIX_FMT_RGBA32; + avctx->pix_fmt = PIX_FMT_RGB32; break; default: av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", s->bpp); diff --git a/libavcodec/truemotion1.c b/libavcodec/truemotion1.c index f1673a80aa..7ee140c1ef 100644 --- a/libavcodec/truemotion1.c +++ b/libavcodec/truemotion1.c @@ -417,7 +417,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s) // FIXME: where to place this ?!?! 
if (compression_types[header.compression].algorithm == ALGO_RGB24H) - s->avctx->pix_fmt = PIX_FMT_RGBA32; + s->avctx->pix_fmt = PIX_FMT_RGB32; else s->avctx->pix_fmt = PIX_FMT_RGB555; // RGB565 is supported aswell diff --git a/libavcodec/tscc.c b/libavcodec/tscc.c index e379abbbf8..c2c04f14f3 100644 --- a/libavcodec/tscc.c +++ b/libavcodec/tscc.c @@ -283,7 +283,7 @@ static int decode_init(AVCodecContext *avctx) case 24: avctx->pix_fmt = PIX_FMT_BGR24; break; - case 32: avctx->pix_fmt = PIX_FMT_RGBA32; break; + case 32: avctx->pix_fmt = PIX_FMT_RGB32; break; default: av_log(avctx, AV_LOG_ERROR, "Camtasia error: unknown depth %i bpp\n", avctx->bits_per_sample); return -1; } diff --git a/libavcodec/utils.c b/libavcodec/utils.c index f61fbdb57b..61b0297723 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -172,7 +172,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ switch(s->pix_fmt){ case PIX_FMT_YUV420P: - case PIX_FMT_YUV422: + case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: @@ -186,7 +186,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ h_align= 16; break; case PIX_FMT_YUV411P: - case PIX_FMT_UYVY411: + case PIX_FMT_UYYVYY411: w_align=32; h_align=8; break; diff --git a/libavformat/dc1394.c b/libavformat/dc1394.c index 5098c0fdfc..70bbf2e75a 100644 --- a/libavformat/dc1394.c +++ b/libavformat/dc1394.c @@ -42,7 +42,7 @@ struct dc1394_frame_format { int frame_size_id; } dc1394_frame_formats[] = { { 320, 240, PIX_FMT_UYVY422, MODE_320x240_YUV422 }, - { 640, 480, PIX_FMT_UYVY411, MODE_640x480_YUV411 }, + { 640, 480, PIX_FMT_UYYVYY411, MODE_640x480_YUV411 }, { 640, 480, PIX_FMT_UYVY422, MODE_640x480_YUV422 }, { 0, 0, 0, MODE_320x240_YUV422 } /* default -- gotta be the last one */ }; diff --git a/libavformat/grab.c b/libavformat/grab.c index 4f37b87fad..4395c18846 100644 --- a/libavformat/grab.c +++ b/libavformat/grab.c @@ -124,7 +124,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) if (ap->pix_fmt == PIX_FMT_YUV420P) { desired_palette = VIDEO_PALETTE_YUV420P; desired_depth = 12; - } else if (ap->pix_fmt == PIX_FMT_YUV422) { + } else if (ap->pix_fmt == PIX_FMT_YUYV422) { desired_palette = VIDEO_PALETTE_YUV422; desired_depth = 16; } else if (ap->pix_fmt == PIX_FMT_BGR24) { @@ -260,7 +260,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) break; case VIDEO_PALETTE_YUV422: frame_size = width * height * 2; - st->codec->pix_fmt = PIX_FMT_YUV422; + st->codec->pix_fmt = PIX_FMT_YUYV422; break; case VIDEO_PALETTE_RGB24: frame_size = width * height * 3; diff --git a/libavformat/sgi.c b/libavformat/sgi.c index 5adcb3791e..da6501b61e 100644 --- a/libavformat/sgi.c +++ b/libavformat/sgi.c @@ -101,7 +101,7 @@ static int read_uncompressed_sgi(const SGIInfo *si, for (z = 0; z < si->zsize; z++) { #ifndef WORDS_BIGENDIAN - /* rgba -> bgra for rgba32 on little endian cpus */ + /* rgba -> bgra for rgb32 on little endian cpus */ if (si->zsize == 4 && z != 3) chan_offset = 2 - z; else @@ -130,7 +130,7 @@ static int expand_rle_row(ByteIOContext *f, unsigned char *optr, int length = 0; #ifndef WORDS_BIGENDIAN - /* rgba -> bgra for rgba32 on little endian cpus */ + /* rgba -> bgra for rgb32 on little endian cpus */ if (pixelstride == 4 && chan_offset != 3) { chan_offset = 2 - chan_offset; } @@ -241,7 +241,7 @@ static int sgi_read(ByteIOContext *f, } else if (s->zsize == SGI_RGB) { info->pix_fmt = PIX_FMT_RGB24; } else if (s->zsize == SGI_RGBA) { - 
info->pix_fmt = PIX_FMT_RGBA32; + info->pix_fmt = PIX_FMT_RGB32; } else { return AVERROR_INVALIDDATA; } @@ -386,7 +386,7 @@ static int sgi_write(ByteIOContext *pb, AVImageInfo *info) si->dimension = SGI_MULTI_CHAN; si->zsize = SGI_RGB; break; - case PIX_FMT_RGBA32: + case PIX_FMT_RGB32: si->dimension = SGI_MULTI_CHAN; si->zsize = SGI_RGBA; break; @@ -408,7 +408,7 @@ static int sgi_write(ByteIOContext *pb, AVImageInfo *info) for (z = 0; z < si->zsize; z++) { #ifndef WORDS_BIGENDIAN - /* rgba -> bgra for rgba32 on little endian cpus */ + /* rgba -> bgra for rgb32 on little endian cpus */ if (si->zsize == 4 && z != 3) chan_offset = 2 - z; else @@ -451,7 +451,7 @@ AVImageFormat sgi_image_format = { "sgi,rgb,rgba,bw", sgi_probe, sgi_read, - (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGBA32), + (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGB32), #ifdef CONFIG_MUXERS sgi_write, #else diff --git a/libavformat/v4l2.c b/libavformat/v4l2.c index d6384e1e05..d5754aef49 100644 --- a/libavformat/v4l2.c +++ b/libavformat/v4l2.c @@ -74,7 +74,7 @@ static struct fmt_map fmt_conversion_table[] = { .v4l2_fmt = V4L2_PIX_FMT_YUV422P, }, { - .ff_fmt = PIX_FMT_YUV422, + .ff_fmt = PIX_FMT_YUYV422, .v4l2_fmt = V4L2_PIX_FMT_YUYV, }, { @@ -99,7 +99,7 @@ static struct fmt_map fmt_conversion_table[] = { }, /* { - .ff_fmt = PIX_FMT_RGBA32, + .ff_fmt = PIX_FMT_RGB32, .v4l2_fmt = V4L2_PIX_FMT_BGR32, }, */ diff --git a/libavformat/x11grab.c b/libavformat/x11grab.c index 86b13ed548..f3e5579fb8 100644 --- a/libavformat/x11grab.c +++ b/libavformat/x11grab.c @@ -221,7 +221,7 @@ x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) return AVERROR_IO; } #endif - input_pixfmt = PIX_FMT_RGBA32; + input_pixfmt = PIX_FMT_RGB32; break; default: av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel); diff --git a/libavutil/avutil.h b/libavutil/avutil.h index d85755cd8b..429a13c2b7 100644 --- a/libavutil/avutil.h +++ b/libavutil/avutil.h @@ -26,16 +26,11 @@ * external api header. 
*/ - -#ifdef __cplusplus -extern "C" { -#endif - #define AV_STRINGIFY(s) AV_TOSTRING(s) #define AV_TOSTRING(s) #s -#define LIBAVUTIL_VERSION_INT ((49<<16)+(3<<8)+0) -#define LIBAVUTIL_VERSION 49.3.0 +#define LIBAVUTIL_VERSION_INT ((50<<16)+(0<<8)+0) +#define LIBAVUTIL_VERSION 50.0.0 #define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT #define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) @@ -130,8 +125,4 @@ enum PixelFormat { #define PIX_FMT_YUV422 PIX_FMT_YUYV422 #endif -#ifdef __cplusplus -} -#endif - #endif /* AVUTIL_H */ diff --git a/vhook/imlib2.c b/vhook/imlib2.c index 1c137724f5..868182de5e 100644 --- a/vhook/imlib2.c +++ b/vhook/imlib2.c @@ -340,12 +340,12 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, imlib_context_set_image(image); data = imlib_image_get_data(); - avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGBA32, width, height); + avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGB32, width, height); // if we already got a SWS context, let's realloc if is not re-useable ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx, width, height, pix_fmt, - width, height, PIX_FMT_RGBA32, + width, height, PIX_FMT_RGB32, sws_flags, NULL, NULL, NULL); if (ci->toRGB_convert_ctx == NULL) { av_log(NULL, AV_LOG_ERROR, @@ -430,7 +430,7 @@ void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, } ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx, - width, height, PIX_FMT_RGBA32, + width, height, PIX_FMT_RGB32, width, height, pix_fmt, sws_flags, NULL, NULL, NULL); if (ci->fromRGB_convert_ctx == NULL) { diff --git a/vhook/watermark.c b/vhook/watermark.c index 4d2acd2aad..db9092ff1c 100644 --- a/vhook/watermark.c +++ b/vhook/watermark.c @@ -208,18 +208,18 @@ static void Process0(void *ctx, int thrG = ci->thrG; int thrB = ci->thrB; - if (pix_fmt != PIX_FMT_RGBA32) { + if (pix_fmt != PIX_FMT_RGB32) { int size; - size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height); + size = avpicture_get_size(PIX_FMT_RGB32, src_width, src_height); buf = av_malloc(size); - avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height); + avpicture_fill(&picture1, buf, PIX_FMT_RGB32, src_width, src_height); // if we already got a SWS context, let's realloc if is not re-useable ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx, src_width, src_height, pix_fmt, - src_width, src_height, PIX_FMT_RGBA32, + src_width, src_height, PIX_FMT_RGB32, sws_flags, NULL, NULL, NULL); if (ci->toRGB_convert_ctx == NULL) { av_log(NULL, AV_LOG_ERROR, @@ -248,7 +248,7 @@ static void Process0(void *ctx, ym_size = ci->y_size; // I'll do the *4 => <<2 crap later. Most compilers understand that anyway. - // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner. + // According to avcodec.h PIX_FMT_RGB32 is handled in endian specific manner. for (y=0; y<src_height; y++) { offs = y * (src_width * 4); offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs! 
@@ -291,9 +291,9 @@ static void Process0(void *ctx, - if (pix_fmt != PIX_FMT_RGBA32) { + if (pix_fmt != PIX_FMT_RGB32) { ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx, - src_width, src_height, PIX_FMT_RGBA32, + src_width, src_height, PIX_FMT_RGB32, src_width, src_height, pix_fmt, sws_flags, NULL, NULL, NULL); if (ci->fromRGB_convert_ctx == NULL) { @@ -339,18 +339,18 @@ static void Process1(void *ctx, uint32_t pixel; uint32_t pixelm; - if (pix_fmt != PIX_FMT_RGBA32) { + if (pix_fmt != PIX_FMT_RGB32) { int size; - size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height); + size = avpicture_get_size(PIX_FMT_RGB32, src_width, src_height); buf = av_malloc(size); - avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height); + avpicture_fill(&picture1, buf, PIX_FMT_RGB32, src_width, src_height); // if we already got a SWS context, let's realloc if is not re-useable ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx, src_width, src_height, pix_fmt, - src_width, src_height, PIX_FMT_RGBA32, + src_width, src_height, PIX_FMT_RGB32, sws_flags, NULL, NULL, NULL); if (ci->toRGB_convert_ctx == NULL) { av_log(NULL, AV_LOG_ERROR, @@ -379,7 +379,7 @@ static void Process1(void *ctx, ym_size = ci->y_size; // I'll do the *4 => <<2 crap later. Most compilers understand that anyway. - // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner. + // According to avcodec.h PIX_FMT_RGB32 is handled in endian specific manner. for (y=0; y<src_height; y++) { offs = y * (src_width * 4); offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs! @@ -402,9 +402,9 @@ static void Process1(void *ctx, } // foreach X } // foreach Y - if (pix_fmt != PIX_FMT_RGBA32) { + if (pix_fmt != PIX_FMT_RGB32) { ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx, - src_width, src_height, PIX_FMT_RGBA32, + src_width, src_height, PIX_FMT_RGB32, src_width, src_height, pix_fmt, sws_flags, NULL, NULL, NULL); if (ci->fromRGB_convert_ctx == NULL) { @@ -577,12 +577,12 @@ int get_watermark_picture(ContextInfo *ci, int cleanup) } // Determine required buffer size and allocate buffer - ci->numBytes = avpicture_get_size(PIX_FMT_RGBA32, ci->pCodecCtx->width, + ci->numBytes = avpicture_get_size(PIX_FMT_RGB32, ci->pCodecCtx->width, ci->pCodecCtx->height); ci->buffer = av_malloc(ci->numBytes); // Assign appropriate parts of buffer to image planes in pFrameRGB - avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGBA32, + avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGB32, ci->pCodecCtx->width, ci->pCodecCtx->height); } // TODO loop, pingpong etc? @@ -601,11 +601,11 @@ int get_watermark_picture(ContextInfo *ci, int cleanup) // Did we get a video frame? if(ci->frameFinished) { - // Convert the image from its native format to RGBA32 + // Convert the image from its native format to RGB32 ci->watermark_convert_ctx = sws_getCachedContext(ci->watermark_convert_ctx, ci->pCodecCtx->width, ci->pCodecCtx->height, ci->pCodecCtx->pix_fmt, - ci->pCodecCtx->width, ci->pCodecCtx->height, PIX_FMT_RGBA32, + ci->pCodecCtx->width, ci->pCodecCtx->height, PIX_FMT_RGB32, sws_flags, NULL, NULL, NULL); if (ci->watermark_convert_ctx == NULL) { av_log(NULL, AV_LOG_ERROR, |
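Because dropping the old names is an API change, the avutil.h hunk above also bumps LIBAVUTIL_VERSION from 49.3.0 to 50.0.0 and LIBAVUTIL_VERSION_INT to match. For reference, the packed value encodes major<<16, minor<<8, and micro; the stand-alone snippet below, which only borrows the macro value from this diff, decodes it back into dotted form.

    /* Stand-alone illustration (not from the tree): decode the packed
     * LIBAVUTIL_VERSION_INT set by this commit back into major.minor.micro. */
    #include <stdio.h>

    #define LIBAVUTIL_VERSION_INT ((50<<16)+(0<<8)+0)   /* value from the diff */

    int main(void)
    {
        unsigned v = LIBAVUTIL_VERSION_INT;
        printf("libavutil %u.%u.%u\n", v >> 16, (v >> 8) & 0xff, v & 0xff);
        return 0;   /* prints: libavutil 50.0.0 */
    }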