author     Michael Niedermayer <michaelni@gmx.at>  2012-12-23 15:03:15 +0100
committer  Michael Niedermayer <michaelni@gmx.at>  2012-12-23 15:03:15 +0100
commit     8e09e183fc5fbddec153e41bb8f9b30eb6e11add (patch)
tree       15ef362af8657ad573185826617e56a257cc69e3 /libavcodec
parent     def18e5470bdcddb9d737784cc6ae427c195da7c (diff)
parent     b6d7d4efae60845a93be6948ef78482bba1fc291 (diff)
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  8bps: cosmetics
  aasc: cosmetics, reformat
  ansi: remove an extra return
  asvdec: cosmetics, reformat
  aura: cosmetics, reformat

Conflicts:
	libavcodec/aasc.c
	libavcodec/asvdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/8bps.c    |  25
-rw-r--r--  libavcodec/aasc.c    |  14
-rw-r--r--  libavcodec/asvdec.c  | 228
-rw-r--r--  libavcodec/aura.c    |  21
4 files changed, 144 insertions, 144 deletions
diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c
index 6d56fa67db..cfeedb108e 100644
--- a/libavcodec/8bps.c
+++ b/libavcodec/8bps.c
@@ -44,9 +44,6 @@
static const enum AVPixelFormat pixfmt_rgb24[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE };
-/*
- * Decoder context
- */
typedef struct EightBpsContext {
AVCodecContext *avctx;
AVFrame pic;
@@ -57,12 +54,6 @@ typedef struct EightBpsContext {
uint32_t pal[256];
} EightBpsContext;
-
-/*
- *
- * Decode a frame
- *
- */
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
@@ -151,12 +142,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
return buf_size;
}
-
-/*
- *
- * Init 8BPS decoder
- *
- */
static av_cold int decode_init(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
@@ -202,14 +187,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
-
-
-
-/*
- *
- * Uninit 8BPS decoder
- *
- */
static av_cold int decode_end(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
@@ -220,8 +197,6 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}
-
-
AVCodec ff_eightbps_decoder = {
.name = "8bps",
.type = AVMEDIA_TYPE_VIDEO,
diff --git a/libavcodec/aasc.c b/libavcodec/aasc.c
index 01ec062804..0d4704fc3e 100644
--- a/libavcodec/aasc.c
+++ b/libavcodec/aasc.c
@@ -79,8 +79,8 @@ static int aasc_decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
- AascContext *s = avctx->priv_data;
+ int buf_size = avpkt->size;
+ AascContext *s = avctx->priv_data;
int compr, i, stride, psize;
if (buf_size < 4) {
@@ -95,8 +95,8 @@ static int aasc_decode_frame(AVCodecContext *avctx,
return -1;
}
- compr = AV_RL32(buf);
- buf += 4;
+ compr = AV_RL32(buf);
+ buf += 4;
buf_size -= 4;
psize = avctx->bits_per_coded_sample / 8;
switch (avctx->codec_tag) {
@@ -105,11 +105,11 @@ static int aasc_decode_frame(AVCodecContext *avctx,
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
break;
case MKTAG('A', 'A', 'S', 'C'):
- switch(compr){
+ switch (compr) {
case 0:
stride = (avctx->width * psize + psize) & ~psize;
- for(i = avctx->height - 1; i >= 0; i--){
- if(avctx->width * psize > buf_size){
+ for (i = avctx->height - 1; i >= 0; i--) {
+ if (avctx->width * psize > buf_size) {
av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
break;
}
diff --git a/libavcodec/asvdec.c b/libavcodec/asvdec.c
index 8282511239..7c3b30c8db 100644
--- a/libavcodec/asvdec.c
+++ b/libavcodec/asvdec.c
@@ -43,124 +43,148 @@ static VLC dc_ccp_vlc;
static VLC ac_ccp_vlc;
static VLC asv2_level_vlc;
-static av_cold void init_vlcs(ASV1Context *a){
+static av_cold void init_vlcs(ASV1Context *a)
+{
static int done = 0;
if (!done) {
done = 1;
INIT_VLC_STATIC(&ccp_vlc, VLC_BITS, 17,
- &ff_asv_ccp_tab[0][1], 2, 1,
- &ff_asv_ccp_tab[0][0], 2, 1, 64);
+ &ff_asv_ccp_tab[0][1], 2, 1,
+ &ff_asv_ccp_tab[0][0], 2, 1, 64);
INIT_VLC_STATIC(&dc_ccp_vlc, VLC_BITS, 8,
- &ff_asv_dc_ccp_tab[0][1], 2, 1,
- &ff_asv_dc_ccp_tab[0][0], 2, 1, 64);
+ &ff_asv_dc_ccp_tab[0][1], 2, 1,
+ &ff_asv_dc_ccp_tab[0][0], 2, 1, 64);
INIT_VLC_STATIC(&ac_ccp_vlc, VLC_BITS, 16,
- &ff_asv_ac_ccp_tab[0][1], 2, 1,
- &ff_asv_ac_ccp_tab[0][0], 2, 1, 64);
+ &ff_asv_ac_ccp_tab[0][1], 2, 1,
+ &ff_asv_ac_ccp_tab[0][0], 2, 1, 64);
INIT_VLC_STATIC(&level_vlc, VLC_BITS, 7,
- &ff_asv_level_tab[0][1], 2, 1,
- &ff_asv_level_tab[0][0], 2, 1, 64);
+ &ff_asv_level_tab[0][1], 2, 1,
+ &ff_asv_level_tab[0][0], 2, 1, 64);
INIT_VLC_STATIC(&asv2_level_vlc, ASV2_LEVEL_VLC_BITS, 63,
- &ff_asv2_level_tab[0][1], 2, 1,
- &ff_asv2_level_tab[0][0], 2, 1, 1024);
+ &ff_asv2_level_tab[0][1], 2, 1,
+ &ff_asv2_level_tab[0][0], 2, 1, 1024);
}
}
//FIXME write a reversed bitstream reader to avoid the double reverse
-static inline int asv2_get_bits(GetBitContext *gb, int n){
- return ff_reverse[ get_bits(gb, n) << (8-n) ];
+static inline int asv2_get_bits(GetBitContext *gb, int n)
+{
+ return ff_reverse[get_bits(gb, n) << (8-n)];
}
-static inline int asv1_get_level(GetBitContext *gb){
- int code= get_vlc2(gb, level_vlc.table, VLC_BITS, 1);
+static inline int asv1_get_level(GetBitContext *gb)
+{
+ int code = get_vlc2(gb, level_vlc.table, VLC_BITS, 1);
- if(code==3) return get_sbits(gb, 8);
- else return code - 3;
+ if (code == 3)
+ return get_sbits(gb, 8);
+ else
+ return code - 3;
}
-static inline int asv2_get_level(GetBitContext *gb){
- int code= get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);
+static inline int asv2_get_level(GetBitContext *gb)
+{
+ int code = get_vlc2(gb, asv2_level_vlc.table, ASV2_LEVEL_VLC_BITS, 1);
- if(code==31) return (int8_t)asv2_get_bits(gb, 8);
- else return code - 31;
+ if (code == 31)
+ return (int8_t)asv2_get_bits(gb, 8);
+ else
+ return code - 31;
}
-static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64]){
+static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64])
+{
int i;
- block[0]= 8*get_bits(&a->gb, 8);
+ block[0] = 8 * get_bits(&a->gb, 8);
- for(i=0; i<11; i++){
- const int ccp= get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);
+ for (i = 0; i < 11; i++) {
+ const int ccp = get_vlc2(&a->gb, ccp_vlc.table, VLC_BITS, 1);
- if(ccp){
- if(ccp == 16) break;
- if(ccp < 0 || i>=10){
+ if (ccp) {
+ if (ccp == 16)
+ break;
+ if (ccp < 0 || i >= 10) {
av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
return -1;
}
- if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
- if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
- if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
- if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv1_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
+ if (ccp & 8)
+ block[a->scantable.permutated[4 * i + 0]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 0]) >> 4;
+ if (ccp & 4)
+ block[a->scantable.permutated[4 * i + 1]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 1]) >> 4;
+ if (ccp & 2)
+ block[a->scantable.permutated[4 * i + 2]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 2]) >> 4;
+ if (ccp & 1)
+ block[a->scantable.permutated[4 * i + 3]] = (asv1_get_level(&a->gb) * a->intra_matrix[4 * i + 3]) >> 4;
}
}
return 0;
}
-static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64]){
+static inline int asv2_decode_block(ASV1Context *a, DCTELEM block[64])
+{
int i, count, ccp;
- count= asv2_get_bits(&a->gb, 4);
+ count = asv2_get_bits(&a->gb, 4);
- block[0]= 8*asv2_get_bits(&a->gb, 8);
+ block[0] = 8 * asv2_get_bits(&a->gb, 8);
- ccp= get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
- if(ccp){
- if(ccp&4) block[a->scantable.permutated[1]]= (asv2_get_level(&a->gb) * a->intra_matrix[1])>>4;
- if(ccp&2) block[a->scantable.permutated[2]]= (asv2_get_level(&a->gb) * a->intra_matrix[2])>>4;
- if(ccp&1) block[a->scantable.permutated[3]]= (asv2_get_level(&a->gb) * a->intra_matrix[3])>>4;
+ ccp = get_vlc2(&a->gb, dc_ccp_vlc.table, VLC_BITS, 1);
+ if (ccp) {
+ if (ccp & 4)
+ block[a->scantable.permutated[1]] = (asv2_get_level(&a->gb) * a->intra_matrix[1]) >> 4;
+ if (ccp & 2)
+ block[a->scantable.permutated[2]] = (asv2_get_level(&a->gb) * a->intra_matrix[2]) >> 4;
+ if (ccp & 1)
+ block[a->scantable.permutated[3]] = (asv2_get_level(&a->gb) * a->intra_matrix[3]) >> 4;
}
- for(i=1; i<count+1; i++){
- const int ccp= get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);
-
- if(ccp){
- if(ccp&8) block[a->scantable.permutated[4*i+0]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+0])>>4;
- if(ccp&4) block[a->scantable.permutated[4*i+1]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+1])>>4;
- if(ccp&2) block[a->scantable.permutated[4*i+2]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+2])>>4;
- if(ccp&1) block[a->scantable.permutated[4*i+3]]= (asv2_get_level(&a->gb) * a->intra_matrix[4*i+3])>>4;
+ for (i = 1; i < count + 1; i++) {
+ const int ccp = get_vlc2(&a->gb, ac_ccp_vlc.table, VLC_BITS, 1);
+
+ if (ccp) {
+ if (ccp & 8)
+ block[a->scantable.permutated[4*i + 0]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 0]) >> 4;
+ if (ccp & 4)
+ block[a->scantable.permutated[4*i + 1]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 1]) >> 4;
+ if (ccp & 2)
+ block[a->scantable.permutated[4*i + 2]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 2]) >> 4;
+ if (ccp & 1)
+ block[a->scantable.permutated[4*i + 3]] = (asv2_get_level(&a->gb) * a->intra_matrix[4*i + 3]) >> 4;
}
}
return 0;
}
-static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
+static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64])
+{
int i;
a->dsp.clear_blocks(block[0]);
- if(a->avctx->codec_id == AV_CODEC_ID_ASV1){
- for(i=0; i<6; i++){
- if( asv1_decode_block(a, block[i]) < 0)
+ if (a->avctx->codec_id == AV_CODEC_ID_ASV1) {
+ for (i = 0; i < 6; i++) {
+ if (asv1_decode_block(a, block[i]) < 0)
return -1;
}
- }else{
- for(i=0; i<6; i++){
- if( asv2_decode_block(a, block[i]) < 0)
+ } else {
+ for (i = 0; i < 6; i++) {
+ if (asv2_decode_block(a, block[i]) < 0)
return -1;
}
}
return 0;
}
-static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
- DCTELEM (*block)[64]= a->block;
- int linesize= a->picture.linesize[0];
+static inline void idct_put(ASV1Context *a, int mb_x, int mb_y)
+{
+ DCTELEM (*block)[64] = a->block;
+ int linesize = a->picture.linesize[0];
uint8_t *dest_y = a->picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
uint8_t *dest_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
@@ -171,7 +195,7 @@ static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
a->dsp.idct_put(dest_y + 8*linesize , linesize, block[2]);
a->dsp.idct_put(dest_y + 8*linesize + 8, linesize, block[3]);
- if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
+ if (!(a->avctx->flags&CODEC_FLAG_GRAY)) {
a->dsp.idct_put(dest_cb, a->picture.linesize[1], block[4]);
a->dsp.idct_put(dest_cr, a->picture.linesize[2], block[5]);
}
@@ -181,62 +205,62 @@ static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
ASV1Context * const a = avctx->priv_data;
- AVFrame *picture = data;
- AVFrame * const p= &a->picture;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ AVFrame *picture = data;
+ AVFrame * const p = &a->picture;
int mb_x, mb_y;
- if(p->data[0])
+ if (p->data[0])
avctx->release_buffer(avctx, p);
- p->reference= 0;
- if(ff_get_buffer(avctx, p) < 0){
+ p->reference = 0;
+ if (ff_get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
- p->pict_type= AV_PICTURE_TYPE_I;
- p->key_frame= 1;
+ p->pict_type = AV_PICTURE_TYPE_I;
+ p->key_frame = 1;
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
buf_size);
if (!a->bitstream_buffer)
return AVERROR(ENOMEM);
- if(avctx->codec_id == AV_CODEC_ID_ASV1)
+ if (avctx->codec_id == AV_CODEC_ID_ASV1)
a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
- else{
+ else {
int i;
- for(i=0; i<buf_size; i++)
- a->bitstream_buffer[i]= ff_reverse[ buf[i] ];
+ for (i = 0; i < buf_size; i++)
+ a->bitstream_buffer[i] = ff_reverse[buf[i]];
}
init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);
- for(mb_y=0; mb_y<a->mb_height2; mb_y++){
- for(mb_x=0; mb_x<a->mb_width2; mb_x++){
- if( decode_mb(a, a->block) <0)
+ for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
+ for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
+ if (decode_mb(a, a->block) < 0)
return -1;
idct_put(a, mb_x, mb_y);
}
}
- if(a->mb_width2 != a->mb_width){
- mb_x= a->mb_width2;
- for(mb_y=0; mb_y<a->mb_height2; mb_y++){
- if( decode_mb(a, a->block) <0)
+ if (a->mb_width2 != a->mb_width) {
+ mb_x = a->mb_width2;
+ for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
+ if (decode_mb(a, a->block) < 0)
return -1;
idct_put(a, mb_x, mb_y);
}
}
- if(a->mb_height2 != a->mb_height){
- mb_y= a->mb_height2;
- for(mb_x=0; mb_x<a->mb_width; mb_x++){
- if( decode_mb(a, a->block) <0)
+ if (a->mb_height2 != a->mb_height) {
+ mb_y = a->mb_height2;
+ for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
+ if (decode_mb(a, a->block) < 0)
return -1;
idct_put(a, mb_x, mb_y);
@@ -248,50 +272,52 @@ static int decode_frame(AVCodecContext *avctx,
emms_c();
- return (get_bits_count(&a->gb)+31)/32*4;
+ return (get_bits_count(&a->gb) + 31) / 32 * 4;
}
-static av_cold int decode_init(AVCodecContext *avctx){
+static av_cold int decode_init(AVCodecContext *avctx)
+{
ASV1Context * const a = avctx->priv_data;
- AVFrame *p= &a->picture;
+ AVFrame *p = &a->picture;
+ const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
int i;
- const int scale= avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
ff_asv_common_init(avctx);
init_vlcs(a);
ff_init_scantable(a->dsp.idct_permutation, &a->scantable, ff_asv_scantab);
- avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- if(avctx->extradata_size < 1 || (a->inv_qscale= avctx->extradata[0]) == 0){
+ if (avctx->extradata_size < 1 || (a->inv_qscale = avctx->extradata[0]) == 0) {
av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
- if(avctx->codec_id == AV_CODEC_ID_ASV1)
- a->inv_qscale= 6;
+ if (avctx->codec_id == AV_CODEC_ID_ASV1)
+ a->inv_qscale = 6;
else
- a->inv_qscale= 10;
+ a->inv_qscale = 10;
}
- for(i=0; i<64; i++){
+ for (i = 0; i < 64; i++) {
int index = ff_asv_scantab[i];
- a->intra_matrix[i]= 64*scale*ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
+ a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] / a->inv_qscale;
}
- p->qstride= a->mb_width;
- p->qscale_table= av_malloc( p->qstride * a->mb_height);
- p->quality= (32*scale + a->inv_qscale/2)/a->inv_qscale;
- memset(p->qscale_table, p->quality, p->qstride*a->mb_height);
+ p->qstride = a->mb_width;
+ p->qscale_table = av_malloc(p->qstride * a->mb_height);
+ p->quality = (32 * scale + a->inv_qscale / 2) / a->inv_qscale;
+ memset(p->qscale_table, p->quality, p->qstride * a->mb_height);
return 0;
}
-static av_cold int decode_end(AVCodecContext *avctx){
+static av_cold int decode_end(AVCodecContext *avctx)
+{
ASV1Context * const a = avctx->priv_data;
av_freep(&a->bitstream_buffer);
av_freep(&a->picture.qscale_table);
- a->bitstream_buffer_size=0;
+ a->bitstream_buffer_size = 0;
- if(a->picture.data[0])
+ if (a->picture.data[0])
avctx->release_buffer(avctx, &a->picture);
return 0;
diff --git a/libavcodec/aura.c b/libavcodec/aura.c
index e510a9f64c..cd4e42b609 100644
--- a/libavcodec/aura.c
+++ b/libavcodec/aura.c
@@ -50,8 +50,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *pkt)
{
- AuraDecodeContext *s=avctx->priv_data;
-
+ AuraDecodeContext *s = avctx->priv_data;
uint8_t *Y, *U, *V;
uint8_t val;
int x, y;
@@ -69,12 +68,12 @@ static int aura_decode_frame(AVCodecContext *avctx,
/* pixel data starts 48 bytes in, after 3x16-byte tables */
buf += 48;
- if(s->frame.data[0])
+ if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
s->frame.reference = 0;
- if(ff_get_buffer(avctx, &s->frame) < 0) {
+ if (ff_get_buffer(avctx, &s->frame) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
@@ -86,23 +85,23 @@ static int aura_decode_frame(AVCodecContext *avctx,
/* iterate through each line in the height */
for (y = 0; y < avctx->height; y++) {
/* reset predictors */
- val = *buf++;
+ val = *buf++;
U[0] = val & 0xF0;
Y[0] = val << 4;
- val = *buf++;
+ val = *buf++;
V[0] = val & 0xF0;
Y[1] = Y[0] + delta_table[val & 0xF];
- Y += 2; U++; V++;
+ Y += 2; U++; V++;
/* iterate through the remaining pixel groups (4 pixels/group) */
for (x = 1; x < (avctx->width >> 1); x++) {
- val = *buf++;
+ val = *buf++;
U[0] = U[-1] + delta_table[val >> 4];
Y[0] = Y[-1] + delta_table[val & 0xF];
- val = *buf++;
+ val = *buf++;
V[0] = V[-1] + delta_table[val >> 4];
Y[1] = Y[ 0] + delta_table[val & 0xF];
- Y += 2; U++; V++;
+ Y += 2; U++; V++;
}
Y += s->frame.linesize[0] - avctx->width;
U += s->frame.linesize[1] - (avctx->width >> 1);
@@ -110,7 +109,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
}
*got_frame = 1;
- *(AVFrame*)data= s->frame;
+ *(AVFrame*)data = s->frame;
return pkt->size;
}