author    Michael Niedermayer <michaelni@gmx.at>  2006-07-01 22:52:56 +0000
committer Michael Niedermayer <michaelni@gmx.at>  2006-07-01 22:52:56 +0000
commit    0abc2e73f864d190e5c9b5841f3bccf64f8cee0e
tree      60e2361d3f50d6fb81083a0f872dfb2c7ff89a7f
parent    91c58c944fb7dcd0ecc7b8cf9ffd879bc7b1aa06
new files for the CAVS decoder by Stefan Gehrer <stefan.gehrer@gmx.de>
Originally committed as revision 5567 to svn://svn.ffmpeg.org/ffmpeg/trunk
-rw-r--r--  libavcodec/cavs.c     | 1550
-rw-r--r--  libavcodec/cavsdata.h |  613
-rw-r--r--  libavcodec/cavsdsp.c  |  511
-rw-r--r--  libavcodec/cavsdsp.h  |   95
4 files changed, 2769 insertions(+), 0 deletions(-)
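The patch registers the decoder with libavcodec through the AVCodec struct at the end of cavs.c (CODEC_ID_CAVS, PIX_FMT_YUV420P, CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY). Below is a minimal usage sketch against the libavcodec API of that period, not part of the commit; the helper name decode_one_cavs_frame, the assumption that buf holds one complete coded frame, and the exact init/cleanup calls are assumptions, while CODEC_ID_CAVS and the decoder behaviour are taken from the patch itself.

/* Hypothetical caller sketch (not part of this commit). */
#include "avcodec.h"

int decode_one_cavs_frame(uint8_t *buf, int buf_size)
{
    AVCodec *codec;
    AVCodecContext *c;
    AVFrame *frame;
    int got_picture = 0;

    avcodec_init();
    avcodec_register_all();

    codec = avcodec_find_decoder(CODEC_ID_CAVS);
    if (!codec)
        return -1;
    c     = avcodec_alloc_context();
    frame = avcodec_alloc_frame();
    if (avcodec_open(c, codec) < 0)
        return -1;

    /* cavs_decode_frame() keeps reference pictures in its DPB, so with
     * CODEC_CAP_DELAY the first call(s) may return no picture yet. */
    avcodec_decode_video(c, frame, &got_picture, buf, buf_size);
    if (got_picture) {
        /* frame->data[0..2] now hold the YUV420P planes */
    }

    avcodec_close(c);
    av_free(c);
    av_free(frame);
    return got_picture ? 0 : -1;
}

The diff of the new files follows.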
diff --git a/libavcodec/cavs.c b/libavcodec/cavs.c new file mode 100644 index 0000000000..c8bf89f966 --- /dev/null +++ b/libavcodec/cavs.c @@ -0,0 +1,1550 @@ +/* + * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. + * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "avcodec.h" +#include "bitstream.h" +#include "golomb.h" +#include "mpegvideo.h" +#include "cavsdata.h" + +typedef struct { + MpegEncContext s; + Picture picture; //currently decoded frame + Picture DPB[2]; //reference frames + int dist[2]; //temporal distances from current frame to ref frames + int profile, level; + int aspect_ratio; + int mb_width, mb_height; + int pic_type; + int progressive; + int pic_structure; + int skip_mode_flag; + int loop_filter_disable; + int alpha_offset, beta_offset; + int ref_flag; + int mbx, mby; + int flags; + int stc; + uint8_t *cy, *cu, *cv; + int left_qp; + uint8_t *top_qp; + + /* mv motion vector cache + 0: D3 B2 B3 C2 + 4: A1 X0 X1 - + 8: A3 X2 X3 - + + X are the vectors in the current macroblock (5,6,9,10) + A is the macroblock to the left (4,8) + B is the macroblock to the top (1,2) + C is the macroblock to the top-right (3) + D is the macroblock to the top-left (0) + + the same is repeated for backward motion vectors */ + vector_t mv[2*4*3]; + vector_t *top_mv[2]; + vector_t *col_mv; + + /* luma pred mode cache + 0: -- B2 B3 + 3: A1 X0 X1 + 6: A3 X2 X3 */ + int pred_mode_Y[3*3]; + int *top_pred_Y; + int l_stride, c_stride; + int luma_scan[4]; + int qp; + int qp_fixed; + int cbp; + + /* intra prediction is done with un-deblocked samples + they are saved here before deblocking the MB */ + uint8_t *top_border_y, *top_border_u, *top_border_v; + uint8_t left_border_y[16], left_border_u[8], left_border_v[8]; + uint8_t topleft_border_y, topleft_border_u, topleft_border_v; + + void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride); + void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride); + uint8_t *col_type_base; + uint8_t *col_type; + int sym_factor; + int direct_den[2]; + int scale_den[2]; + int got_keyframe; +} AVSContext; + +/***************************************************************************** + * + * in-loop deblocking filter + * + ****************************************************************************/ + +static inline int get_bs_p(vector_t *mvP, vector_t *mvQ) { + if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) + return 2; + if(mvP->ref != mvQ->ref) + return 1; + if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) ) + return 1; + return 0; +} + +static inline int get_bs_b(vector_t *mvP, vector_t *mvQ) { + if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) { + return 2; + } else { + vector_t *mvPbw = mvP + MV_BWD_OFFS; + vector_t *mvQbw = mvQ + 
MV_BWD_OFFS; + if( (abs( mvP->x - mvQ->x) >= 4) || + (abs( mvP->y - mvQ->y) >= 4) || + (abs(mvPbw->x - mvQbw->x) >= 4) || + (abs(mvPbw->y - mvQbw->y) >= 4) ) + return 1; + } + return 0; +} + +/* boundary strength (bs) mapping: + * + * --4---5-- + * 0 2 | + * | 6 | 7 | + * 1 3 | + * --------- + * + */ + +#define SET_PARAMS \ + alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \ + beta = beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \ + tc = tc_tab[clip(qp_avg + h->alpha_offset,0,63)]; + +static void filter_mb(AVSContext *h, enum mb_t mb_type) { + uint8_t bs[8]; + int qp_avg, alpha, beta, tc; + int i; + + /* save un-deblocked lines */ + h->topleft_border_y = h->top_border_y[h->mbx*16+15]; + h->topleft_border_u = h->top_border_u[h->mbx*8+7]; + h->topleft_border_v = h->top_border_v[h->mbx*8+7]; + memcpy(&h->top_border_y[h->mbx*16], h->cy + 15* h->l_stride,16); + memcpy(&h->top_border_u[h->mbx* 8], h->cu + 7* h->c_stride,8); + memcpy(&h->top_border_v[h->mbx* 8], h->cv + 7* h->c_stride,8); + for(i=0;i<8;i++) { + h->left_border_y[i*2+0] = *(h->cy + 15 + (i*2+0)*h->l_stride); + h->left_border_y[i*2+1] = *(h->cy + 15 + (i*2+1)*h->l_stride); + h->left_border_u[i] = *(h->cu + 7 + i*h->c_stride); + h->left_border_v[i] = *(h->cv + 7 + i*h->c_stride); + } + if(!h->loop_filter_disable) { + /* clear bs */ + *((uint64_t *)bs) = 0; + /* determine bs */ + switch(mb_type) { + case I_8X8: + *((uint64_t *)bs) = 0x0202020202020202ULL; + break; + case P_8X8: + case P_8X16: + bs[2] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]); + bs[3] = get_bs_p(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]); + case P_16X8: + bs[6] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]); + bs[7] = get_bs_p(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]); + case P_16X16: + case P_SKIP: + bs[0] = get_bs_p(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]); + bs[1] = get_bs_p(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]); + bs[4] = get_bs_p(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]); + bs[5] = get_bs_p(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]); + break; + case B_SKIP: + case B_DIRECT: + case B_8X8: + bs[2] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]); + bs[3] = get_bs_b(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]); + bs[6] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]); + bs[7] = get_bs_b(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]); + case B_FWD_16X16: + case B_BWD_16X16: + case B_SYM_16X16: + bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]); + bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]); + bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]); + bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]); + break; + default: + if(mb_type & 1) { //16X8 + bs[6] = bs[7] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]); + } else { //8X16 + bs[2] = bs[3] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]); + } + bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]); + bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]); + bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]); + bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]); + } + if( *((uint64_t *)bs) ) { + if(h->flags & A_AVAIL) { + qp_avg = (h->qp + h->left_qp + 1) >> 1; + SET_PARAMS; + h->s.dsp.cavs_filter_lv(h->cy,h->l_stride,alpha,beta,tc,bs[0],bs[1]); + h->s.dsp.cavs_filter_cv(h->cu,h->c_stride,alpha,beta,tc,bs[0],bs[1]); + h->s.dsp.cavs_filter_cv(h->cv,h->c_stride,alpha,beta,tc,bs[0],bs[1]); + } + qp_avg = h->qp; + SET_PARAMS; + h->s.dsp.cavs_filter_lv(h->cy + 8,h->l_stride,alpha,beta,tc,bs[2],bs[3]); + h->s.dsp.cavs_filter_lh(h->cy + 8*h->l_stride,h->l_stride,alpha,beta,tc, + bs[6],bs[7]); + + if(h->flags & 
B_AVAIL) { + qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1; + SET_PARAMS; + h->s.dsp.cavs_filter_lh(h->cy,h->l_stride,alpha,beta,tc,bs[4],bs[5]); + h->s.dsp.cavs_filter_ch(h->cu,h->c_stride,alpha,beta,tc,bs[4],bs[5]); + h->s.dsp.cavs_filter_ch(h->cv,h->c_stride,alpha,beta,tc,bs[4],bs[5]); + } + } + } + h->left_qp = h->qp; + h->top_qp[h->mbx] = h->qp; +} + +#undef SET_PARAMS + +/***************************************************************************** + * + * spatial intra prediction + * + ****************************************************************************/ + +static inline void load_intra_pred_luma(AVSContext *h, uint8_t *top, + uint8_t *left, int block) { + int i; + + switch(block) { + case 0: + memcpy(&left[1],h->left_border_y,16); + left[0] = left[1]; + left[17] = left[16]; + memcpy(&top[1],&h->top_border_y[h->mbx*16],16); + top[17] = top[16]; + top[0] = top[1]; + if((h->flags & A_AVAIL) && (h->flags & B_AVAIL)) + left[0] = top[0] = h->topleft_border_y; + break; + case 1: + for(i=0;i<8;i++) + left[i+1] = *(h->cy + 7 + i*h->l_stride); + memset(&left[9],left[8],9); + left[0] = left[1]; + memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8); + if(h->flags & C_AVAIL) + memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8); + else + memset(&top[9],top[8],9); + top[17] = top[16]; + top[0] = top[1]; + if(h->flags & B_AVAIL) + left[0] = top[0] = h->top_border_y[h->mbx*16+7]; + break; + case 2: + memcpy(&left[1],&h->left_border_y[8],8); + memset(&left[9],left[8],9); + memcpy(&top[1],h->cy + 7*h->l_stride,16); + top[17] = top[16]; + left[0] = h->left_border_y[7]; + top[0] = top[1]; + if(h->flags & A_AVAIL) + top[0] = left[0]; + break; + case 3: + for(i=0;i<9;i++) + left[i] = *(h->cy + 7 + (i+7)*h->l_stride); + memset(&left[9],left[8],9); + memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9); + memset(&top[9],top[8],9); + break; + } +} + +static inline void load_intra_pred_chroma(uint8_t *stop, uint8_t *sleft, + uint8_t stopleft, uint8_t *dtop, + uint8_t *dleft, int stride, int flags) { + int i; + + if(flags & A_AVAIL) { + for(i=0; i<8; i++) + dleft[i+1] = sleft[i]; + dleft[0] = dleft[1]; + dleft[9] = dleft[8]; + } + if(flags & B_AVAIL) { + for(i=0; i<8; i++) + dtop[i+1] = stop[i]; + dtop[0] = dtop[1]; + dtop[9] = dtop[8]; + if(flags & A_AVAIL) + dleft[0] = dtop[0] = stopleft; + } +} + +static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int y; + uint64_t a = *((uint64_t *)(&top[1])); + for(y=0;y<8;y++) { + *((uint64_t *)(d+y*stride)) = a; + } +} + +static void intra_pred_horiz(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int y; + uint64_t a; + for(y=0;y<8;y++) { + a = left[y+1] * 0x0101010101010101ULL; + *((uint64_t *)(d+y*stride)) = a; + } +} + +static void intra_pred_dc_128(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int y; + uint64_t a = 0x8080808080808080ULL; + for(y=0;y<8;y++) + *((uint64_t *)(d+y*stride)) = a; +} + +static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int x,y,ia; + int ih = 0; + int iv = 0; + uint8_t *cm = cropTbl + MAX_NEG_CROP; + + for(x=0; x<4; x++) { + ih += (x+1)*(top[5+x]-top[3-x]); + iv += (x+1)*(left[5+x]-left[3-x]); + } + ia = (top[8]+left[8])<<4; + ih = (17*ih+16)>>5; + iv = (17*iv+16)>>5; + for(y=0; y<8; y++) + for(x=0; x<8; x++) + d[y*stride+x] = cm[(ia+(x-3)*ih+(y-3)*iv+16)>>5]; +} + +#define LOWPASS(ARRAY,INDEX) \ + (( ARRAY[(INDEX)-1] + 2*ARRAY[(INDEX)] + ARRAY[(INDEX)+1] + 2) >> 2) + +static void intra_pred_lp(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int x,y; + for(y=0; 
y<8; y++) + for(x=0; x<8; x++) + d[y*stride+x] = (LOWPASS(top,x+1) + LOWPASS(left,y+1)) >> 1; +} + +static void intra_pred_down_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int x,y; + for(y=0; y<8; y++) + for(x=0; x<8; x++) + d[y*stride+x] = (LOWPASS(top,x+y+2) + LOWPASS(left,x+y+2)) >> 1; +} + +static void intra_pred_down_right(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int x,y; + for(y=0; y<8; y++) + for(x=0; x<8; x++) + if(x==y) + d[y*stride+x] = (left[1]+2*top[0]+top[1]+2)>>2; + else if(x>y) + d[y*stride+x] = LOWPASS(top,x-y); + else + d[y*stride+x] = LOWPASS(left,y-x); +} + +static void intra_pred_lp_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int x,y; + for(y=0; y<8; y++) + for(x=0; x<8; x++) + d[y*stride+x] = LOWPASS(left,y+1); +} + +static void intra_pred_lp_top(uint8_t *d,uint8_t *top,uint8_t *left,int stride) { + int x,y; + for(y=0; y<8; y++) + for(x=0; x<8; x++) + d[y*stride+x] = LOWPASS(top,x+1); +} + +#undef LOWPASS + +static inline void modify_pred(const int8_t *mod_table, int *mode) { + int newmode = mod_table[(int)*mode]; + if(newmode < 0) { + av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n"); + *mode = 0; + } else { + *mode = newmode; + } +} + +/***************************************************************************** + * + * motion compensation + * + ****************************************************************************/ + +static inline void mc_dir_part(AVSContext *h,Picture *pic,int square, + int chroma_height,int delta,int list,uint8_t *dest_y, + uint8_t *dest_cb,uint8_t *dest_cr,int src_x_offset, + int src_y_offset,qpel_mc_func *qpix_op, + h264_chroma_mc_func chroma_op,vector_t *mv){ + MpegEncContext * const s = &h->s; + const int mx= mv->x + src_x_offset*8; + const int my= mv->y + src_y_offset*8; + const int luma_xy= (mx&3) + ((my&3)<<2); + uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride; + uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride; + uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride; + int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; + int extra_height= extra_width; + int emu=0; + const int full_mx= mx>>2; + const int full_my= my>>2; + const int pic_width = 16*h->mb_width; + const int pic_height = 16*h->mb_height; + + if(!pic->data[0]) + return; + if(mx&7) extra_width -= 3; + if(my&7) extra_height -= 3; + + if( full_mx < 0-extra_width + || full_my < 0-extra_height + || full_mx + 16/*FIXME*/ > pic_width + extra_width + || full_my + 16/*FIXME*/ > pic_height + extra_height){ + ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->l_stride, h->l_stride, + 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height); + src_y= s->edge_emu_buffer + 2 + 2*h->l_stride; + emu=1; + } + + qpix_op[luma_xy](dest_y, src_y, h->l_stride); //FIXME try variable height perhaps? 
+ if(!square){ + qpix_op[luma_xy](dest_y + delta, src_y + delta, h->l_stride); + } + + if(emu){ + ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->c_stride, + 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1); + src_cb= s->edge_emu_buffer; + } + chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx&7, my&7); + + if(emu){ + ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->c_stride, + 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1); + src_cr= s->edge_emu_buffer; + } + chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx&7, my&7); +} + +static inline void mc_part_std(AVSContext *h,int square,int chroma_height,int delta, + uint8_t *dest_y,uint8_t *dest_cb,uint8_t *dest_cr, + int x_offset, int y_offset,qpel_mc_func *qpix_put, + h264_chroma_mc_func chroma_put,qpel_mc_func *qpix_avg, + h264_chroma_mc_func chroma_avg, vector_t *mv){ + qpel_mc_func *qpix_op= qpix_put; + h264_chroma_mc_func chroma_op= chroma_put; + + dest_y += 2*x_offset + 2*y_offset*h->l_stride; + dest_cb += x_offset + y_offset*h->c_stride; + dest_cr += x_offset + y_offset*h->c_stride; + x_offset += 8*h->mbx; + y_offset += 8*h->mby; + + if(mv->ref >= 0){ + Picture *ref= &h->DPB[mv->ref]; + mc_dir_part(h, ref, square, chroma_height, delta, 0, + dest_y, dest_cb, dest_cr, x_offset, y_offset, + qpix_op, chroma_op, mv); + + qpix_op= qpix_avg; + chroma_op= chroma_avg; + } + + if((mv+MV_BWD_OFFS)->ref >= 0){ + Picture *ref= &h->DPB[0]; + mc_dir_part(h, ref, square, chroma_height, delta, 1, + dest_y, dest_cb, dest_cr, x_offset, y_offset, + qpix_op, chroma_op, mv+MV_BWD_OFFS); + } +} + +static void inter_pred(AVSContext *h) { + /* always do 8x8 blocks TODO: are larger blocks worth it? */ + mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 0, + h->s.dsp.put_cavs_qpel_pixels_tab[1], + h->s.dsp.put_h264_chroma_pixels_tab[1], + h->s.dsp.avg_cavs_qpel_pixels_tab[1], + h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X0]); + mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 0, + h->s.dsp.put_cavs_qpel_pixels_tab[1], + h->s.dsp.put_h264_chroma_pixels_tab[1], + h->s.dsp.avg_cavs_qpel_pixels_tab[1], + h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X1]); + mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 4, + h->s.dsp.put_cavs_qpel_pixels_tab[1], + h->s.dsp.put_h264_chroma_pixels_tab[1], + h->s.dsp.avg_cavs_qpel_pixels_tab[1], + h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X2]); + mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 4, + h->s.dsp.put_cavs_qpel_pixels_tab[1], + h->s.dsp.put_h264_chroma_pixels_tab[1], + h->s.dsp.avg_cavs_qpel_pixels_tab[1], + h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X3]); + /* set intra prediction modes to default values */ + h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP; + h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = INTRA_L_LP; +} + +/***************************************************************************** + * + * motion vector prediction + * + ****************************************************************************/ + +static inline void veccpy(vector_t *dst, vector_t *src) { + *((uint64_t *)dst) = *((uint64_t *)src); +} + +static inline void set_mvs(vector_t *mv, enum block_t size) { + switch(size) { + case BLK_16X16: + veccpy(mv+MV_STRIDE ,mv); + veccpy(mv+MV_STRIDE+1,mv); + case BLK_16X8: + veccpy(mv +1,mv); + break; + case BLK_8X16: + veccpy(mv+MV_STRIDE ,mv); + break; + } +} + +static inline void store_mvs(AVSContext *h) { + veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 0], &h->mv[MV_FWD_X0]); + 
veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 1], &h->mv[MV_FWD_X1]); + veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 2], &h->mv[MV_FWD_X2]); + veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 3], &h->mv[MV_FWD_X3]); +} + +static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, vector_t *src, int distp) { + int den = h->scale_den[src->ref]; + + *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9; + *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9; +} + +static inline void mv_pred_median(AVSContext *h, vector_t *mvP, vector_t *mvA, vector_t *mvB, vector_t *mvC) { + int ax, ay, bx, by, cx, cy; + int len_ab, len_bc, len_ca, len_mid; + + /* scale candidates according to their temporal span */ + scale_mv(h, &ax, &ay, mvA, mvP->dist); + scale_mv(h, &bx, &by, mvB, mvP->dist); + scale_mv(h, &cx, &cy, mvC, mvP->dist); + /* find the geometrical median of the three candidates */ + len_ab = abs(ax - bx) + abs(ay - by); + len_bc = abs(bx - cx) + abs(by - cy); + len_ca = abs(cx - ax) + abs(cy - ay); + len_mid = mid_pred(len_ab, len_bc, len_ca); + if(len_mid == len_ab) { + mvP->x = cx; + mvP->y = cy; + } else if(len_mid == len_bc) { + mvP->x = ax; + mvP->y = ay; + } else { + mvP->x = bx; + mvP->y = by; + } +} + +static inline void mv_pred_direct(AVSContext *h, vector_t *pmv_fw, + vector_t *pmv_bw, vector_t *col_mv) { + int den = h->direct_den[col_mv->ref]; + int m = col_mv->x >> 31; + + pmv_fw->dist = h->dist[1]; + pmv_bw->dist = h->dist[0]; + pmv_fw->ref = 1; + pmv_bw->ref = 0; + /* scale the co-located motion vector according to its temporal span */ + pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m; + pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m); + m = col_mv->y >> 31; + pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m; + pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m); +} + +static inline void mv_pred_sym(AVSContext *h, vector_t *src, enum block_t size) { + vector_t *dst = src + MV_BWD_OFFS; + + /* backward mv is the scaled and negated forward mv */ + dst->x = -((src->x * h->sym_factor + 256) >> 9); + dst->y = -((src->y * h->sym_factor + 256) >> 9); + dst->ref = 0; + dst->dist = h->dist[0]; + set_mvs(dst, size); +} + +static void mv_pred(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC, + enum mv_pred_t mode, enum block_t size, int ref) { + vector_t *mvP = &h->mv[nP]; + vector_t *mvA = &h->mv[nP-1]; + vector_t *mvB = &h->mv[nP-4]; + vector_t *mvC = &h->mv[nC]; + int mvAref = mvA->ref; + int mvBref = mvB->ref; + int mvCref; + + mvP->ref = ref; + mvP->dist = h->dist[mvP->ref]; + if(mvC->ref == NOT_AVAIL) + mvC = &h->mv[nP-5]; // set to top-left (mvD) + mvCref = mvC->ref; + if(mode == MV_PRED_PSKIP) { + if((mvAref == NOT_AVAIL) || (mvBref == NOT_AVAIL) || + ((mvA->x | mvA->y | mvA->ref) == 0) || + ((mvB->x | mvB->y | mvB->ref) == 0) ) { + mvP->x = mvP->y = 0; + set_mvs(mvP,size); + return; + } + } + /* if there is only one suitable candidate, take it */ + if((mvAref >= 0) && (mvBref < 0) && (mvCref < 0)) { + mvP->x = mvA->x; + mvP->y = mvA->y; + } else if((mvAref < 0) && (mvBref >= 0) && (mvCref < 0)) { + mvP->x = mvB->x; + mvP->y = mvB->y; + } else if((mvAref < 0) && (mvBref < 0) && (mvCref >= 0)) { + mvP->x = mvC->x; + mvP->y = mvC->y; + } else { + switch(mode) { + case MV_PRED_LEFT: + if(mvAref == mvP->ref) { + mvP->x = mvA->x; + mvP->y = mvA->y; + } else + mv_pred_median(h, mvP, mvA, mvB, mvC); + break; + case MV_PRED_TOP: + if(mvBref == mvP->ref) { + mvP->x = mvB->x; + mvP->y = mvB->y; + } else + 
mv_pred_median(h, mvP, mvA, mvB, mvC); + break; + case MV_PRED_TOPRIGHT: + if(mvCref == mvP->ref) { + mvP->x = mvC->x; + mvP->y = mvC->y; + } else + mv_pred_median(h, mvP, mvA, mvB, mvC); + break; + default: + mv_pred_median(h, mvP, mvA, mvB, mvC); + break; + } + } + if(mode < MV_PRED_PSKIP) { + mvP->x += get_se_golomb(&h->s.gb); + mvP->y += get_se_golomb(&h->s.gb); + } + set_mvs(mvP,size); +} + +/***************************************************************************** + * + * residual data decoding + * + ****************************************************************************/ + +/* kth-order exponential golomb code */ +static inline int get_ue_code(GetBitContext *gb, int order) { + if(order) + return (get_ue_golomb(gb) << order) + get_bits(gb,order); + return get_ue_golomb(gb); +} + +static int decode_residual_block(AVSContext *h, GetBitContext *gb, + const residual_vlc_t *r, int esc_golomb_order, + int qp, uint8_t *dst, int stride) { + int i,pos = -1; + int level_code, esc_code, level, run, mask; + int level_buf[64]; + int run_buf[64]; + int dqm = dequant_mul[qp]; + int dqs = dequant_shift[qp]; + int dqa = 1 << (dqs - 1); + const uint8_t *scantab = ff_zigzag_direct; + DCTELEM block[64]; + + memset(block,0,64*sizeof(DCTELEM)); + for(i=0;i<65;i++) { + level_code = get_ue_code(gb,r->golomb_order); + if(level_code >= ESCAPE_CODE) { + run = (level_code - ESCAPE_CODE) >> 1; + esc_code = get_ue_code(gb,esc_golomb_order); + level = esc_code + (run > r->max_run ? 1 : r->level_add[run]); + while(level > r->inc_limit) + r++; + mask = -(level_code & 1); + level = (level^mask) - mask; + } else { + if(level_code < 0) + return -1; + level = r->rltab[level_code][0]; + if(!level) //end of block signal + break; + run = r->rltab[level_code][1]; + r += r->rltab[level_code][2]; + } + level_buf[i] = level; + run_buf[i] = run; + } + /* inverse scan and dequantization */ + for(i=i-1;i>=0;i--) { + pos += 1 + run_buf[i]; + if(pos > 63) { + av_log(h->s.avctx, AV_LOG_ERROR, + "position out of block bounds at pic %d MB(%d,%d)\n", + h->picture.poc, h->mbx, h->mby); + return -1; + } + block[scantab[pos]] = (level_buf[i]*dqm + dqa) >> dqs; + } + h->s.dsp.cavs_idct8_add(dst,block,stride); + return 0; +} + + +static inline void decode_residual_chroma(AVSContext *h) { + if(h->cbp & (1<<4)) + decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp], + h->cu,h->c_stride); + if(h->cbp & (1<<5)) + decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp], + h->cv,h->c_stride); +} + +static inline void decode_residual_inter(AVSContext *h) { + int block; + + /* get coded block pattern */ + h->cbp = cbp_tab[get_ue_golomb(&h->s.gb)][1]; + /* get quantizer */ + if(h->cbp && !h->qp_fixed) + h->qp += get_se_golomb(&h->s.gb); + for(block=0;block<4;block++) + if(h->cbp & (1<<block)) + decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp, + h->cy + h->luma_scan[block], h->l_stride); + decode_residual_chroma(h); +} + +/***************************************************************************** + * + * macroblock level + * + ****************************************************************************/ + +static inline void init_mb(AVSContext *h) { + int i; + + /* copy predictors from top line (MB B and C) into cache */ + for(i=0;i<3;i++) { + veccpy(&h->mv[MV_FWD_B2+i],&h->top_mv[0][h->mbx*2+i]); + veccpy(&h->mv[MV_BWD_B2+i],&h->top_mv[1][h->mbx*2+i]); + } + h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0]; + h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1]; + /* clear top predictors if MB B is not available */ + 
if(!(h->flags & B_AVAIL)) { + veccpy(&h->mv[MV_FWD_B2],(vector_t *)&un_mv); + veccpy(&h->mv[MV_FWD_B3],(vector_t *)&un_mv); + veccpy(&h->mv[MV_BWD_B2],(vector_t *)&un_mv); + veccpy(&h->mv[MV_BWD_B3],(vector_t *)&un_mv); + h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL; + h->flags &= ~(C_AVAIL|D_AVAIL); + } else if(h->mbx) { + h->flags |= D_AVAIL; + } + if(h->mbx == h->mb_width-1) //MB C not available + h->flags &= ~C_AVAIL; + /* clear top-right predictors if MB C is not available */ + if(!(h->flags & C_AVAIL)) { + veccpy(&h->mv[MV_FWD_C2],(vector_t *)&un_mv); + veccpy(&h->mv[MV_BWD_C2],(vector_t *)&un_mv); + } + /* clear top-left predictors if MB D is not available */ + if(!(h->flags & D_AVAIL)) { + veccpy(&h->mv[MV_FWD_D3],(vector_t *)&un_mv); + veccpy(&h->mv[MV_BWD_D3],(vector_t *)&un_mv); + } + /* set pointer for co-located macroblock type */ + h->col_type = &h->col_type_base[h->mby*h->mb_width + h->mbx]; +} + +static inline void check_for_slice(AVSContext *h); + +static inline int next_mb(AVSContext *h) { + int i; + + h->flags |= A_AVAIL; + h->cy += 16; + h->cu += 8; + h->cv += 8; + /* copy mvs as predictors to the left */ + for(i=0;i<=20;i+=4) + veccpy(&h->mv[i],&h->mv[i+2]); + /* copy bottom mvs from cache to top line */ + veccpy(&h->top_mv[0][h->mbx*2+0],&h->mv[MV_FWD_X2]); + veccpy(&h->top_mv[0][h->mbx*2+1],&h->mv[MV_FWD_X3]); + veccpy(&h->top_mv[1][h->mbx*2+0],&h->mv[MV_BWD_X2]); + veccpy(&h->top_mv[1][h->mbx*2+1],&h->mv[MV_BWD_X3]); + /* next MB address */ + h->mbx++; + if(h->mbx == h->mb_width) { //new mb line + h->flags = B_AVAIL|C_AVAIL; + /* clear left pred_modes */ + h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL; + /* clear left mv predictors */ + for(i=0;i<=20;i+=4) + veccpy(&h->mv[i],(vector_t *)&un_mv); + h->mbx = 0; + h->mby++; + /* re-calculate sample pointers */ + h->cy = h->picture.data[0] + h->mby*16*h->l_stride; + h->cu = h->picture.data[1] + h->mby*8*h->c_stride; + h->cv = h->picture.data[2] + h->mby*8*h->c_stride; + if(h->mby == h->mb_height) { //frame end + return 0; + } else { + //check_for_slice(h); + } + } + return 1; +} + +static void decode_mb_i(AVSContext *h, int is_i_pic) { + GetBitContext *gb = &h->s.gb; + int block, pred_mode_uv; + uint8_t top[18]; + uint8_t left[18]; + uint8_t *d; + + /* get intra prediction modes from stream */ + for(block=0;block<4;block++) { + int nA,nB,predpred; + int pos = scan3x3[block]; + + nA = h->pred_mode_Y[pos-1]; + nB = h->pred_mode_Y[pos-3]; + if((nA == NOT_AVAIL) || (nB == NOT_AVAIL)) + predpred = 2; + else + predpred = FFMIN(nA,nB); + if(get_bits1(gb)) + h->pred_mode_Y[pos] = predpred; + else { + h->pred_mode_Y[pos] = get_bits(gb,2); + if(h->pred_mode_Y[pos] >= predpred) + h->pred_mode_Y[pos]++; + } + } + pred_mode_uv = get_ue_golomb(gb); + if(pred_mode_uv > 6) { + av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n"); + pred_mode_uv = 0; + } + + /* save pred modes before they get modified */ + h->pred_mode_Y[3] = h->pred_mode_Y[5]; + h->pred_mode_Y[6] = h->pred_mode_Y[8]; + h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7]; + h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8]; + + /* modify pred modes according to availability of neighbour samples */ + if(!(h->flags & A_AVAIL)) { + modify_pred(left_modifier_l, &h->pred_mode_Y[4] ); + modify_pred(left_modifier_l, &h->pred_mode_Y[7] ); + modify_pred(left_modifier_c, &pred_mode_uv ); + } + if(!(h->flags & B_AVAIL)) { + modify_pred(top_modifier_l, &h->pred_mode_Y[4] ); + modify_pred(top_modifier_l, &h->pred_mode_Y[5] ); + modify_pred(top_modifier_c, 
&pred_mode_uv ); + } + + /* get coded block pattern */ + if(is_i_pic) + h->cbp = cbp_tab[get_ue_golomb(gb)][0]; + if(h->cbp && !h->qp_fixed) + h->qp += get_se_golomb(gb); //qp_delta + + /* luma intra prediction interleaved with residual decode/transform/add */ + for(block=0;block<4;block++) { + d = h->cy + h->luma_scan[block]; + load_intra_pred_luma(h, top, left, block); + h->intra_pred_l[(int)h->pred_mode_Y[scan3x3[block]]] + (d, top, left, h->l_stride); + if(h->cbp & (1<<block)) + decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride); + } + + /* chroma intra prediction */ + load_intra_pred_chroma(&h->top_border_u[h->mbx*8], h->left_border_u, + h->topleft_border_u, top, left, h->c_stride, h->flags); + h->intra_pred_c[pred_mode_uv](h->cu, top, left, h->c_stride); + load_intra_pred_chroma(&h->top_border_v[h->mbx*8], h->left_border_v, + h->topleft_border_v, top, left, h->c_stride, h->flags); + h->intra_pred_c[pred_mode_uv](h->cv, top, left, h->c_stride); + + decode_residual_chroma(h); + filter_mb(h,I_8X8); + + /* mark motion vectors as intra */ + veccpy( &h->mv[MV_FWD_X0], (vector_t *)&intra_mv); + set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); + veccpy( &h->mv[MV_BWD_X0], (vector_t *)&intra_mv); + set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); + if(h->pic_type != FF_B_TYPE) + *h->col_type = I_8X8; +} + +static void mb_skip_p(AVSContext *h) { + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0); + inter_pred(h); + store_mvs(h); + filter_mb(h,P_SKIP); + *h->col_type = P_SKIP; +} + + +static void mb_skip_b(AVSContext *h) { + int i; + + if(!(*h->col_type)) { + /* intra MB at co-location, do in-plane prediction */ + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1); + mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0); + } else { + /* direct prediction from co-located P MB, block-wise */ + for(i=0;i<4;i++) + mv_pred_direct(h,&h->mv[mv_scan[i]], + &h->mv[mv_scan[i]+MV_BWD_OFFS], + &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + i]); + } +} + +static void decode_mb_p(AVSContext *h, enum mb_t mb_type) { + GetBitContext *gb = &h->s.gb; + int ref[4]; + + switch(mb_type) { + case P_SKIP: + mb_skip_p(h); + return; + case P_16X16: + ref[0] = h->ref_flag ? 0 : get_bits1(gb); + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]); + break; + case P_16X8: + ref[0] = h->ref_flag ? 0 : get_bits1(gb); + ref[2] = h->ref_flag ? 0 : get_bits1(gb); + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]); + mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]); + break; + case P_8X16: + ref[0] = h->ref_flag ? 0 : get_bits1(gb); + ref[1] = h->ref_flag ? 0 : get_bits1(gb); + mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]); + mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, ref[1]); + break; + case P_8X8: + ref[0] = h->ref_flag ? 0 : get_bits1(gb); + ref[1] = h->ref_flag ? 0 : get_bits1(gb); + ref[2] = h->ref_flag ? 0 : get_bits1(gb); + ref[3] = h->ref_flag ? 
0 : get_bits1(gb); + mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]); + mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]); + mv_pred(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]); + mv_pred(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]); + } + inter_pred(h); + store_mvs(h); + decode_residual_inter(h); + filter_mb(h,mb_type); + *h->col_type = mb_type; +} + +static void decode_mb_b(AVSContext *h, enum mb_t mb_type) { + int block; + enum sub_mb_t sub_type[4]; + int flags; + + /* reset all MVs */ + veccpy( &h->mv[MV_FWD_X0], (vector_t *)&dir_mv); + set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); + veccpy( &h->mv[MV_BWD_X0], (vector_t *)&dir_mv); + set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); + switch(mb_type) { + case B_SKIP: + mb_skip_b(h); + inter_pred(h); + filter_mb(h,B_SKIP); + return; + case B_DIRECT: + mb_skip_b(h); + break; + case B_FWD_16X16: + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1); + break; + case B_SYM_16X16: + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1); + mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16); + break; + case B_BWD_16X16: + mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0); + break; + case B_8X8: + for(block=0;block<4;block++) + sub_type[block] = get_bits(&h->s.gb,2); + for(block=0;block<4;block++) { + switch(sub_type[block]) { + case B_SUB_DIRECT: + if(!(*h->col_type)) { + /* intra MB at co-location, do in-plane prediction */ + mv_pred(h, mv_scan[block], mv_scan[block]-3, + MV_PRED_BSKIP, BLK_8X8, 1); + mv_pred(h, mv_scan[block]+MV_BWD_OFFS, + mv_scan[block]-3+MV_BWD_OFFS, + MV_PRED_BSKIP, BLK_8X8, 0); + } else + mv_pred_direct(h,&h->mv[mv_scan[block]], + &h->mv[mv_scan[block]+MV_BWD_OFFS], + &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + block]); + break; + case B_SUB_FWD: + mv_pred(h, mv_scan[block], mv_scan[block]-3, + MV_PRED_MEDIAN, BLK_8X8, 1); + break; + case B_SUB_SYM: + mv_pred(h, mv_scan[block], mv_scan[block]-3, + MV_PRED_MEDIAN, BLK_8X8, 1); + mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8); + break; + } + } + for(block=0;block<4;block++) { + if(sub_type[block] == B_SUB_BWD) + mv_pred(h, mv_scan[block]+MV_BWD_OFFS, + mv_scan[block]+MV_BWD_OFFS-3, + MV_PRED_MEDIAN, BLK_8X8, 0); + } + break; + default: + assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8)); + flags = b_partition_flags[(mb_type-1)>>1]; + if(mb_type & 1) { /* 16x8 macroblock types */ + if(flags & FWD0) + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1); + if(flags & SYM0) { + mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1); + mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8); + } + if(flags & FWD1) + mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1); + if(flags & SYM1) { + mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1); + mv_pred_sym(h, &h->mv[9], BLK_16X8); + } + if(flags & BWD0) + mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0); + if(flags & BWD1) + mv_pred(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0); + } else { /* 8x16 macroblock types */ + if(flags & FWD0) + mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1); + if(flags & SYM0) { + mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1); + mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16); + } + if(flags & FWD1) + mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1); + if(flags & SYM1) { + mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1); + mv_pred_sym(h, &h->mv[6], BLK_8X16); + } + if(flags & BWD0) + mv_pred(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0); + 
if(flags & BWD1) + mv_pred(h, MV_BWD_X1, MV_BWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 0); + } + } + inter_pred(h); + decode_residual_inter(h); + filter_mb(h,mb_type); +} + +/***************************************************************************** + * + * slice level + * + ****************************************************************************/ + +static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) { + if(h->stc > 0xAF) + av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc); + h->mby = h->stc; + if((h->mby == 0) && (!h->qp_fixed)){ + h->qp_fixed = get_bits1(gb); + h->qp = get_bits(gb,6); + } + /* inter frame or second slice can have weighting params */ + if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2)) + if(get_bits1(gb)) { //slice_weighting_flag + av_log(h->s.avctx, AV_LOG_ERROR, + "weighted prediction not yet supported\n"); + } + return 0; +} + +static inline void check_for_slice(AVSContext *h) { + GetBitContext *gb = &h->s.gb; + int align; + align = (-get_bits_count(gb)) & 7; + if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) { + get_bits_long(gb,24+align); + h->stc = get_bits(gb,8); + decode_slice_header(h,gb); + } +} + +/***************************************************************************** + * + * frame level + * + ****************************************************************************/ + +static void init_pic(AVSContext *h) { + int i; + + /* clear some predictors */ + for(i=0;i<=20;i+=4) + veccpy(&h->mv[i],(vector_t *)&un_mv); + veccpy(&h->mv[MV_BWD_X0], (vector_t *)&dir_mv); + set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); + veccpy(&h->mv[MV_FWD_X0], (vector_t *)&dir_mv); + set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); + h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL; + h->cy = h->picture.data[0]; + h->cu = h->picture.data[1]; + h->cv = h->picture.data[2]; + h->l_stride = h->picture.linesize[0]; + h->c_stride = h->picture.linesize[1]; + h->luma_scan[2] = 8*h->l_stride; + h->luma_scan[3] = 8*h->l_stride+8; + h->mbx = h->mby = 0; + h->flags = 0; +} + +static int decode_pic(AVSContext *h) { + MpegEncContext *s = &h->s; + int i,skip_count; + enum mb_t mb_type; + + if (!s->context_initialized) { + if (MPV_common_init(s) < 0) + return -1; + } + get_bits(&s->gb,16);//bbv_dwlay + if(h->stc == PIC_PB_START_CODE) { + h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE; + /* make sure we have the reference frames we need */ + if(!h->DPB[0].data[0] || + (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE)) + return -1; + } else { + h->pic_type = FF_I_TYPE; + if(get_bits1(&s->gb)) + get_bits(&s->gb,16);//time_code + } + /* release last B frame */ + if(h->picture.data[0]) + s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture); + + s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture); + init_pic(h); + h->picture.poc = get_bits(&s->gb,8)*2; + + /* get temporal distances and MV scaling factors */ + if(h->pic_type != FF_B_TYPE) { + h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512; + } else { + h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512; + } + h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512; + h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0; + h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0; + if(h->pic_type == FF_B_TYPE) { + h->sym_factor = h->dist[0]*h->scale_den[1]; + } else { + h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0; + h->direct_den[1] = h->dist[1] ? 
16384/h->dist[1] : 0; + } + + if(s->low_delay) + get_ue_golomb(&s->gb); //bbv_check_times + h->progressive = get_bits1(&s->gb); + if(h->progressive) + h->pic_structure = 1; + else if(!(h->pic_structure = get_bits1(&s->gb) && (h->stc == PIC_PB_START_CODE)) ) + get_bits1(&s->gb); //advanced_pred_mode_disable + skip_bits1(&s->gb); //top_field_first + skip_bits1(&s->gb); //repeat_first_field + h->qp_fixed = get_bits1(&s->gb); + h->qp = get_bits(&s->gb,6); + if(h->pic_type == FF_I_TYPE) { + if(!h->progressive && !h->pic_structure) + skip_bits1(&s->gb);//what is this? + skip_bits(&s->gb,4); //reserved bits + } else { + if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1)) + h->ref_flag = get_bits1(&s->gb); + skip_bits(&s->gb,4); //reserved bits + h->skip_mode_flag = get_bits1(&s->gb); + } + h->loop_filter_disable = get_bits1(&s->gb); + if(!h->loop_filter_disable && get_bits1(&s->gb)) { + h->alpha_offset = get_se_golomb(&s->gb); + h->beta_offset = get_se_golomb(&s->gb); + } else { + h->alpha_offset = h->beta_offset = 0; + } + check_for_slice(h); + if(h->pic_type == FF_I_TYPE) { + do { + init_mb(h); + decode_mb_i(h,1); + } while(next_mb(h)); + } else if(h->pic_type == FF_P_TYPE) { + do { + if(h->skip_mode_flag) { + skip_count = get_ue_golomb(&s->gb); + for(i=0;i<skip_count;i++) { + init_mb(h); + mb_skip_p(h); + if(!next_mb(h)) + goto done; + } + mb_type = get_ue_golomb(&s->gb) + P_16X16; + } else { + mb_type = get_ue_golomb(&s->gb) + P_SKIP; + } + init_mb(h); + if(mb_type > P_8X8) { + h->cbp = cbp_tab[mb_type - P_8X8 - 1][0]; + decode_mb_i(h,0); + } else { + decode_mb_p(h,mb_type); + } + } while(next_mb(h)); + } else { //FF_B_TYPE + do { + if(h->skip_mode_flag) { + skip_count = get_ue_golomb(&s->gb); + for(i=0;i<skip_count;i++) { + init_mb(h); + mb_skip_b(h); + inter_pred(h); + filter_mb(h,B_SKIP); + if(!next_mb(h)) + goto done; + } + mb_type = get_ue_golomb(&s->gb) + B_DIRECT; + } else { + mb_type = get_ue_golomb(&s->gb) + B_SKIP; + } + init_mb(h); + if(mb_type > B_8X8) { + h->cbp = cbp_tab[mb_type - B_8X8 - 1][0]; + decode_mb_i(h,0); + } else { + decode_mb_b(h,mb_type); + } + } while(next_mb(h)); + } + done: + if(h->pic_type != FF_B_TYPE) { + if(h->DPB[1].data[0]) + s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]); + memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture)); + memcpy(&h->DPB[0], &h->picture, sizeof(Picture)); + memset(&h->picture,0,sizeof(Picture)); + } + return 0; +} + +/***************************************************************************** + * + * headers and interface + * + ****************************************************************************/ + +static void init_top_lines(AVSContext *h) { + /* alloc top line of predictors */ + h->top_qp = av_malloc( h->mb_width); + h->top_mv[0] = av_malloc((h->mb_width*2+1)*sizeof(vector_t)); + h->top_mv[1] = av_malloc((h->mb_width*2+1)*sizeof(vector_t)); + h->top_pred_Y = av_malloc( h->mb_width*2*sizeof(int)); + h->top_border_y = av_malloc((h->mb_width+1)*16); + h->top_border_u = av_malloc((h->mb_width+1)*8); + h->top_border_v = av_malloc((h->mb_width+1)*8); + + /* alloc space for co-located MVs and types */ + h->col_mv = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t)); + h->col_type_base = av_malloc(h->mb_width*h->mb_height); +} + +static int decode_seq_header(AVSContext *h) { + MpegEncContext *s = &h->s; + extern const AVRational frame_rate_tab[]; + int frame_rate_code; + + h->profile = get_bits(&s->gb,8); + h->level = get_bits(&s->gb,8); + skip_bits1(&s->gb); //progressive sequence + s->width = 
get_bits(&s->gb,14); + s->height = get_bits(&s->gb,14); + skip_bits(&s->gb,2); //chroma format + skip_bits(&s->gb,3); //sample_precision + h->aspect_ratio = get_bits(&s->gb,4); + frame_rate_code = get_bits(&s->gb,4); + skip_bits(&s->gb,18);//bit_rate_lower + skip_bits1(&s->gb); //marker_bit + skip_bits(&s->gb,12);//bit_rate_upper + s->low_delay = get_bits1(&s->gb); + h->mb_width = (s->width + 15) >> 4; + h->mb_height = (s->height + 15) >> 4; + h->s.avctx->time_base.den = frame_rate_tab[frame_rate_code].num; + h->s.avctx->time_base.num = frame_rate_tab[frame_rate_code].den; + h->s.avctx->width = s->width; + h->s.avctx->height = s->height; + if(!h->top_qp) + init_top_lines(h); + return 0; +} + +/** + * finds the end of the current frame in the bitstream. + * @return the position of the first byte of the next frame, or -1 + */ +int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) { + int pic_found, i; + uint32_t state; + + pic_found= pc->frame_start_found; + state= pc->state; + + i=0; + if(!pic_found){ + for(i=0; i<buf_size; i++){ + state= (state<<8) | buf[i]; + if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){ + i++; + pic_found=1; + break; + } + } + } + + if(pic_found){ + /* EOF considered as end of frame */ + if (buf_size == 0) + return 0; + for(; i<buf_size; i++){ + state= (state<<8) | buf[i]; + if((state&0xFFFFFF00) == 0x100){ + if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){ + pc->frame_start_found=0; + pc->state=-1; + return i-3; + } + } + } + } + pc->frame_start_found= pic_found; + pc->state= state; + return END_NOT_FOUND; +} + +void ff_cavs_flush(AVCodecContext * avctx) { + AVSContext *h = (AVSContext *)avctx->priv_data; + h->got_keyframe = 0; +} + +static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, + uint8_t * buf, int buf_size) { + AVSContext *h = avctx->priv_data; + MpegEncContext *s = &h->s; + int input_size; + const uint8_t *buf_end; + const uint8_t *buf_ptr; + AVFrame *picture = data; + uint32_t stc; + + s->avctx = avctx; + + if (buf_size == 0) { + if(!s->low_delay && h->DPB[0].data[0]) { + *data_size = sizeof(AVPicture); + *picture = *(AVFrame *) &h->DPB[0]; + } + return 0; + } + + buf_ptr = buf; + buf_end = buf + buf_size; + for(;;) { + buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc); + if(stc & 0xFFFFFE00) + return FFMAX(0, buf_ptr - buf - s->parse_context.last_index); + input_size = (buf_end - buf_ptr)*8; + switch(stc) { + case SEQ_START_CODE: + init_get_bits(&s->gb, buf_ptr, input_size); + decode_seq_header(h); + break; + case PIC_I_START_CODE: + if(!h->got_keyframe) { + if(h->DPB[0].data[0]) + avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]); + if(h->DPB[1].data[0]) + avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]); + h->got_keyframe = 1; + } + case PIC_PB_START_CODE: + *data_size = 0; + if(!h->got_keyframe) + break; + init_get_bits(&s->gb, buf_ptr, input_size); + h->stc = stc; + if(decode_pic(h)) + break; + *data_size = sizeof(AVPicture); + if(h->pic_type != FF_B_TYPE) { + if(h->DPB[1].data[0]) { + *picture = *(AVFrame *) &h->DPB[1]; + } else { + *data_size = 0; + } + } else + *picture = *(AVFrame *) &h->picture; + break; + case EXT_START_CODE: + //mpeg_decode_extension(avctx,buf_ptr, input_size); + break; + case USER_START_CODE: + //mpeg_decode_user_data(avctx,buf_ptr, input_size); + break; + default: + if (stc >= SLICE_MIN_START_CODE && + stc <= SLICE_MAX_START_CODE) { + init_get_bits(&s->gb, buf_ptr, input_size); + decode_slice_header(h, &s->gb); + } + break; + } + } +} + 
+static int cavs_decode_init(AVCodecContext * avctx) { + AVSContext *h = (AVSContext *)avctx->priv_data; + MpegEncContext * const s = &h->s; + + MPV_decode_defaults(s); + s->avctx = avctx; + + avctx->pix_fmt= PIX_FMT_YUV420P; + + h->luma_scan[0] = 0; + h->luma_scan[1] = 8; + h->intra_pred_l[ INTRA_L_VERT] = intra_pred_vert; + h->intra_pred_l[ INTRA_L_HORIZ] = intra_pred_horiz; + h->intra_pred_l[ INTRA_L_LP] = intra_pred_lp; + h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left; + h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right; + h->intra_pred_l[ INTRA_L_LP_LEFT] = intra_pred_lp_left; + h->intra_pred_l[ INTRA_L_LP_TOP] = intra_pred_lp_top; + h->intra_pred_l[ INTRA_L_DC_128] = intra_pred_dc_128; + h->intra_pred_c[ INTRA_C_LP] = intra_pred_lp; + h->intra_pred_c[ INTRA_C_HORIZ] = intra_pred_horiz; + h->intra_pred_c[ INTRA_C_VERT] = intra_pred_vert; + h->intra_pred_c[ INTRA_C_PLANE] = intra_pred_plane; + h->intra_pred_c[ INTRA_C_LP_LEFT] = intra_pred_lp_left; + h->intra_pred_c[ INTRA_C_LP_TOP] = intra_pred_lp_top; + h->intra_pred_c[ INTRA_C_DC_128] = intra_pred_dc_128; + veccpy(&h->mv[ 7], (vector_t *)&un_mv); + veccpy(&h->mv[19], (vector_t *)&un_mv); + return 0; +} + +static int cavs_decode_end(AVCodecContext * avctx) { + AVSContext *h = (AVSContext *)avctx->priv_data; + + av_free(h->top_qp); + av_free(h->top_mv[0]); + av_free(h->top_mv[1]); + av_free(h->top_pred_Y); + av_free(h->top_border_y); + av_free(h->top_border_u); + av_free(h->top_border_v); + av_free(h->col_mv); + av_free(h->col_type_base); + return 0; +} + +AVCodec cavs_decoder = { + "cavs", + CODEC_TYPE_VIDEO, + CODEC_ID_CAVS, + sizeof(AVSContext), + cavs_decode_init, + NULL, + cavs_decode_end, + cavs_decode_frame, + CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, //FIXME is this correct ? + .flush= ff_cavs_flush, +}; diff --git a/libavcodec/cavsdata.h b/libavcodec/cavsdata.h new file mode 100644 index 0000000000..a1334d554c --- /dev/null +++ b/libavcodec/cavsdata.h @@ -0,0 +1,613 @@ +/* + * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. + * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define SLICE_MIN_START_CODE 0x00000101 +#define SLICE_MAX_START_CODE 0x000001af +#define EXT_START_CODE 0x000001b5 +#define USER_START_CODE 0x000001b2 +#define SEQ_START_CODE 0x000001b0 +#define PIC_I_START_CODE 0x000001b3 +#define PIC_PB_START_CODE 0x000001b6 + +#define A_AVAIL 1 +#define B_AVAIL 2 +#define C_AVAIL 4 +#define D_AVAIL 8 +#define NOT_AVAIL -1 +#define REF_INTRA -2 +#define REF_DIR -3 + +#define ESCAPE_CODE 59 + +#define FWD0 0x01 +#define FWD1 0x02 +#define BWD0 0x04 +#define BWD1 0x08 +#define SYM0 0x10 +#define SYM1 0x20 + +#define MV_BWD_OFFS 12 +#define MV_STRIDE 4 + +enum mb_t { + I_8X8 = 0, + P_SKIP, + P_16X16, + P_16X8, + P_8X16, + P_8X8, + B_SKIP, + B_DIRECT, + B_FWD_16X16, + B_BWD_16X16, + B_SYM_16X16, + B_8X8 = 29 +}; + +enum sub_mb_t { + B_SUB_DIRECT, + B_SUB_FWD, + B_SUB_BWD, + B_SUB_SYM +}; + +enum intra_luma_t { + INTRA_L_VERT, + INTRA_L_HORIZ, + INTRA_L_LP, + INTRA_L_DOWN_LEFT, + INTRA_L_DOWN_RIGHT, + INTRA_L_LP_LEFT, + INTRA_L_LP_TOP, + INTRA_L_DC_128 +}; + +enum intra_chroma_t { + INTRA_C_LP, + INTRA_C_HORIZ, + INTRA_C_VERT, + INTRA_C_PLANE, + INTRA_C_LP_LEFT, + INTRA_C_LP_TOP, + INTRA_C_DC_128, +}; + +enum mv_pred_t { + MV_PRED_MEDIAN, + MV_PRED_LEFT, + MV_PRED_TOP, + MV_PRED_TOPRIGHT, + MV_PRED_PSKIP, + MV_PRED_BSKIP +}; + +enum block_t { + BLK_16X16, + BLK_16X8, + BLK_8X16, + BLK_8X8 +}; + +enum mv_loc_t { + MV_FWD_D3 = 0, + MV_FWD_B2, + MV_FWD_B3, + MV_FWD_C2, + MV_FWD_A1, + MV_FWD_X0, + MV_FWD_X1, + MV_FWD_A3 = 8, + MV_FWD_X2, + MV_FWD_X3, + MV_BWD_D3 = MV_BWD_OFFS, + MV_BWD_B2, + MV_BWD_B3, + MV_BWD_C2, + MV_BWD_A1, + MV_BWD_X0, + MV_BWD_X1, + MV_BWD_A3 = MV_BWD_OFFS+8, + MV_BWD_X2, + MV_BWD_X3 +}; + +static const uint8_t b_partition_flags[14] = { + 0,0,0,0,0, + FWD0|FWD1, + BWD0|BWD1, + FWD0|BWD1, + BWD0|FWD1, + FWD0|SYM1, + BWD0|SYM1, + SYM0|FWD1, + SYM0|BWD1, + SYM0|SYM1 +}; + +static const uint8_t scan3x3[4] = {4,5,7,8}; + +static const uint8_t mv_scan[4] = { + MV_FWD_X0,MV_FWD_X1, + MV_FWD_X2,MV_FWD_X3 +}; + +static const uint8_t cbp_tab[64][2] = { + {63, 0},{15,15},{31,63},{47,31},{ 0,16},{14,32},{13,47},{11,13}, + { 7,14},{ 5,11},{10,12},{ 8, 5},{12,10},{61, 7},{ 4,48},{55, 3}, + { 1, 2},{ 2, 8},{59, 4},{ 3, 1},{62,61},{ 9,55},{ 6,59},{29,62}, + {45,29},{51,27},{23,23},{39,19},{27,30},{46,28},{53, 9},{30, 6}, + {43,60},{37,21},{60,44},{16,26},{21,51},{28,35},{19,18},{35,20}, + {42,24},{26,53},{44,17},{32,37},{58,39},{24,45},{20,58},{17,43}, + {18,42},{48,46},{22,36},{33,33},{25,34},{49,40},{40,52},{36,49}, + {34,50},{50,56},{52,25},{54,22},{41,54},{56,57},{38,41},{57,38} +}; + +static const uint8_t chroma_qp[64] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15, + 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, + 32,33,34,35,36,37,38,39,40,41,42,42,43,43,44,44, + 45,45,46,46,47,47,48,48,48,49,49,49,50,50,50,51 +}; + +static const uint8_t dequant_shift[64] = { + 14,14,14,14,14,14,14,14, + 13,13,13,13,13,13,13,13, + 13,12,12,12,12,12,12,12, + 11,11,11,11,11,11,11,11, + 11,10,10,10,10,10,10,10, + 10, 9, 9, 9, 9, 9, 9, 9, + 9, 8, 8, 8, 8, 8, 8, 8, + 7, 7, 7, 7, 7, 7, 7, 7 +}; + +static const uint16_t dequant_mul[64] = { + 32768,36061,38968,42495,46341,50535,55437,60424, + 32932,35734,38968,42495,46177,50535,55109,59933, + 65535,35734,38968,42577,46341,50617,55027,60097, + 
32809,35734,38968,42454,46382,50576,55109,60056, + 65535,35734,38968,42495,46320,50515,55109,60076, + 65535,35744,38968,42495,46341,50535,55099,60087, + 65535,35734,38973,42500,46341,50535,55109,60097, + 32771,35734,38965,42497,46341,50535,55109,60099 +}; + +typedef struct { + int16_t x; + int16_t y; + int16_t dist; + int16_t ref; +} vector_t; + +// marks block as unavailable, i.e. out of picture +// or not yet decoded +static const vector_t un_mv = {0,0,1,NOT_AVAIL}; + +//marks block as "no prediction from this direction" +// e.g. forward motion vector in BWD partition +static const vector_t dir_mv = {0,0,1,REF_DIR}; + +//marks block as using intra prediction +static const vector_t intra_mv = {0,0,1,REF_INTRA}; + +typedef struct residual_vlc_t { + int8_t rltab[59][3]; + int8_t level_add[26]; + int8_t golomb_order; + int inc_limit; + int8_t max_run; +} residual_vlc_t; + +static const residual_vlc_t intra_2dvlc[7] = { + { + { //level / run / table_inc + { 1, 0, 1},{ -1, 0, 1},{ 1, 1, 1},{ -1, 1, 1},{ 1, 2, 1},{ -1, 2, 1}, + { 1, 3, 1},{ -1, 3, 1},{ 1, 4, 1},{ -1, 4, 1},{ 1, 5, 1},{ -1, 5, 1}, + { 1, 6, 1},{ -1, 6, 1},{ 1, 7, 1},{ -1, 7, 1},{ 1, 8, 1},{ -1, 8, 1}, + { 1, 9, 1},{ -1, 9, 1},{ 1,10, 1},{ -1,10, 1},{ 2, 0, 2},{ -2, 0, 2}, + { 1,11, 1},{ -1,11, 1},{ 1,12, 1},{ -1,12, 1},{ 1,13, 1},{ -1,13, 1}, + { 1,14, 1},{ -1,14, 1},{ 2, 1, 2},{ -2, 1, 2},{ 1,15, 1},{ -1,15, 1}, + { 1,16, 1},{ -1,16, 1},{ 3, 0, 3},{ -3, 0, 3},{ 1,17, 1},{ -1,17, 1}, + { 1,18, 1},{ -1,18, 1},{ 2, 2, 2},{ -2, 2, 2},{ 1,19, 1},{ -1,19, 1}, + { 1,20, 1},{ -1,20, 1},{ 2, 3, 2},{ -2, 3, 2},{ 1,21, 1},{ -1,21, 1}, + { 2, 4, 2},{ -2, 4, 2},{ 1,22, 1},{ -1,22, 1},{ 0, 0,-1} + }, + //level_add + { 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2,-1,-1,-1}, + 2, //golomb_order + 0, //inc_limit + 22, //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 2, 0, 1},{ -2, 0, 1}, + { 1, 2, 0},{ -1, 2, 0},{ 0, 0, 0},{ 1, 3, 0},{ -1, 3, 0},{ 1, 4, 0}, + { -1, 4, 0},{ 1, 5, 0},{ -1, 5, 0},{ 3, 0, 2},{ -3, 0, 2},{ 2, 1, 1}, + { -2, 1, 1},{ 1, 6, 0},{ -1, 6, 0},{ 1, 7, 0},{ -1, 7, 0},{ 1, 8, 0}, + { -1, 8, 0},{ 2, 2, 1},{ -2, 2, 1},{ 4, 0, 2},{ -4, 0, 2},{ 1, 9, 0}, + { -1, 9, 0},{ 1,10, 0},{ -1,10, 0},{ 2, 3, 1},{ -2, 3, 1},{ 3, 1, 2}, + { -3, 1, 2},{ 1,11, 0},{ -1,11, 0},{ 2, 4, 1},{ -2, 4, 1},{ 5, 0, 3}, + { -5, 0, 3},{ 1,12, 0},{ -1,12, 0},{ 2, 5, 1},{ -2, 5, 1},{ 1,13, 0}, + { -1,13, 0},{ 2, 6, 1},{ -2, 6, 1},{ 2, 7, 1},{ -2, 7, 1},{ 3, 2, 2}, + { -3, 2, 2},{ 6, 0, 3},{ -6, 0, 3},{ 1,14, 0},{ -1,14, 0} + }, + //level_add + { 7, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 1, //inc_limit + 14, //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 1, 1, 0},{ -1, 1, 0}, + { 3, 0, 1},{ -3, 0, 1},{ 0, 0, 0},{ 1, 2, 0},{ -1, 2, 0},{ 2, 1, 0}, + { -2, 1, 0},{ 4, 0, 1},{ -4, 0, 1},{ 1, 3, 0},{ -1, 3, 0},{ 5, 0, 2}, + { -5, 0, 2},{ 1, 4, 0},{ -1, 4, 0},{ 3, 1, 1},{ -3, 1, 1},{ 2, 2, 0}, + { -2, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 6, 0, 2},{ -6, 0, 2},{ 2, 3, 0}, + { -2, 3, 0},{ 1, 6, 0},{ -1, 6, 0},{ 4, 1, 1},{ -4, 1, 1},{ 7, 0, 2}, + { -7, 0, 2},{ 3, 2, 1},{ -3, 2, 1},{ 2, 4, 0},{ -2, 4, 0},{ 1, 7, 0}, + { -1, 7, 0},{ 2, 5, 0},{ -2, 5, 0},{ 8, 0, 3},{ -8, 0, 3},{ 1, 8, 0}, + { -1, 8, 0},{ 5, 1, 2},{ -5, 1, 2},{ 3, 3, 1},{ -3, 3, 1},{ 2, 6, 0}, + { -2, 6, 0},{ 9, 0, 3},{ -9, 0, 3},{ 1, 9, 0},{ -1, 9, 0} + }, + //level_add + {10, 6, 4, 4, 3, 3, 3, 2, 2, 2,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, 
+ 2, //golomb_order + 2, //inc_limit + 9, //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0},{ -3, 0, 0}, + { 1, 1, 0},{ -1, 1, 0},{ 0, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 1}, + { -5, 0, 1},{ 2, 1, 0},{ -2, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 6, 0, 1}, + { -6, 0, 1},{ 3, 1, 0},{ -3, 1, 0},{ 7, 0, 1},{ -7, 0, 1},{ 1, 3, 0}, + { -1, 3, 0},{ 8, 0, 2},{ -8, 0, 2},{ 2, 2, 0},{ -2, 2, 0},{ 4, 1, 0}, + { -4, 1, 0},{ 1, 4, 0},{ -1, 4, 0},{ 9, 0, 2},{ -9, 0, 2},{ 5, 1, 1}, + { -5, 1, 1},{ 2, 3, 0},{ -2, 3, 0},{ 10, 0, 2},{-10, 0, 2},{ 3, 2, 0}, + { -3, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 11, 0, 3},{-11, 0, 3},{ 6, 1, 1}, + { -6, 1, 1},{ 1, 6, 0},{ -1, 6, 0},{ 2, 4, 0},{ -2, 4, 0},{ 3, 3, 0}, + { -3, 3, 0},{ 12, 0, 3},{-12, 0, 3},{ 4, 2, 0},{ -4, 2, 0} + }, + //level_add + {13, 7, 5, 4, 3, 2, 2,-1,-1,-1 -1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 4, //inc_limit + 6, //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0},{ -3, 0, 0}, + { 0, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0},{ -5, 0, 0},{ 6, 0, 0}, + { -6, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 7, 0, 0},{ -7, 0, 0},{ 8, 0, 1}, + { -8, 0, 1},{ 2, 1, 0},{ -2, 1, 0},{ 9, 0, 1},{ -9, 0, 1},{ 10, 0, 1}, + {-10, 0, 1},{ 1, 2, 0},{ -1, 2, 0},{ 3, 1, 0},{ -3, 1, 0},{ 11, 0, 2}, + {-11, 0, 2},{ 4, 1, 0},{ -4, 1, 0},{ 12, 0, 2},{-12, 0, 2},{ 13, 0, 2}, + {-13, 0, 2},{ 5, 1, 0},{ -5, 1, 0},{ 1, 3, 0},{ -1, 3, 0},{ 2, 2, 0}, + { -2, 2, 0},{ 14, 0, 2},{-14, 0, 2},{ 6, 1, 0},{ -6, 1, 0},{ 15, 0, 2}, + {-15, 0, 2},{ 16, 0, 2},{-16, 0, 2},{ 3, 2, 0},{ -3, 2, 0},{ 1, 4, 0}, + { -1, 4, 0},{ 7, 1, 0},{ -7, 1, 0},{ 17, 0, 2},{-17, 0, 2}, + }, + //level_add + {18, 8, 4, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 7, //inc_limit + 4, //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0},{ -5, 0, 0},{ 6, 0, 0}, + { -6, 0, 0},{ 7, 0, 0},{ -7, 0, 0},{ 8, 0, 0},{ -8, 0, 0},{ 9, 0, 0}, + { -9, 0, 0},{ 10, 0, 0},{-10, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 11, 0, 1}, + {-11, 0, 1},{ 12, 0, 1},{-12, 0, 1},{ 13, 0, 1},{-13, 0, 1},{ 2, 1, 0}, + { -2, 1, 0},{ 14, 0, 1},{-14, 0, 1},{ 15, 0, 1},{-15, 0, 1},{ 3, 1, 0}, + { -3, 1, 0},{ 16, 0, 1},{-16, 0, 1},{ 1, 2, 0},{ -1, 2, 0},{ 17, 0, 1}, + {-17, 0, 1},{ 4, 1, 0},{ -4, 1, 0},{ 18, 0, 1},{-18, 0, 1},{ 5, 1, 0}, + { -5, 1, 0},{ 19, 0, 1},{-19, 0, 1},{ 20, 0, 1},{-20, 0, 1},{ 6, 1, 0}, + { -6, 1, 0},{ 21, 0, 1},{-21, 0, 1},{ 2, 2, 0},{ -2, 2, 0}, + }, + //level_add + {22, 7, 3,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 10, //inc_limit + 2, //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0},{ -5, 0, 0},{ 6, 0, 0}, + { -6, 0, 0},{ 7, 0, 0},{ -7, 0, 0},{ 8, 0, 0},{ -8, 0, 0},{ 9, 0, 0}, + { -9, 0, 0},{ 10, 0, 0},{-10, 0, 0},{ 11, 0, 0},{-11, 0, 0},{ 12, 0, 0}, + {-12, 0, 0},{ 13, 0, 0},{-13, 0, 0},{ 14, 0, 0},{-14, 0, 0},{ 15, 0, 0}, + {-15, 0, 0},{ 16, 0, 0},{-16, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 17, 0, 0}, + {-17, 0, 0},{ 18, 0, 0},{-18, 0, 0},{ 19, 0, 0},{-19, 0, 0},{ 20, 0, 0}, + {-20, 0, 0},{ 21, 0, 0},{-21, 0, 0},{ 2, 1, 0},{ -2, 1, 0},{ 22, 0, 0}, + {-22, 0, 0},{ 23, 0, 0},{-23, 0, 0},{ 24, 0, 0},{-24, 0, 0},{ 25, 0, 0}, + {-25, 0, 0},{ 3, 1, 0},{ -3, 1, 0},{ 26, 0, 0},{-26, 0, 0} + }, + //level_add + {27, 
4,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + INT_MAX, //inc_limit + 1, //max_run + } +}; + +static const residual_vlc_t inter_2dvlc[7] = { + { + { //level / run + { 1, 0, 1},{ -1, 0, 1},{ 1, 1, 1},{ -1, 1, 1},{ 1, 2, 1},{ -1, 2, 1}, + { 1, 3, 1},{ -1, 3, 1},{ 1, 4, 1},{ -1, 4, 1},{ 1, 5, 1},{ -1, 5, 1}, + { 1, 6, 1},{ -1, 6, 1},{ 1, 7, 1},{ -1, 7, 1},{ 1, 8, 1},{ -1, 8, 1}, + { 1, 9, 1},{ -1, 9, 1},{ 1,10, 1},{ -1,10, 1},{ 1,11, 1},{ -1,11, 1}, + { 1,12, 1},{ -1,12, 1},{ 2, 0, 2},{ -2, 0, 2},{ 1,13, 1},{ -1,13, 1}, + { 1,14, 1},{ -1,14, 1},{ 1,15, 1},{ -1,15, 1},{ 1,16, 1},{ -1,16, 1}, + { 1,17, 1},{ -1,17, 1},{ 1,18, 1},{ -1,18, 1},{ 3, 0, 3},{ -3, 0, 3}, + { 1,19, 1},{ -1,19, 1},{ 1,20, 1},{ -1,20, 1},{ 2, 1, 2},{ -2, 1, 2}, + { 1,21, 1},{ -1,21, 1},{ 1,22, 1},{ -1,22, 1},{ 1,23, 1},{ -1,23, 1}, + { 1,24, 1},{ -1,24, 1},{ 1,25, 1},{ -1,25, 1},{ 0, 0,-1} + }, + //level_add + { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, + 3, //golomb_order + 0, //inc_limit + 25 //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 0, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 1, 2, 0}, + { -1, 2, 0},{ 1, 3, 0},{ -1, 3, 0},{ 1, 4, 0},{ -1, 4, 0},{ 1, 5, 0}, + { -1, 5, 0},{ 2, 0, 1},{ -2, 0, 1},{ 1, 6, 0},{ -1, 6, 0},{ 1, 7, 0}, + { -1, 7, 0},{ 1, 8, 0},{ -1, 8, 0},{ 1, 9, 0},{ -1, 9, 0},{ 2, 1, 1}, + { -2, 1, 1},{ 1,10, 0},{ -1,10, 0},{ 1,11, 0},{ -1,11, 0},{ 3, 0, 2}, + { -3, 0, 2},{ 1,12, 0},{ -1,12, 0},{ 1,13, 0},{ -1,13, 0},{ 2, 2, 1}, + { -2, 2, 1},{ 1,14, 0},{ -1,14, 0},{ 2, 3, 1},{ -2, 3, 1},{ 1,15, 0}, + { -1,15, 0},{ 2, 4, 1},{ -2, 4, 1},{ 1,16, 0},{ -1,16, 0},{ 4, 0, 3}, + { -4, 0, 3},{ 2, 5, 1},{ -2, 5, 1},{ 1,17, 0},{ -1,17, 0},{ 1,18, 0}, + { -1,18, 0},{ 2, 6, 1},{ -2, 6, 1},{ 3, 1, 2},{ -3, 1, 2}, + }, + //level_add + { 5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 1, //inc_limit + 18 //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 0, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 2, 0, 0}, + { -2, 0, 0},{ 1, 2, 0},{ -1, 2, 0},{ 1, 3, 0},{ -1, 3, 0},{ 3, 0, 1}, + { -3, 0, 1},{ 2, 1, 0},{ -2, 1, 0},{ 1, 4, 0},{ -1, 4, 0},{ 1, 5, 0}, + { -1, 5, 0},{ 1, 6, 0},{ -1, 6, 0},{ 2, 2, 0},{ -2, 2, 0},{ 4, 0, 2}, + { -4, 0, 2},{ 1, 7, 0},{ -1, 7, 0},{ 3, 1, 1},{ -3, 1, 1},{ 2, 3, 0}, + { -2, 3, 0},{ 1, 8, 0},{ -1, 8, 0},{ 1, 9, 0},{ -1, 9, 0},{ 5, 0, 2}, + { -5, 0, 2},{ 2, 4, 0},{ -2, 4, 0},{ 1,10, 0},{ -1,10, 0},{ 2, 5, 0}, + { -2, 5, 0},{ 1,11, 0},{ -1,11, 0},{ 3, 2, 1},{ -3, 2, 1},{ 6, 0, 2}, + { -6, 0, 2},{ 4, 1, 2},{ -4, 1, 2},{ 1,12, 0},{ -1,12, 0},{ 2, 6, 0}, + { -2, 6, 0},{ 3, 3, 1},{ -3, 3, 1},{ 1,13, 0},{ -1,13, 0}, + }, + //level_add + { 7, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 2, //inc_limit + 13 //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 0, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 1, 1, 0}, + { -1, 1, 0},{ 3, 0, 0},{ -3, 0, 0},{ 1, 2, 0},{ -1, 2, 0},{ 2, 1, 0}, + { -2, 1, 0},{ 4, 0, 1},{ -4, 0, 1},{ 1, 3, 0},{ -1, 3, 0},{ 5, 0, 1}, + { -5, 0, 1},{ 1, 4, 0},{ -1, 4, 0},{ 3, 1, 0},{ -3, 1, 0},{ 2, 2, 0}, + { -2, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 6, 0, 1},{ -6, 0, 1},{ 2, 3, 0}, + { -2, 3, 0},{ 1, 6, 0},{ -1, 6, 0},{ 4, 1, 1},{ -4, 1, 1},{ 7, 0, 2}, + { -7, 0, 2},{ 3, 2, 0},{ -3, 2, 0},{ 1, 7, 0},{ -1, 7, 0},{ 2, 4, 0}, + { -2, 4, 0},{ 8, 0, 2},{ -8, 0, 2},{ 1, 8, 0},{ -1, 8, 0},{ 3, 3, 0}, + { -3, 3, 0},{ 2, 5, 0},{ -2, 5, 0},{ 5, 1, 1},{ -5, 1, 1},{ 1, 9, 0}, + { -1, 9, 0},{ 9, 0, 2},{ 
-9, 0, 2},{ 4, 2, 1},{ -4, 2, 1}, + }, + //level_add + {10, 6, 5, 4, 3, 3, 2, 2, 2, 2,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 3, //inc_limit + 9 //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 0, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0}, + { -5, 0, 0},{ 2, 1, 0},{ -2, 1, 0},{ 1, 2, 0},{ -1, 2, 0},{ 6, 0, 0}, + { -6, 0, 0},{ 3, 1, 0},{ -3, 1, 0},{ 7, 0, 1},{ -7, 0, 1},{ 1, 3, 0}, + { -1, 3, 0},{ 8, 0, 1},{ -8, 0, 1},{ 2, 2, 0},{ -2, 2, 0},{ 4, 1, 0}, + { -4, 1, 0},{ 1, 4, 0},{ -1, 4, 0},{ 9, 0, 1},{ -9, 0, 1},{ 5, 1, 0}, + { -5, 1, 0},{ 2, 3, 0},{ -2, 3, 0},{ 1, 5, 0},{ -1, 5, 0},{ 10, 0, 2}, + {-10, 0, 2},{ 3, 2, 0},{ -3, 2, 0},{ 11, 0, 2},{-11, 0, 2},{ 1, 6, 0}, + { -1, 6, 0},{ 6, 1, 0},{ -6, 1, 0},{ 3, 3, 0},{ -3, 3, 0},{ 2, 4, 0}, + { -2, 4, 0},{ 12, 0, 2},{-12, 0, 2},{ 4, 2, 0},{ -4, 2, 0}, + }, + //level_add + {13, 7, 5, 4, 3, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 6, //inc_limit + 6 //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0},{ -5, 0, 0},{ 1, 1, 0}, + { -1, 1, 0},{ 6, 0, 0},{ -6, 0, 0},{ 7, 0, 0},{ -7, 0, 0},{ 8, 0, 0}, + { -8, 0, 0},{ 2, 1, 0},{ -2, 1, 0},{ 9, 0, 0},{ -9, 0, 0},{ 1, 2, 0}, + { -1, 2, 0},{ 10, 0, 1},{-10, 0, 1},{ 3, 1, 0},{ -3, 1, 0},{ 11, 0, 1}, + {-11, 0, 1},{ 4, 1, 0},{ -4, 1, 0},{ 12, 0, 1},{-12, 0, 1},{ 1, 3, 0}, + { -1, 3, 0},{ 2, 2, 0},{ -2, 2, 0},{ 13, 0, 1},{-13, 0, 1},{ 5, 1, 0}, + { -5, 1, 0},{ 14, 0, 1},{-14, 0, 1},{ 6, 1, 0},{ -6, 1, 0},{ 1, 4, 0}, + { -1, 4, 0},{ 15, 0, 1},{-15, 0, 1},{ 3, 2, 0},{ -3, 2, 0},{ 16, 0, 1}, + {-16, 0, 1},{ 2, 3, 0},{ -2, 3, 0},{ 7, 1, 0},{ -7, 1, 0}, + }, + //level_add + {17, 8, 4, 3, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + 9, //inc_limit + 4 //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0},{ -5, 0, 0},{ 6, 0, 0}, + { -6, 0, 0},{ 7, 0, 0},{ -7, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 8, 0, 0}, + { -8, 0, 0},{ 9, 0, 0},{ -9, 0, 0},{ 10, 0, 0},{-10, 0, 0},{ 11, 0, 0}, + {-11, 0, 0},{ 12, 0, 0},{-12, 0, 0},{ 2, 1, 0},{ -2, 1, 0},{ 13, 0, 0}, + {-13, 0, 0},{ 1, 2, 0},{ -1, 2, 0},{ 14, 0, 0},{-14, 0, 0},{ 15, 0, 0}, + {-15, 0, 0},{ 3, 1, 0},{ -3, 1, 0},{ 16, 0, 0},{-16, 0, 0},{ 17, 0, 0}, + {-17, 0, 0},{ 18, 0, 0},{-18, 0, 0},{ 4, 1, 0},{ -4, 1, 0},{ 19, 0, 0}, + {-19, 0, 0},{ 20, 0, 0},{-20, 0, 0},{ 2, 2, 0},{ -2, 2, 0},{ 1, 3, 0}, + { -1, 3, 0},{ 5, 1, 0},{ -5, 1, 0},{ 21, 0, 0},{-21, 0, 0}, + }, + //level_add + {22, 6, 3, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 2, //golomb_order + INT_MAX, //inc_limit + 3 //max_run + } +}; + +static const residual_vlc_t chroma_2dvlc[5] = { + { + { //level / run + { 1, 0, 1},{ -1, 0, 1},{ 1, 1, 1},{ -1, 1, 1},{ 1, 2, 1},{ -1, 2, 1}, + { 1, 3, 1},{ -1, 3, 1},{ 1, 4, 1},{ -1, 4, 1},{ 1, 5, 1},{ -1, 5, 1}, + { 1, 6, 1},{ -1, 6, 1},{ 2, 0, 2},{ -2, 0, 2},{ 1, 7, 1},{ -1, 7, 1}, + { 1, 8, 1},{ -1, 8, 1},{ 1, 9, 1},{ -1, 9, 1},{ 1,10, 1},{ -1,10, 1}, + { 1,11, 1},{ -1,11, 1},{ 1,12, 1},{ -1,12, 1},{ 1,13, 1},{ -1,13, 1}, + { 1,14, 1},{ -1,14, 1},{ 3, 0, 3},{ -3, 0, 3},{ 1,15, 1},{ -1,15, 1}, + { 1,16, 1},{ -1,16, 1},{ 1,17, 1},{ -1,17, 1},{ 1,18, 1},{ -1,18, 1}, + { 1,19, 1},{ -1,19, 1},{ 1,20, 1},{ -1,20, 1},{ 1,21, 1},{ -1,21, 1}, + { 2, 1, 2},{ 
-2, 1, 2},{ 1,22, 1},{ -1,22, 1},{ 1,23, 1},{ -1,23, 1}, + { 1,24, 1},{ -1,24, 1},{ 4, 0, 3},{ -4, 0, 3},{ 0, 0,-1} + }, + //level_add + { 5, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2,-1}, + 2, //golomb_order + 0, //inc_limit + 24, //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 2, 0, 1}, + { -2, 0, 1},{ 1, 2, 0},{ -1, 2, 0},{ 1, 3, 0},{ -1, 3, 0},{ 1, 4, 0}, + { -1, 4, 0},{ 1, 5, 0},{ -1, 5, 0},{ 3, 0, 2},{ -3, 0, 2},{ 1, 6, 0}, + { -1, 6, 0},{ 1, 7, 0},{ -1, 7, 0},{ 2, 1, 1},{ -2, 1, 1},{ 1, 8, 0}, + { -1, 8, 0},{ 1, 9, 0},{ -1, 9, 0},{ 1,10, 0},{ -1,10, 0},{ 4, 0, 2}, + { -4, 0, 2},{ 1,11, 0},{ -1,11, 0},{ 1,12, 0},{ -1,12, 0},{ 1,13, 0}, + { -1,13, 0},{ 2, 2, 1},{ -2, 2, 1},{ 1,14, 0},{ -1,14, 0},{ 2, 3, 1}, + { -2, 3, 1},{ 5, 0, 3},{ -5, 0, 3},{ 3, 1, 2},{ -3, 1, 2},{ 1,15, 0}, + { -1,15, 0},{ 1,16, 0},{ -1,16, 0},{ 1,17, 0},{ -1,17, 0},{ 2, 4, 1}, + { -2, 4, 1},{ 1,18, 0},{ -1,18, 0},{ 1,19, 0},{ -1,19, 0}, + }, + //level_add + { 6, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2,-1,-1,-1,-1,-1,-1}, + 0, //golomb_order + 1, //inc_limit + 19, //max_run + },{ + { //level / run + { 1, 0, 0},{ -1, 0, 0},{ 0, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 1, 1, 0}, + { -1, 1, 0},{ 3, 0, 1},{ -3, 0, 1},{ 1, 2, 0},{ -1, 2, 0},{ 4, 0, 1}, + { -4, 0, 1},{ 2, 1, 0},{ -2, 1, 0},{ 1, 3, 0},{ -1, 3, 0},{ 5, 0, 2}, + { -5, 0, 2},{ 1, 4, 0},{ -1, 4, 0},{ 3, 1, 1},{ -3, 1, 1},{ 2, 2, 0}, + { -2, 2, 0},{ 1, 5, 0},{ -1, 5, 0},{ 6, 0, 2},{ -6, 0, 2},{ 1, 6, 0}, + { -1, 6, 0},{ 2, 3, 0},{ -2, 3, 0},{ 7, 0, 2},{ -7, 0, 2},{ 1, 7, 0}, + { -1, 7, 0},{ 4, 1, 1},{ -4, 1, 1},{ 1, 8, 0},{ -1, 8, 0},{ 3, 2, 1}, + { -3, 2, 1},{ 2, 4, 0},{ -2, 4, 0},{ 2, 5, 0},{ -2, 5, 0},{ 8, 0, 2}, + { -8, 0, 2},{ 1, 9, 0},{ -1, 9, 0},{ 1,10, 0},{ -1,10, 0},{ 9, 0, 2}, + { -9, 0, 2},{ 5, 1, 2},{ -5, 1, 2},{ 3, 3, 1},{ -3, 3, 1}, + }, + //level_add + {10, 6, 4, 4, 3, 3, 2, 2, 2, 2, 2,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 1, //golomb_order + 2, //inc_limit + 10, //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 1, 1, 0},{ -1, 1, 0},{ 5, 0, 1}, + { -5, 0, 1},{ 2, 1, 0},{ -2, 1, 0},{ 6, 0, 1},{ -6, 0, 1},{ 1, 2, 0}, + { -1, 2, 0},{ 7, 0, 1},{ -7, 0, 1},{ 3, 1, 0},{ -3, 1, 0},{ 8, 0, 1}, + { -8, 0, 1},{ 1, 3, 0},{ -1, 3, 0},{ 2, 2, 0},{ -2, 2, 0},{ 9, 0, 1}, + { -9, 0, 1},{ 4, 1, 0},{ -4, 1, 0},{ 1, 4, 0},{ -1, 4, 0},{ 10, 0, 1}, + {-10, 0, 1},{ 3, 2, 0},{ -3, 2, 0},{ 5, 1, 1},{ -5, 1, 1},{ 2, 3, 0}, + { -2, 3, 0},{ 11, 0, 1},{-11, 0, 1},{ 1, 5, 0},{ -1, 5, 0},{ 12, 0, 1}, + {-12, 0, 1},{ 1, 6, 0},{ -1, 6, 0},{ 6, 1, 1},{ -6, 1, 1},{ 13, 0, 1}, + {-13, 0, 1},{ 2, 4, 0},{ -2, 4, 0},{ 1, 7, 0},{ -1, 7, 0}, + }, + //level_add + {14, 7, 4, 3, 3, 2, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 1, //golomb_order + 4, //inc_limit + 7, //max_run + },{ + { //level / run + { 0, 0, 0},{ 1, 0, 0},{ -1, 0, 0},{ 2, 0, 0},{ -2, 0, 0},{ 3, 0, 0}, + { -3, 0, 0},{ 4, 0, 0},{ -4, 0, 0},{ 5, 0, 0},{ -5, 0, 0},{ 6, 0, 0}, + { -6, 0, 0},{ 7, 0, 0},{ -7, 0, 0},{ 8, 0, 0},{ -8, 0, 0},{ 1, 1, 0}, + { -1, 1, 0},{ 9, 0, 0},{ -9, 0, 0},{ 10, 0, 0},{-10, 0, 0},{ 11, 0, 0}, + {-11, 0, 0},{ 2, 1, 0},{ -2, 1, 0},{ 12, 0, 0},{-12, 0, 0},{ 13, 0, 0}, + {-13, 0, 0},{ 3, 1, 0},{ -3, 1, 0},{ 14, 0, 0},{-14, 0, 0},{ 1, 2, 0}, + { -1, 2, 0},{ 15, 0, 0},{-15, 0, 0},{ 4, 1, 0},{ -4, 1, 0},{ 16, 0, 0}, + {-16, 0, 0},{ 17, 0, 0},{-17, 0, 0},{ 5, 1, 0},{ -5, 1, 0},{ 1, 3, 0}, + 
{ -1, 3, 0},{ 2, 2, 0},{ -2, 2, 0},{ 18, 0, 0},{-18, 0, 0},{ 6, 1, 0}, + { -6, 1, 0},{ 19, 0, 0},{-19, 0, 0},{ 1, 4, 0},{ -1, 4, 0}, + }, + //level_add + {20, 7, 3, 2, 2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, + -1,-1,-1,-1,-1,-1,-1,-1,-1,-1}, + 0, //golomb_order + INT_MAX, //inc_limit + 4, //max_run + } +}; + +static const uint8_t alpha_tab[64] = { + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 18, 20, + 22, 24, 26, 28, 30, 33, 33, 35, 35, 36, 37, 37, 39, 39, 42, 44, + 46, 48, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 +}; + +static const uint8_t beta_tab[64] = { + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, + 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, + 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27 +}; + +static const uint8_t tc_tab[64] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, + 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, + 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9 +}; + +static const int_fast8_t left_modifier_l[8] = { 0,-1, 6,-1,-1, 7, 6, 7}; +static const int_fast8_t top_modifier_l[8] = {-1, 1, 5,-1,-1, 5, 7, 7}; +static const int_fast8_t left_modifier_c[7] = { 5,-1, 2,-1, 6, 5, 6}; +static const int_fast8_t top_modifier_c[7] = { 4, 1,-1,-1, 4, 6, 6}; diff --git a/libavcodec/cavsdsp.c b/libavcodec/cavsdsp.c new file mode 100644 index 0000000000..71aaddba58 --- /dev/null +++ b/libavcodec/cavsdsp.c @@ -0,0 +1,511 @@ +/* + * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. + * + * DSP functions + * + * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <stdio.h> +#include "dsputil.h" +#include "cavsdsp.h" + +/***************************************************************************** + * + * in-loop deblocking filter + * + ****************************************************************************/ + +#define P2 p0_p[-3*stride] +#define P1 p0_p[-2*stride] +#define P0 p0_p[-1*stride] +#define Q0 p0_p[ 0*stride] +#define Q1 p0_p[ 1*stride] +#define Q2 p0_p[ 2*stride] + +static inline void loop_filter_l2(uint8_t *p0_p,int stride,int alpha, int beta) { + int p0 = P0; + int q0 = Q0; + + if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) { + int s = p0 + q0 + 2; + alpha = (alpha>>2) + 2; + if(abs(P2-p0) < beta && abs(p0-q0) < alpha) { + P0 = (P1 + p0 + s) >> 2; + P1 = (2*P1 + s) >> 2; + } else + P0 = (2*P1 + s) >> 2; + if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) { + Q0 = (Q1 + q0 + s) >> 2; + Q1 = (2*Q1 + s) >> 2; + } else + Q0 = (2*Q1 + s) >> 2; + } +} + +static inline void loop_filter_l1(uint8_t *p0_p, int stride, int alpha, int beta, int tc) { + int p0 = P0; + int q0 = Q0; + + if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) { + int delta = clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc); + P0 = clip_uint8(p0+delta); + Q0 = clip_uint8(q0-delta); + if(abs(P2-p0)<beta) { + delta = clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc); + P1 = clip_uint8(P1+delta); + } + if(abs(Q2-q0)<beta) { + delta = clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc); + Q1 = clip_uint8(Q1-delta); + } + } +} + +static inline void loop_filter_c2(uint8_t *p0_p,int stride,int alpha, int beta) { + int p0 = P0; + int q0 = Q0; + + if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) { + int s = p0 + q0 + 2; + alpha = (alpha>>2) + 2; + if(abs(P2-p0) < beta && abs(p0-q0) < alpha) { + P0 = (P1 + p0 + s) >> 2; + } else + P0 = (2*P1 + s) >> 2; + if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) { + Q0 = (Q1 + q0 + s) >> 2; + } else + Q0 = (2*Q1 + s) >> 2; + } +} + +static inline void loop_filter_c1(uint8_t *p0_p,int stride,int alpha, int beta, + int tc) { + if(abs(P0-Q0)<alpha && abs(P1-P0)<beta && abs(Q1-Q0)<beta) { + int delta = clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc); + P0 = clip_uint8(P0+delta); + Q0 = clip_uint8(Q0-delta); + } +} + +#undef P0 +#undef P1 +#undef P2 +#undef Q0 +#undef Q1 +#undef Q2 + +void cavs_filter_lv_c(uint8_t *d, int stride, int alpha, int beta, int tc, + int bs1, int bs2) { + int i; + if(bs1==2) + for(i=0;i<16;i++) + loop_filter_l2(d + i*stride,1,alpha,beta); + else { + if(bs1) + for(i=0;i<8;i++) + loop_filter_l1(d + i*stride,1,alpha,beta,tc); + if (bs2) + for(i=8;i<16;i++) + loop_filter_l1(d + i*stride,1,alpha,beta,tc); + } +} + +void cavs_filter_lh_c(uint8_t *d, int stride, int alpha, int beta, int tc, + int bs1, int bs2) { + int i; + if(bs1==2) + for(i=0;i<16;i++) + loop_filter_l2(d + i,stride,alpha,beta); + else { + if(bs1) + for(i=0;i<8;i++) + loop_filter_l1(d + i,stride,alpha,beta,tc); + if (bs2) + for(i=8;i<16;i++) + loop_filter_l1(d + i,stride,alpha,beta,tc); + } +} + +void cavs_filter_cv_c(uint8_t *d, int stride, int alpha, int beta, int tc, + int bs1, int bs2) { + int i; + if(bs1==2) + for(i=0;i<8;i++) + loop_filter_c2(d + i*stride,1,alpha,beta); + else { + if(bs1) + for(i=0;i<4;i++) + loop_filter_c1(d + i*stride,1,alpha,beta,tc); + if (bs2) + for(i=4;i<8;i++) + loop_filter_c1(d + i*stride,1,alpha,beta,tc); + } +} + +void 
cavs_filter_ch_c(uint8_t *d, int stride, int alpha, int beta, int tc, + int bs1, int bs2) { + int i; + if(bs1==2) + for(i=0;i<8;i++) + loop_filter_c2(d + i,stride,alpha,beta); + else { + if(bs1) + for(i=0;i<4;i++) + loop_filter_c1(d + i,stride,alpha,beta,tc); + if (bs2) + for(i=4;i<8;i++) + loop_filter_c1(d + i,stride,alpha,beta,tc); + } +} + +/***************************************************************************** + * + * inverse transform + * + ****************************************************************************/ + +void cavs_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride) { + int i; + DCTELEM (*src)[8] = (DCTELEM(*)[8])block; + uint8_t *cm = cropTbl + MAX_NEG_CROP; + + for( i = 0; i < 8; i++ ) { + const int a0 = 3*src[i][1] - (src[i][7]<<1); + const int a1 = 3*src[i][3] + (src[i][5]<<1); + const int a2 = (src[i][3]<<1) - 3*src[i][5]; + const int a3 = (src[i][1]<<1) + 3*src[i][7]; + + const int b4 = ((a0 + a1 + a3)<<1) + a1; + const int b5 = ((a0 - a1 + a2)<<1) + a0; + const int b6 = ((a3 - a2 - a1)<<1) + a3; + const int b7 = ((a0 - a2 - a3)<<1) - a2; + + const int a7 = (src[i][2]<<2) - 10*src[i][6]; + const int a6 = (src[i][6]<<2) + 10*src[i][2]; + const int a5 = (src[i][0] - src[i][4]) << 3; + const int a4 = (src[i][0] + src[i][4]) << 3; + + const int b0 = a4 + a6; + const int b1 = a5 + a7; + const int b2 = a5 - a7; + const int b3 = a4 - a6; + + src[i][0] = (b0 + b4 + 4) >> 3; + src[i][1] = (b1 + b5 + 4) >> 3; + src[i][2] = (b2 + b6 + 4) >> 3; + src[i][3] = (b3 + b7 + 4) >> 3; + src[i][4] = (b3 - b7 + 4) >> 3; + src[i][5] = (b2 - b6 + 4) >> 3; + src[i][6] = (b1 - b5 + 4) >> 3; + src[i][7] = (b0 - b4 + 4) >> 3; + } + for( i = 0; i < 8; i++ ) { + const int a0 = 3*src[1][i] - (src[7][i]<<1); + const int a1 = 3*src[3][i] + (src[5][i]<<1); + const int a2 = (src[3][i]<<1) - 3*src[5][i]; + const int a3 = (src[1][i]<<1) + 3*src[7][i]; + + const int b4 = ((a0 + a1 + a3)<<1) + a1; + const int b5 = ((a0 - a1 + a2)<<1) + a0; + const int b6 = ((a3 - a2 - a1)<<1) + a3; + const int b7 = ((a0 - a2 - a3)<<1) - a2; + + const int a7 = (src[2][i]<<2) - 10*src[6][i]; + const int a6 = (src[6][i]<<2) + 10*src[2][i]; + const int a5 = (src[0][i] - src[4][i]) << 3; + const int a4 = (src[0][i] + src[4][i]) << 3; + + const int b0 = a4 + a6; + const int b1 = a5 + a7; + const int b2 = a5 - a7; + const int b3 = a4 - a6; + + dst[i + 0*stride] = cm[ dst[i + 0*stride] + ((b0 + b4 + 64) >> 7)]; + dst[i + 1*stride] = cm[ dst[i + 1*stride] + ((b1 + b5 + 64) >> 7)]; + dst[i + 2*stride] = cm[ dst[i + 2*stride] + ((b2 + b6 + 64) >> 7)]; + dst[i + 3*stride] = cm[ dst[i + 3*stride] + ((b3 + b7 + 64) >> 7)]; + dst[i + 4*stride] = cm[ dst[i + 4*stride] + ((b3 - b7 + 64) >> 7)]; + dst[i + 5*stride] = cm[ dst[i + 5*stride] + ((b2 - b6 + 64) >> 7)]; + dst[i + 6*stride] = cm[ dst[i + 6*stride] + ((b1 - b5 + 64) >> 7)]; + dst[i + 7*stride] = cm[ dst[i + 7*stride] + ((b0 - b4 + 64) >> 7)]; + } +} + +/***************************************************************************** + * + * motion compensation + * + ****************************************************************************/ + +#define CAVS_SUBPIX(OPNAME, OP, NAME, A, B, C, D, E, F) \ +static void OPNAME ## cavs_filt8_h_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ + const int h=8;\ + uint8_t *cm = cropTbl + MAX_NEG_CROP;\ + int i;\ + for(i=0; i<h; i++)\ + {\ + OP(dst[0], A*src[-2] + B*src[-1] + C*src[0] + D*src[1] + E*src[2] + F*src[3]);\ + OP(dst[1], A*src[-1] + B*src[ 0] + C*src[1] + D*src[2] + E*src[3] + F*src[4]);\ + 
OP(dst[2], A*src[ 0] + B*src[ 1] + C*src[2] + D*src[3] + E*src[4] + F*src[5]);\ + OP(dst[3], A*src[ 1] + B*src[ 2] + C*src[3] + D*src[4] + E*src[5] + F*src[6]);\ + OP(dst[4], A*src[ 2] + B*src[ 3] + C*src[4] + D*src[5] + E*src[6] + F*src[7]);\ + OP(dst[5], A*src[ 3] + B*src[ 4] + C*src[5] + D*src[6] + E*src[7] + F*src[8]);\ + OP(dst[6], A*src[ 4] + B*src[ 5] + C*src[6] + D*src[7] + E*src[8] + F*src[9]);\ + OP(dst[7], A*src[ 5] + B*src[ 6] + C*src[7] + D*src[8] + E*src[9] + F*src[10]);\ + dst+=dstStride;\ + src+=srcStride;\ + }\ +}\ +\ +static void OPNAME ## cavs_filt8_v_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ + const int w=8;\ + uint8_t *cm = cropTbl + MAX_NEG_CROP;\ + int i;\ + for(i=0; i<w; i++)\ + {\ + const int srcB= src[-2*srcStride];\ + const int srcA= src[-1*srcStride];\ + const int src0= src[0 *srcStride];\ + const int src1= src[1 *srcStride];\ + const int src2= src[2 *srcStride];\ + const int src3= src[3 *srcStride];\ + const int src4= src[4 *srcStride];\ + const int src5= src[5 *srcStride];\ + const int src6= src[6 *srcStride];\ + const int src7= src[7 *srcStride];\ + const int src8= src[8 *srcStride];\ + const int src9= src[9 *srcStride];\ + const int src10= src[10 *srcStride];\ + OP(dst[0*dstStride], A*srcB + B*srcA + C*src0 + D*src1 + E*src2 + F*src3);\ + OP(dst[1*dstStride], A*srcA + B*src0 + C*src1 + D*src2 + E*src3 + F*src4);\ + OP(dst[2*dstStride], A*src0 + B*src1 + C*src2 + D*src3 + E*src4 + F*src5);\ + OP(dst[3*dstStride], A*src1 + B*src2 + C*src3 + D*src4 + E*src5 + F*src6);\ + OP(dst[4*dstStride], A*src2 + B*src3 + C*src4 + D*src5 + E*src6 + F*src7);\ + OP(dst[5*dstStride], A*src3 + B*src4 + C*src5 + D*src6 + E*src7 + F*src8);\ + OP(dst[6*dstStride], A*src4 + B*src5 + C*src6 + D*src7 + E*src8 + F*src9);\ + OP(dst[7*dstStride], A*src5 + B*src6 + C*src7 + D*src8 + E*src9 + F*src10);\ + dst++;\ + src++;\ + }\ +}\ +\ +static void OPNAME ## cavs_filt16_v_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ + OPNAME ## cavs_filt8_v_ ## NAME(dst , src , dstStride, srcStride);\ + OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\ + src += 8*srcStride;\ + dst += 8*dstStride;\ + OPNAME ## cavs_filt8_v_ ## NAME(dst , src , dstStride, srcStride);\ + OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\ +}\ +\ +static void OPNAME ## cavs_filt16_h_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ + OPNAME ## cavs_filt8_h_ ## NAME(dst , src , dstStride, srcStride);\ + OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\ + src += 8*srcStride;\ + dst += 8*dstStride;\ + OPNAME ## cavs_filt8_h_ ## NAME(dst , src , dstStride, srcStride);\ + OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\ +}\ + +#define CAVS_SUBPIX_HV(OPNAME, OP, NAME, AH, BH, CH, DH, EH, FH, AV, BV, CV, DV, EV, FV, FULL) \ +static void OPNAME ## cavs_filt8_hv_ ## NAME(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int srcStride){\ + int16_t temp[8*(8+5)];\ + int16_t *tmp = temp;\ + const int h=8;\ + const int w=8;\ + uint8_t *cm = cropTbl + MAX_NEG_CROP;\ + int i;\ + src1 -= 2*srcStride;\ + for(i=0; i<h+5; i++)\ + {\ + tmp[0]= AH*src1[-2] + BH*src1[-1] + CH*src1[0] + DH*src1[1] + EH*src1[2] + FH*src1[3];\ + tmp[1]= AH*src1[-1] + BH*src1[ 0] + CH*src1[1] + DH*src1[2] + EH*src1[3] + FH*src1[4];\ + tmp[2]= AH*src1[ 0] + BH*src1[ 1] + CH*src1[2] + DH*src1[3] + EH*src1[4] + FH*src1[5];\ + tmp[3]= AH*src1[ 1] + BH*src1[ 2] + CH*src1[3] + DH*src1[4] + EH*src1[5] + FH*src1[6];\ + 
tmp[4]= AH*src1[ 2] + BH*src1[ 3] + CH*src1[4] + DH*src1[5] + EH*src1[6] + FH*src1[7];\ + tmp[5]= AH*src1[ 3] + BH*src1[ 4] + CH*src1[5] + DH*src1[6] + EH*src1[7] + FH*src1[8];\ + tmp[6]= AH*src1[ 4] + BH*src1[ 5] + CH*src1[6] + DH*src1[7] + EH*src1[8] + FH*src1[9];\ + tmp[7]= AH*src1[ 5] + BH*src1[ 6] + CH*src1[7] + DH*src1[8] + EH*src1[9] + FH*src1[10];\ + tmp+=8;\ + src1+=srcStride;\ + }\ + if(FULL) {\ + tmp = temp+8*2; \ + for(i=0; i<w; i++) \ + { \ + const int tmpB= tmp[-2*8]; \ + const int tmpA= tmp[-1*8]; \ + const int tmp0= tmp[0 *8]; \ + const int tmp1= tmp[1 *8]; \ + const int tmp2= tmp[2 *8]; \ + const int tmp3= tmp[3 *8]; \ + const int tmp4= tmp[4 *8]; \ + const int tmp5= tmp[5 *8]; \ + const int tmp6= tmp[6 *8]; \ + const int tmp7= tmp[7 *8]; \ + const int tmp8= tmp[8 *8]; \ + const int tmp9= tmp[9 *8]; \ + const int tmp10=tmp[10*8]; \ + OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3 + 64*src2[0*srcStride]); \ + OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4 + 64*src2[1*srcStride]); \ + OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5 + 64*src2[2*srcStride]); \ + OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6 + 64*src2[3*srcStride]); \ + OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7 + 64*src2[4*srcStride]); \ + OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8 + 64*src2[5*srcStride]); \ + OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9 + 64*src2[6*srcStride]); \ + OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10 + 64*src2[7*srcStride]); \ + dst++; \ + tmp++; \ + src2++; \ + } \ + } else {\ + tmp = temp+8*2; \ + for(i=0; i<w; i++) \ + { \ + const int tmpB= tmp[-2*8]; \ + const int tmpA= tmp[-1*8]; \ + const int tmp0= tmp[0 *8]; \ + const int tmp1= tmp[1 *8]; \ + const int tmp2= tmp[2 *8]; \ + const int tmp3= tmp[3 *8]; \ + const int tmp4= tmp[4 *8]; \ + const int tmp5= tmp[5 *8]; \ + const int tmp6= tmp[6 *8]; \ + const int tmp7= tmp[7 *8]; \ + const int tmp8= tmp[8 *8]; \ + const int tmp9= tmp[9 *8]; \ + const int tmp10=tmp[10*8]; \ + OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3); \ + OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4); \ + OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5); \ + OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6); \ + OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7); \ + OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8); \ + OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9); \ + OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10); \ + dst++; \ + tmp++; \ + } \ + }\ +}\ +\ +static void OPNAME ## cavs_filt16_hv_ ## NAME(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int srcStride){ \ + OPNAME ## cavs_filt8_hv_ ## NAME(dst , src1, src2 , dstStride, srcStride); \ + OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \ + src1 += 8*srcStride;\ + src2 += 8*srcStride;\ + dst += 8*dstStride;\ + OPNAME ## cavs_filt8_hv_ ## NAME(dst , src1, src2 , dstStride, srcStride); \ + OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \ +}\ + +#define CAVS_MC(OPNAME, SIZE) \ +void OPNAME ## cavs_qpel ## SIZE ## _mc00_c 
(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\ +}\ +void OPNAME ## cavs_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _h_qpel_l(dst, src, stride, stride);\ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _h_hpel(dst, src, stride, stride);\ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _h_qpel_r(dst, src, stride, stride);\ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _v_qpel_l(dst, src, stride, stride);\ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _v_hpel(dst, src, stride, stride);\ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _v_qpel_r(dst, src, stride, stride);\ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_jj(dst, src, NULL, stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src, stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride, stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+1, stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride+1,stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_ff(dst, src, src+stride+1,stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_ii(dst, src, src+stride+1,stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_kk(dst, src, src+stride+1,stride, stride); \ +}\ +\ +void OPNAME ## cavs_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## cavs_filt ## SIZE ## _hv_qq(dst, src, src+stride+1,stride, stride); \ +}\ + +#define op_put1(a, b) a = cm[((b)+4)>>3] +#define op_put2(a, b) a = cm[((b)+64)>>7] +#define op_put3(a, b) a = cm[((b)+32)>>6] +#define op_put4(a, b) a = cm[((b)+512)>>10] +#define op_avg1(a, b) a = ((a)+cm[((b)+4)>>3] +1)>>1 +#define op_avg2(a, b) a = ((a)+cm[((b)+64)>>7] +1)>>1 +#define op_avg3(a, b) a = ((a)+cm[((b)+32)>>6] +1)>>1 +#define op_avg4(a, b) a = ((a)+cm[((b)+512)>>10]+1)>>1 +CAVS_SUBPIX(put_ , op_put1, hpel, 0, -1, 5, 5, -1, 0) +CAVS_SUBPIX(put_ , op_put2, qpel_l, -1, -2, 96, 42, -7, 0) +CAVS_SUBPIX(put_ , op_put2, qpel_r, 0, -7, 42, 96, -2, -1) +CAVS_SUBPIX_HV(put_, op_put3, jj, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 0) +CAVS_SUBPIX_HV(put_, op_put4, ff, 0, -1, 5, 5, -1, 0, -1, -2, 96, 42, -7, 0, 0) +CAVS_SUBPIX_HV(put_, op_put4, ii, -1, -2, 96, 42, -7, 0, 0, -1, 5, 5, -1, 0, 0) +CAVS_SUBPIX_HV(put_, op_put4, kk, 0, -7, 42, 96, -2, -1, 0, -1, 5, 5, -1, 0, 0) 
+CAVS_SUBPIX_HV(put_, op_put4, qq, 0, -1, 5, 5, -1, 0, 0, -7, 42, 96, -2,-1, 0) +CAVS_SUBPIX_HV(put_, op_put2, egpr, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 1) +CAVS_SUBPIX(avg_ , op_avg1, hpel, 0, -1, 5, 5, -1, 0) +CAVS_SUBPIX(avg_ , op_avg2, qpel_l, -1, -2, 96, 42, -7, 0) +CAVS_SUBPIX(avg_ , op_avg2, qpel_r, 0, -7, 42, 96, -2, -1) +CAVS_SUBPIX_HV(avg_, op_avg3, jj, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 0) +CAVS_SUBPIX_HV(avg_, op_avg4, ff, 0, -1, 5, 5, -1, 0, -1, -2, 96, 42, -7, 0, 0) +CAVS_SUBPIX_HV(avg_, op_avg4, ii, -1, -2, 96, 42, -7, 0, 0, -1, 5, 5, -1, 0, 0) +CAVS_SUBPIX_HV(avg_, op_avg4, kk, 0, -7, 42, 96, -2, -1, 0, -1, 5, 5, -1, 0, 0) +CAVS_SUBPIX_HV(avg_, op_avg4, qq, 0, -1, 5, 5, -1, 0, 0, -7, 42, 96, -2,-1, 0) +CAVS_SUBPIX_HV(avg_, op_avg2, egpr, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 1) +CAVS_MC(put_, 8) +CAVS_MC(put_, 16) +CAVS_MC(avg_, 8) +CAVS_MC(avg_, 16) diff --git a/libavcodec/cavsdsp.h b/libavcodec/cavsdsp.h new file mode 100644 index 0000000000..9425b9e448 --- /dev/null +++ b/libavcodec/cavsdsp.h @@ -0,0 +1,95 @@ +/* + * Chinese AVS video (AVS1-P2, JiZhun profile) decoder. + * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de> + * + * DSP function prototypes + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +void put_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc12_c(uint8_t *dst, uint8_t *src, 
int stride); +void put_cavs_qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride); +void put_cavs_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride); +void avg_cavs_qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride); +void cavs_filter_lv_c(uint8_t *d, int stride, int alpha, int beta, int tc, int bs1, int bs2); +void cavs_filter_lh_c(uint8_t *d, int stride, int alpha, int beta, int tc, int bs1, int bs2); +void cavs_filter_cv_c(uint8_t *d, int stride, int alpha, int beta, int tc, int bs1, int bs2); +void cavs_filter_ch_c(uint8_t *d, int stride, int alpha, int beta, int tc, int bs1, int bs2); +void cavs_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride); + +void put_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h); +void put_pixels16_c(uint8_t *block, const uint8_t *pixels, int line_size, int h); +void avg_pixels8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h); 
+void avg_pixels16_c(uint8_t *block, const uint8_t *pixels, int line_size, int h);
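
A note on reading the 2-D VLC tables in cavsdata.h: each rltab row pairs one Exp-Golomb codeword index with a (level, run, table_inc) triple, while level_add, golomb_order and inc_limit belong to the escape path and to the switch between successively flatter tables. The following is a minimal sketch of the in-table lookup only; the helper name is invented here, and escape codes (index 59 and above) as well as the exact table-switching rule are deliberately left out. It assumes cavsdata.h is included after the same headers as in cavs.c.

#include "cavsdata.h"   /* residual_vlc_t plus the intra/inter/chroma tables above */

/* Hypothetical helper: translate one in-table VLC index into a (level, run)
 * pair. A level of 0 marks the end of the block; escape codes and the
 * level_add/golomb_order machinery are intentionally not handled here. */
static int lookup_run_level(const residual_vlc_t *r, int code,
                            int *level, int *run)
{
    if (code < 0 || code >= 59)
        return -1;                /* escape code: needs the Golomb-coded path */
    *level = r->rltab[code][0];
    *run   = r->rltab[code][1];
    if (*level == 0)
        return 0;                 /* end of block */
    return r->rltab[code][2];     /* table_inc column: hint to advance to a higher table */
}

A call such as lookup_run_level(&intra_2dvlc[0], code, &level, &run) would serve the first intra table; a full implementation would additionally have to honour inc_limit and max_run, which this sketch ignores.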
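
The deblocking entry points take per-edge alpha, beta and tc strengths; alpha_tab, beta_tab and tc_tab are sized for a 0..63 index, shifted by the stream-level alpha/beta offsets. A minimal sketch of that lookup, assuming the QP across the edge is averaged and that tc is keyed with the alpha offset (both assumptions, as is the helper name):

#include "cavsdata.h"   /* alpha_tab, beta_tab, tc_tab */

static inline int clip63(int x) { return x < 0 ? 0 : (x > 63 ? 63 : x); }

/* Hypothetical helper: derive the filter strengths for one edge from the QPs
 * of the two adjacent blocks and the picture-level offsets. */
static void get_deblock_params(int qp_p, int qp_q,
                               int alpha_offset, int beta_offset,
                               int *alpha, int *beta, int *tc)
{
    int qp_avg = (qp_p + qp_q + 1) >> 1;                 /* assumed QP averaging */
    *alpha = alpha_tab[clip63(qp_avg + alpha_offset)];
    *beta  = beta_tab [clip63(qp_avg + beta_offset)];
    *tc    = tc_tab   [clip63(qp_avg + alpha_offset)];   /* assumption: same offset as alpha */
}

The resulting values feed cavs_filter_lv_c() and its three siblings, together with the boundary-strength arguments bs1/bs2, where 2 selects the strong filter, 1 the normal one and 0 leaves the edge untouched.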
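
In the _mcXY_c function names the first digit is the horizontal and the second the vertical fractional position in quarter-pel units, which the macro expansions above make explicit: mc10/mc20/mc30 use the horizontal filters, mc01/mc02/mc03 the vertical ones, mc00 is a plain copy and mc22 the centre half-pel point. Below is a minimal dispatch sketch over the prototypes from cavsdsp.h; the lookup table and the calling function are illustrative only and not part of this commit.

#include "dsputil.h"    /* DCTELEM, needed by the prototypes in cavsdsp.h */
#include "cavsdsp.h"

typedef void (*qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride);

/* Hypothetical dispatch table, indexed as [my & 3][mx & 3]. */
static qpel_mc_func put_cavs_qpel16[4][4] = {
    { put_cavs_qpel16_mc00_c, put_cavs_qpel16_mc10_c, put_cavs_qpel16_mc20_c, put_cavs_qpel16_mc30_c },
    { put_cavs_qpel16_mc01_c, put_cavs_qpel16_mc11_c, put_cavs_qpel16_mc21_c, put_cavs_qpel16_mc31_c },
    { put_cavs_qpel16_mc02_c, put_cavs_qpel16_mc12_c, put_cavs_qpel16_mc22_c, put_cavs_qpel16_mc32_c },
    { put_cavs_qpel16_mc03_c, put_cavs_qpel16_mc13_c, put_cavs_qpel16_mc23_c, put_cavs_qpel16_mc33_c },
};

/* Hypothetical caller: mx/my form a luma motion vector in quarter-pel units. */
static void mc_luma_16x16(uint8_t *dst, uint8_t *ref, int stride, int mx, int my)
{
    uint8_t *src = ref + (my >> 2) * stride + (mx >> 2);   /* integer-pel part */
    put_cavs_qpel16[my & 3][mx & 3](dst, src, stride);     /* fractional part  */
}

The avg_ prototypes follow the same pattern when the prediction has to be averaged into dst rather than stored.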