author      Kaustubh Raste <kaustubh.raste@imgtec.com>      2017-09-25 18:08:35 +0530
committer   Michael Niedermayer <michael@niedermayer.cc>    2017-09-27 21:15:57 +0200
commit      deeaaba1ab1e7476ef8c9e17851eb6bf49d69682 (patch)
tree        d7217f43653274f8b27153a963e35b7df15a867b
parent      55cc0bccf357ca13d124472822477c172cd06336 (diff)
download    ffmpeg-deeaaba1ab1e7476ef8c9e17851eb6bf49d69682.tar.gz
avcodec/mips: preload data in hevc sao edge 45 degree filter msa functions
Signed-off-by: Kaustubh Raste <kaustubh.raste@imgtec.com>
Reviewed-by: Manojkumar Bhosale <Manojkumar.Bhosale@imgtec.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
-rw-r--r--   libavcodec/mips/hevc_lpf_sao_msa.c   197
1 file changed, 135 insertions(+), 62 deletions(-)
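The core of the change is a software-pipelined loop shape: in each of the three 45-degree SAO edge filter variants, the loads for the next pair (or quartet) of rows are issued inside the loop before the current result is stored (the /* load in advance */ comments below), and the final iteration is peeled out of the loop so the epilogue issues no loads beyond the rows it needs. What follows is a minimal scalar sketch of that pattern, assuming a toy row-averaging kernel; avg_rows_pipelined, W, and the memcpy "loads" are hypothetical stand-ins for the MSA intrinsics, not code from this commit.

    #include <stdint.h>
    #include <string.h>

    #define W 16  /* one MSA vector register's worth of pixels */

    /* Hypothetical scalar model of the loop shape (not the committed MSA
     * code).  Output row r is the average of input rows r and r + 1, so
     * the caller must supply height + 1 input rows.  Each pass copies the
     * next pass's row into a local buffer (standing in for a vector
     * register) before storing the current result, and the last iteration
     * is peeled out of the loop so the epilogue issues no further loads. */
    void avg_rows_pipelined(uint8_t *dst, int dst_stride,
                            const uint8_t *src, int src_stride, int height)
    {
        uint8_t cur[W], next[W], out[W];

        memcpy(cur, src, W);                    /* prologue: load row 0 */
        memcpy(next, src + src_stride, W);      /* preload row 1        */

        for (height -= 1; height; height--) {
            for (int i = 0; i < W; i++)         /* compute on loaded data */
                out[i] = (uint8_t) ((cur[i] + next[i] + 1) >> 1);

            memcpy(cur, next, W);               /* rotate the "registers"  */
            src += src_stride;
            memcpy(next, src + src_stride, W);  /* next pass's load, issued
                                                 * before this pass stores */
            memcpy(dst, out, W);
            dst += dst_stride;
        }

        /* peeled final iteration: same arithmetic, no trailing load */
        for (int i = 0; i < W; i++)
            dst[i] = (uint8_t) ((cur[i] + next[i] + 1) >> 1);
    }

The committed MSA code applies the same shape with LD_UB2/LD_UB4 into vector registers instead of memcpy into buffers, so on an in-order core the row loads overlap the compare/shuffle arithmetic of the previous rows.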
diff --git a/libavcodec/mips/hevc_lpf_sao_msa.c b/libavcodec/mips/hevc_lpf_sao_msa.c
index 39c647ed79..c192265e85 100644
--- a/libavcodec/mips/hevc_lpf_sao_msa.c
+++ b/libavcodec/mips/hevc_lpf_sao_msa.c
@@ -1878,23 +1878,25 @@ static void hevc_sao_edge_filter_45degree_4width_msa(uint8_t *dst,
                                                      int32_t height)
 {
     uint8_t *src_orig;
-    int32_t h_cnt;
     uint32_t dst_val0, dst_val1;
-    v8i16 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0 };
+    v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
     v16u8 const1 = (v16u8) __msa_ldi_b(1);
-    v16i8 zero = { 0 };
+    v16i8 offset, sao_offset = LD_SB(sao_offset_val);
     v16u8 cmp_minus10, diff_minus10, src_minus10, cmp_minus11, diff_minus11;
     v16u8 src_minus11, src10, src11;
     v16i8 src_plus0, src_zero0, src_plus1, src_zero1, dst0;
-    v8i16 sao_offset, src00, src01, offset_mask0, offset_mask1;
+    v8i16 offset_mask0, offset_mask1;
 
-    sao_offset = LD_SH(sao_offset_val);
+    sao_offset = __msa_pckev_b(sao_offset, sao_offset);
     src_orig = src - 1;
+
+    /* load in advance */
     LD_UB2(src_orig - src_stride, src_stride, src_minus10, src_minus11);
+    LD_UB2(src_orig + src_stride, src_stride, src10, src11);
 
-    for (h_cnt = (height >> 1); h_cnt--;) {
-        LD_UB2(src_orig + src_stride, src_stride, src10, src11);
+    for (height -= 2; height; height -= 2) {
+        src_orig += (src_stride << 1);
 
         SLDI_B2_0_SB(src_minus11, src10, src_zero0, src_zero1, 1);
         SLDI_B2_0_SB(src10, src11, src_plus0, src_plus1, 2);
@@ -1917,20 +1919,22 @@ static void hevc_sao_edge_filter_45degree_4width_msa(uint8_t *dst,
         offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
         offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
 
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask0,
-                   offset_mask0, offset_mask0, offset_mask0);
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask1,
-                   offset_mask1, offset_mask1, offset_mask1);
-        ILVEV_B2_SH(src_zero0, zero, src_zero1, zero, src00, src01);
-        ADD2(offset_mask0, src00, offset_mask1, src01, offset_mask0,
-             offset_mask1);
-        CLIP_SH2_0_255(offset_mask0, offset_mask1);
+        offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+        dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
 
-        dst0 = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+        VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset,
+                   offset, offset, offset);
+
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+        dst0 = __msa_adds_s_b(dst0, offset);
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
 
         src_minus10 = src10;
         src_minus11 = src11;
 
+        /* load in advance */
+        LD_UB2(src_orig + src_stride, src_stride, src10, src11);
+
         dst_val0 = __msa_copy_u_w((v4i32) dst0, 0);
         dst_val1 = __msa_copy_u_w((v4i32) dst0, 2);
         SW(dst_val0, dst);
@@ -1938,8 +1942,44 @@ static void hevc_sao_edge_filter_45degree_4width_msa(uint8_t *dst,
         SW(dst_val1, dst);
         dst += dst_stride;
-
-        src_orig += (src_stride << 1);
     }
+
+    SLDI_B2_0_SB(src_minus11, src10, src_zero0, src_zero1, 1);
+    SLDI_B2_0_SB(src10, src11, src_plus0, src_plus1, 2);
+
+    ILVR_B2_UB(src_plus0, src_minus10, src_plus1, src_minus11, src_minus10,
+               src_minus11);
+    ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0,
+               src_zero1);
+
+    cmp_minus10 = ((v16u8) src_zero0 == src_minus10);
+    diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10);
+    cmp_minus10 = (src_minus10 < (v16u8) src_zero0);
+    diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10);
+
+    cmp_minus11 = ((v16u8) src_zero1 == src_minus11);
+    diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11);
+    cmp_minus11 = (src_minus11 < (v16u8) src_zero1);
+    diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11);
+
+    offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
+    offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
+
+    offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+    dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+    VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset,
+               offset, offset);
+
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+    dst0 = __msa_adds_s_b(dst0, offset);
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+
+    dst_val0 = __msa_copy_u_w((v4i32) dst0, 0);
+    dst_val1 = __msa_copy_u_w((v4i32) dst0, 2);
+    SW(dst_val0, dst);
+    dst += dst_stride;
+    SW(dst_val1, dst);
 }
 
 static void hevc_sao_edge_filter_45degree_8width_msa(uint8_t *dst,
@@ -1950,23 +1990,24 @@ static void hevc_sao_edge_filter_45degree_8width_msa(uint8_t *dst,
                                                      int32_t height)
 {
     uint8_t *src_orig;
-    int32_t h_cnt;
     uint64_t dst_val0, dst_val1;
-    v8i16 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0 };
+    v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
     v16u8 const1 = (v16u8) __msa_ldi_b(1);
-    v16i8 zero = { 0 };
+    v16i8 offset, sao_offset = LD_SB(sao_offset_val);
     v16u8 cmp_minus10, diff_minus10, cmp_minus11, diff_minus11;
     v16u8 src_minus10, src10, src_minus11, src11;
-    v16i8 src_zero0, src_plus10, src_zero1, src_plus11, dst0, dst1;
-    v8i16 sao_offset, src00, offset_mask0, src01, offset_mask1;
+    v16i8 src_zero0, src_plus10, src_zero1, src_plus11, dst0;
+    v8i16 offset_mask0, offset_mask1;
 
-    sao_offset = LD_SH(sao_offset_val);
+    sao_offset = __msa_pckev_b(sao_offset, sao_offset);
     src_orig = src - 1;
 
+    /* load in advance */
     LD_UB2(src_orig - src_stride, src_stride, src_minus10, src_minus11);
+    LD_UB2(src_orig + src_stride, src_stride, src10, src11);
 
-    for (h_cnt = (height >> 1); h_cnt--;) {
-        LD_UB2(src_orig + src_stride, src_stride, src10, src11);
+    for (height -= 2; height; height -= 2) {
+        src_orig += (src_stride << 1);
 
         SLDI_B2_0_SB(src_minus11, src10, src_zero0, src_zero1, 1);
         SLDI_B2_0_SB(src10, src11, src_plus10, src_plus11, 2);
@@ -1989,29 +2030,71 @@ static void hevc_sao_edge_filter_45degree_8width_msa(uint8_t *dst,
         offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
         offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
 
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask0,
-                   offset_mask0, offset_mask0, offset_mask0);
-        VSHF_H2_SH(edge_idx, edge_idx, sao_offset, sao_offset, offset_mask1,
-                   offset_mask1, offset_mask1, offset_mask1);
-        ILVEV_B2_SH(src_zero0, zero, src_zero1, zero, src00, src01);
-        ADD2(offset_mask0, src00, offset_mask1, src01, offset_mask0,
-             offset_mask1);
-        CLIP_SH2_0_255(offset_mask0, offset_mask1);
-        PCKEV_B2_SB(offset_mask0, offset_mask0, offset_mask1, offset_mask1,
-                    dst0, dst1);
+        offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+        dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+        VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset,
+                   offset, offset);
+
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+        dst0 = __msa_adds_s_b(dst0, offset);
+        dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
 
         src_minus10 = src10;
         src_minus11 = src11;
 
+        /* load in advance */
+        LD_UB2(src_orig + src_stride, src_stride, src10, src11);
+
         dst_val0 = __msa_copy_u_d((v2i64) dst0, 0);
-        dst_val1 = __msa_copy_u_d((v2i64) dst1, 0);
+        dst_val1 = __msa_copy_u_d((v2i64) dst0, 1);
         SD(dst_val0, dst);
         dst += dst_stride;
         SD(dst_val1, dst);
-        dst += dst_stride;
-
-        src_orig += (src_stride << 1);
     }
+
+    SLDI_B2_0_SB(src_minus11, src10, src_zero0, src_zero1, 1);
+    SLDI_B2_0_SB(src10, src11, src_plus10, src_plus11, 2);
+    ILVR_B2_UB(src_plus10, src_minus10, src_plus11, src_minus11, src_minus10,
+               src_minus11);
+    ILVR_B2_SB(src_zero0, src_zero0, src_zero1, src_zero1, src_zero0,
+               src_zero1);
+
+    cmp_minus10 = ((v16u8) src_zero0 == src_minus10);
+    diff_minus10 = __msa_nor_v(cmp_minus10, cmp_minus10);
+    cmp_minus10 = (src_minus10 < (v16u8) src_zero0);
+    diff_minus10 = __msa_bmnz_v(diff_minus10, const1, cmp_minus10);
+
+    cmp_minus11 = ((v16u8) src_zero1 == src_minus11);
+    diff_minus11 = __msa_nor_v(cmp_minus11, cmp_minus11);
+    cmp_minus11 = (src_minus11 < (v16u8) src_zero1);
+    diff_minus11 = __msa_bmnz_v(diff_minus11, const1, cmp_minus11);
+
+    offset_mask0 = (v8i16) (__msa_hadd_u_h(diff_minus10, diff_minus10) + 2);
+    offset_mask1 = (v8i16) (__msa_hadd_u_h(diff_minus11, diff_minus11) + 2);
+
+    offset = __msa_pckev_b((v16i8) offset_mask1, (v16i8) offset_mask0);
+    dst0 = __msa_pckev_b((v16i8) src_zero1, (v16i8) src_zero0);
+
+    VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset, offset, offset,
+               offset, offset);
+
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+    dst0 = __msa_adds_s_b(dst0, offset);
+    dst0 = (v16i8) __msa_xori_b((v16u8) dst0, 128);
+
+    src_minus10 = src10;
+    src_minus11 = src11;
+
+    /* load in advance */
+    LD_UB2(src_orig + src_stride, src_stride, src10, src11);
+
+    dst_val0 = __msa_copy_u_d((v2i64) dst0, 0);
+    dst_val1 = __msa_copy_u_d((v2i64) dst0, 1);
+    SD(dst_val0, dst);
+    dst += dst_stride;
+    SD(dst_val1, dst);
 }
 
 static void hevc_sao_edge_filter_45degree_16multiple_msa(uint8_t *dst,
@@ -2025,7 +2108,7 @@ static void hevc_sao_edge_filter_45degree_16multiple_msa(uint8_t *dst,
 {
     uint8_t *src_orig = src;
     uint8_t *dst_orig = dst;
-    int32_t h_cnt, v_cnt;
+    int32_t v_cnt;
     v16i8 edge_idx = { 1, 2, 0, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
     v16u8 const1 = (v16u8) __msa_ldi_b(1);
     v16u8 cmp_minus10, cmp_plus10, diff_minus10, diff_plus10, cmp_minus11;
@@ -2037,22 +2120,20 @@ static void hevc_sao_edge_filter_45degree_16multiple_msa(uint8_t *dst,
     v16u8 src12, src_minus12, dst2, src13, src_minus13, dst3;
     v16i8 src_zero0, src_plus10, src_zero1, src_plus11, src_zero2, src_plus12;
     v16i8 src_zero3, sao_offset;
-    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
-    v8i16 temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
 
     sao_offset = LD_SB(sao_offset_val);
     sao_offset = __msa_pckev_b(sao_offset, sao_offset);
 
-    for (h_cnt = (height >> 2); h_cnt--;) {
+    for (; height; height -= 4) {
         src_orig = src - 1;
         dst_orig = dst;
-        LD_UB4(src_orig, src_stride,
-               src_minus11, src_minus12, src_minus13, src_minus14);
+        LD_UB4(src_orig, src_stride, src_minus11, src_minus12, src_minus13,
+               src_minus14);
 
-        for (v_cnt = 0; v_cnt < (width >> 4); v_cnt++) {
+        for (v_cnt = 0; v_cnt < width; v_cnt += 16) {
             src_minus10 = LD_UB(src_orig - src_stride);
             LD_UB4(src_orig + 16, src_stride, src10, src11, src12, src13);
-            src_plus13 = LD_UB(src + 1 + (v_cnt << 4) + (src_stride << 2));
+            src_plus13 = LD_UB(src + 1 + v_cnt + (src_stride << 2));
             src_orig += 16;
 
             SLDI_B2_SB(src10, src11, src_minus11, src_minus12, src_zero0,
@@ -2114,22 +2195,14 @@ static void hevc_sao_edge_filter_45degree_16multiple_msa(uint8_t *dst,
             VSHF_B2_SB(edge_idx, edge_idx, sao_offset, sao_offset,
                        offset_mask3, offset_mask3, offset_mask3, offset_mask3);
 
-            UNPCK_UB_SH(src_zero0, src0, src1);
-            UNPCK_SB_SH(offset_mask0, temp0, temp1);
-            UNPCK_UB_SH(src_zero1, src2, src3);
-            UNPCK_SB_SH(offset_mask1, temp2, temp3);
-            UNPCK_UB_SH(src_zero2, src4, src5);
-            UNPCK_SB_SH(offset_mask2, temp4, temp5);
-            UNPCK_UB_SH(src_zero3, src6, src7);
-            UNPCK_SB_SH(offset_mask3, temp6, temp7);
-            ADD4(temp0, src0, temp1, src1, temp2, src2, temp3, src3, temp0,
-                 temp1, temp2, temp3);
-            ADD4(temp4, src4, temp5, src5, temp6, src6, temp7, src7, temp4,
-                 temp5, temp6, temp7);
-            CLIP_SH4_0_255(temp0, temp1, temp2, temp3);
-            CLIP_SH4_0_255(temp4, temp5, temp6, temp7);
-            PCKEV_B4_UB(temp1, temp0, temp3, temp2, temp5, temp4,
-                        temp7, temp6, dst0, dst1, dst2, dst3);
+            XORI_B4_128_SB(src_zero0, src_zero1, src_zero2, src_zero3);
+
+            dst0 = (v16u8) __msa_adds_s_b((v16i8) src_zero0, offset_mask0);
+            dst1 = (v16u8) __msa_adds_s_b((v16i8) src_zero1, offset_mask1);
+            dst2 = (v16u8) __msa_adds_s_b((v16i8) src_zero2, offset_mask2);
+            dst3 = (v16u8) __msa_adds_s_b((v16i8) src_zero3, offset_mask3);
+
+            XORI_B4_128_UB(dst0, dst1, dst2, dst3);
 
             src_minus11 = src10;
             src_minus12 = src11;
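The arithmetic change in all three functions is worth spelling out: the old path widened pixels and offsets to 16-bit lanes (UNPCK), added, clamped with CLIP_SH*_0_255, and packed back down, while the new path stays in 8-bit lanes by biasing the unsigned pixels with xori 128, doing a signed saturating byte add (__msa_adds_s_b), and unbiasing with a second xori 128. A scalar model of the equivalence, with a hypothetical helper name that is not from this commit:

    #include <stdint.h>

    /* Scalar model of the new xori(128) / adds_s_b / xori(128) sequence.
     * pix ^ 0x80 maps the unsigned range [0,255] onto the signed range
     * [-128,127], so a signed saturating add of the SAO offset followed
     * by the inverse bias is exactly clamp(pix + offset, 0, 255),
     * computed without ever widening beyond 8-bit lanes. */
    uint8_t sao_add_clamp(uint8_t pix, int8_t offset)
    {
        int sum = (int8_t) (pix ^ 0x80) + offset;  /* biased, widened add   */

        if (sum > 127)                             /* __msa_adds_s_b clamps */
            sum = 127;                             /* to the int8 range     */
        else if (sum < -128)
            sum = -128;

        return (uint8_t) (sum + 128);              /* inverse of the bias   */
    }

For example, sao_add_clamp(250, 20) returns 255 and sao_add_clamp(5, -20) returns 0, the same results the removed UNPCK/ADD/CLIP/PCKEV path produced with 16-bit intermediates; staying in byte lanes handles 16 pixels per vector operation instead of 8.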