author    yuanhecai <yuanhecai@loongson.cn>          2021-12-18 22:27:54 +0800
committer Michael Niedermayer <michael@niedermayer.cc>   2021-12-23 12:28:54 +0100
commit    72bcbe216ef3d47498392ed2bada83994cd9fc86 (patch)
tree      27ec14e4088e463580ef473cfce2634a347c3782
parent    ed6c5c13b10930ea95c622d6ef6e32a6e2077018 (diff)
download  ffmpeg-72bcbe216ef3d47498392ed2bada83994cd9fc86.tar.gz
avcodec: [loongarch] Optimize vp8_lpf/mc with LSX.
./ffmpeg -i ../9_vp8_1080p_30fps_2Mbps.webm -f rawvideo -y /dev/null -an
before: 210fps
after:  585fps

Reviewed-by: Shiyou Yin <yinshiyou-hf@loongson.cn>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
-rw-r--r--  libavcodec/loongarch/Makefile                   3
-rw-r--r--  libavcodec/loongarch/vp8_lpf_lsx.c            591
-rw-r--r--  libavcodec/loongarch/vp8_mc_lsx.c             951
-rw-r--r--  libavcodec/loongarch/vp8dsp_init_loongarch.c   63
-rw-r--r--  libavcodec/loongarch/vp8dsp_loongarch.h        90
-rw-r--r--  libavcodec/vp8dsp.c                             2
-rw-r--r--  libavcodec/vp8dsp.h                             1
7 files changed, 1701 insertions(+), 0 deletions(-)
diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
index 30799e4e48..4e1d827e19 100644
--- a/libavcodec/loongarch/Makefile
+++ b/libavcodec/loongarch/Makefile
@@ -2,9 +2,12 @@ OBJS-$(CONFIG_H264CHROMA)             += loongarch/h264chroma_init_loongarch.o
 OBJS-$(CONFIG_H264QPEL)               += loongarch/h264qpel_init_loongarch.o
 OBJS-$(CONFIG_H264DSP)                += loongarch/h264dsp_init_loongarch.o
 OBJS-$(CONFIG_H264PRED)               += loongarch/h264_intrapred_init_loongarch.o
+OBJS-$(CONFIG_VP8_DECODER)            += loongarch/vp8dsp_init_loongarch.o
 LASX-OBJS-$(CONFIG_H264CHROMA)        += loongarch/h264chroma_lasx.o
 LASX-OBJS-$(CONFIG_H264QPEL)          += loongarch/h264qpel_lasx.o
 LASX-OBJS-$(CONFIG_H264DSP)           += loongarch/h264dsp_lasx.o \
                                          loongarch/h264idct_lasx.o \
                                          loongarch/h264_deblock_lasx.o
 LASX-OBJS-$(CONFIG_H264PRED)          += loongarch/h264_intrapred_lasx.o
+LSX-OBJS-$(CONFIG_VP8_DECODER)        += loongarch/vp8_mc_lsx.o \
+                                         loongarch/vp8_lpf_lsx.o
diff --git a/libavcodec/loongarch/vp8_lpf_lsx.c b/libavcodec/loongarch/vp8_lpf_lsx.c
new file mode 100644
index 0000000000..f0fc3f3a5b
--- /dev/null
+++ b/libavcodec/loongarch/vp8_lpf_lsx.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp8dsp.h"
+#include "vp8dsp_loongarch.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+
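+/* VP8 normal (inner) loop filter applied across one edge, 16 lanes at
+ * a time.  Scalar reference (RFC 6386):
+ *   f  = clamp(p1 - q1) & hev
+ *   f  = clamp(f + 3 * (q0 - p0)) & mask
+ *   F1 = clamp(f + 4) >> 3;  q0 -= F1
+ *   F2 = clamp(f + 3) >> 3;  p0 += F2
+ *   f  = ((F1 + 1) >> 1) & ~hev;  q1 -= f;  p1 += f
+ * Pixels are biased to signed with the 0x80 XORs so that saturating
+ * signed byte ops realize the clamps; the 3 * (q0 - p0) term is
+ * widened to 16 bits per half and narrowed back with saturation. */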
+#define VP8_LPF_FILTER4_4W(p1_in_out, p0_in_out, q0_in_out, q1_in_out, \
+ mask_in, hev_in) \
+{ \
+ __m128i p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
+ __m128i filt, filt1, filt2, cnst4b, cnst3b; \
+ __m128i q0_sub_p0_l, q0_sub_p0_h, filt_h, filt_l, cnst3h; \
+ \
+ p1_m = __lsx_vxori_b(p1_in_out, 0x80); \
+ p0_m = __lsx_vxori_b(p0_in_out, 0x80); \
+ q0_m = __lsx_vxori_b(q0_in_out, 0x80); \
+ q1_m = __lsx_vxori_b(q1_in_out, 0x80); \
+ filt = __lsx_vssub_b(p1_m, q1_m); \
+ filt = filt & hev_in; \
+ \
+ q0_sub_p0 = __lsx_vsub_b(q0_m, p0_m); \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ \
+ cnst3h = __lsx_vreplgr2vr_h(3); \
+ q0_sub_p0_l = __lsx_vilvl_b(q0_sub_p0, q0_sub_p0); \
+ q0_sub_p0_l = __lsx_vdp2_h_b(q0_sub_p0_l, cnst3h); \
+ filt_l = __lsx_vilvl_b(filt_sign, filt); \
+ filt_l = __lsx_vadd_h(filt_l, q0_sub_p0_l); \
+ filt_l = __lsx_vsat_h(filt_l, 7); \
+ \
+ q0_sub_p0_h = __lsx_vilvh_b(q0_sub_p0, q0_sub_p0); \
+ q0_sub_p0_h = __lsx_vdp2_h_b(q0_sub_p0_h, cnst3h); \
+ filt_h = __lsx_vilvh_b(filt_sign, filt); \
+ filt_h = __lsx_vadd_h(filt_h, q0_sub_p0_h); \
+ filt_h = __lsx_vsat_h(filt_h, 7); \
+ \
+ filt = __lsx_vpickev_b(filt_h, filt_l); \
+ filt = filt & mask_in; \
+ cnst4b = __lsx_vreplgr2vr_b(4); \
+ filt1 = __lsx_vsadd_b(filt, cnst4b); \
+ filt1 = __lsx_vsrai_b(filt1, 3); \
+ \
+ cnst3b = __lsx_vreplgr2vr_b(3); \
+ filt2 = __lsx_vsadd_b(filt, cnst3b); \
+ filt2 = __lsx_vsrai_b(filt2, 3); \
+ \
+ q0_m = __lsx_vssub_b(q0_m, filt1); \
+ q0_in_out = __lsx_vxori_b(q0_m, 0x80); \
+ p0_m = __lsx_vsadd_b(p0_m, filt2); \
+ p0_in_out = __lsx_vxori_b(p0_m, 0x80); \
+ \
+ filt = __lsx_vsrari_b(filt1, 1); \
+ hev_in = __lsx_vxori_b(hev_in, 0xff); \
+ filt = filt & hev_in; \
+ \
+ q1_m = __lsx_vssub_b(q1_m, filt); \
+ q1_in_out = __lsx_vxori_b(q1_m, 0x80); \
+ p1_m = __lsx_vsadd_b(p1_m, filt); \
+ p1_in_out = __lsx_vxori_b(p1_m, 0x80); \
+}
+
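+/* VP8 macroblock-edge loop filter.  High-edge-variance lanes take the
+ * same 4-tap adjustment as the inner filter (filt2 = filt & hev below);
+ * the remaining lanes use the wide filter of RFC 6386 with
+ * w = clamp(clamp(p1 - q1) + 3 * (q0 - p0)) & mask & ~hev:
+ *   q0 -= (27 * w + 63) >> 7;  p0 += (27 * w + 63) >> 7
+ *   q1 -= (18 * w + 63) >> 7;  p1 += (18 * w + 63) >> 7
+ *   q2 -= ( 9 * w + 63) >> 7;  p2 += ( 9 * w + 63) >> 7
+ * The 9 * w product is formed as (w << 3) + w in the last block. */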
+#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
+{ \
+ __m128i p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
+ __m128i filt, q0_sub_p0, cnst4b, cnst3b; \
+ __m128i u, filt1, filt2, filt_sign, q0_sub_p0_sign; \
+ __m128i q0_sub_p0_l, q0_sub_p0_h, filt_l, u_l, u_h, filt_h; \
+ __m128i cnst3h, cnst27h, cnst18h, cnst63h; \
+ \
+ cnst3h = __lsx_vreplgr2vr_h(3); \
+ \
+ p2_m = __lsx_vxori_b(p2, 0x80); \
+ p1_m = __lsx_vxori_b(p1, 0x80); \
+ p0_m = __lsx_vxori_b(p0, 0x80); \
+ q0_m = __lsx_vxori_b(q0, 0x80); \
+ q1_m = __lsx_vxori_b(q1, 0x80); \
+ q2_m = __lsx_vxori_b(q2, 0x80); \
+ \
+ filt = __lsx_vssub_b(p1_m, q1_m); \
+ q0_sub_p0 = __lsx_vsub_b(q0_m, p0_m); \
+ q0_sub_p0_sign = __lsx_vslti_b(q0_sub_p0, 0); \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ \
+ /* right part */ \
+ q0_sub_p0_l = __lsx_vilvl_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_l = __lsx_vmul_h(q0_sub_p0_l, cnst3h); \
+ filt_l = __lsx_vilvl_b(filt_sign, filt); \
+ filt_l = __lsx_vadd_h(filt_l, q0_sub_p0_l); \
+ filt_l = __lsx_vsat_h(filt_l, 7); \
+ \
+ /* left part */ \
+ q0_sub_p0_h = __lsx_vilvh_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_h = __lsx_vmul_h(q0_sub_p0_h, cnst3h); \
+ filt_h = __lsx_vilvh_b(filt_sign, filt); \
+ filt_h = __lsx_vadd_h(filt_h, q0_sub_p0_h); \
+ filt_h = __lsx_vsat_h(filt_h, 7); \
+ \
+ /* combine left and right part */ \
+ filt = __lsx_vpickev_b(filt_h, filt_l); \
+ filt = filt & mask; \
+ filt2 = filt & hev; \
+ /* filt_val &= ~hev */ \
+ hev = __lsx_vxori_b(hev, 0xff); \
+ filt = filt & hev; \
+ cnst4b = __lsx_vreplgr2vr_b(4); \
+ filt1 = __lsx_vsadd_b(filt2, cnst4b); \
+ filt1 = __lsx_vsrai_b(filt1, 3); \
+ cnst3b = __lsx_vreplgr2vr_b(3); \
+ filt2 = __lsx_vsadd_b(filt2, cnst3b); \
+ filt2 = __lsx_vsrai_b(filt2, 3); \
+ q0_m = __lsx_vssub_b(q0_m, filt1); \
+ p0_m = __lsx_vsadd_b(p0_m, filt2); \
+ \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ filt_l = __lsx_vilvl_b(filt_sign, filt); \
+ filt_h = __lsx_vilvh_b(filt_sign, filt); \
+ \
+ cnst27h = __lsx_vreplgr2vr_h(27); \
+ cnst63h = __lsx_vreplgr2vr_h(63); \
+ \
+ /* right part */ \
+ u_l = __lsx_vmul_h(filt_l, cnst27h); \
+ u_l = __lsx_vadd_h(u_l, cnst63h); \
+ u_l = __lsx_vsrai_h(u_l, 7); \
+ u_l = __lsx_vsat_h(u_l, 7); \
+ /* left part */ \
+ u_h = __lsx_vmul_h(filt_h, cnst27h); \
+ u_h = __lsx_vadd_h(u_h, cnst63h); \
+ u_h = __lsx_vsrai_h(u_h, 7); \
+ u_h = __lsx_vsat_h(u_h, 7); \
+ /* combine left and right part */ \
+ u = __lsx_vpickev_b(u_h, u_l); \
+ q0_m = __lsx_vssub_b(q0_m, u); \
+ q0 = __lsx_vxori_b(q0_m, 0x80); \
+ p0_m = __lsx_vsadd_b(p0_m, u); \
+ p0 = __lsx_vxori_b(p0_m, 0x80); \
+ cnst18h = __lsx_vreplgr2vr_h(18); \
+ u_l = __lsx_vmul_h(filt_l, cnst18h); \
+ u_l = __lsx_vadd_h(u_l, cnst63h); \
+ u_l = __lsx_vsrai_h(u_l, 7); \
+ u_l = __lsx_vsat_h(u_l, 7); \
+ \
+ /* left part */ \
+ u_h = __lsx_vmul_h(filt_h, cnst18h); \
+ u_h = __lsx_vadd_h(u_h, cnst63h); \
+ u_h = __lsx_vsrai_h(u_h, 7); \
+ u_h = __lsx_vsat_h(u_h, 7); \
+ /* combine left and right part */ \
+ u = __lsx_vpickev_b(u_h, u_l); \
+ q1_m = __lsx_vssub_b(q1_m, u); \
+ q1 = __lsx_vxori_b(q1_m, 0x80); \
+ p1_m = __lsx_vsadd_b(p1_m, u); \
+ p1 = __lsx_vxori_b(p1_m, 0x80); \
+ u_l = __lsx_vslli_h(filt_l, 3); \
+ u_l = __lsx_vadd_h(u_l, filt_l); \
+ u_l = __lsx_vadd_h(u_l, cnst63h); \
+ u_l = __lsx_vsrai_h(u_l, 7); \
+ u_l = __lsx_vsat_h(u_l, 7); \
+ \
+ /* left part */ \
+ u_h = __lsx_vslli_h(filt_h, 3); \
+ u_h = __lsx_vadd_h(u_h, filt_h); \
+ u_h = __lsx_vadd_h(u_h, cnst63h); \
+ u_h = __lsx_vsrai_h(u_h, 7); \
+ u_h = __lsx_vsat_h(u_h, 7); \
+ /* combine left and right part */ \
+ u = __lsx_vpickev_b(u_h, u_l); \
+ q2_m = __lsx_vssub_b(q2_m, u); \
+ q2 = __lsx_vxori_b(q2_m, 0x80); \
+ p2_m = __lsx_vsadd_b(p2_m, u); \
+ p2 = __lsx_vxori_b(p2_m, 0x80); \
+}
+
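+/* Per-lane filter mask and high-edge-variance flag (RFC 6386):
+ * filtering is enabled where every neighbouring pixel difference is
+ * <= the interior limit and |p0 - q0| * 2 + |p1 - q1| / 2 <= the edge
+ * limit; hev is set where max(|p1 - p0|, |q1 - q0|) > thresh.  Both
+ * results are 0x00/0xff per byte, usable directly as bit masks. */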
+#define LPF_MASK_HEV(p3_src, p2_src, p1_src, p0_src, \
+ q0_src, q1_src, q2_src, q3_src, \
+ limit_src, b_limit_src, thresh_src, \
+ hev_dst, mask_dst, flat_dst) \
+{ \
+ __m128i p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
+ __m128i p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
+ \
+ /* absolute subtraction of pixel values */ \
+ p3_asub_p2_m = __lsx_vabsd_bu(p3_src, p2_src); \
+ p2_asub_p1_m = __lsx_vabsd_bu(p2_src, p1_src); \
+ p1_asub_p0_m = __lsx_vabsd_bu(p1_src, p0_src); \
+ q1_asub_q0_m = __lsx_vabsd_bu(q1_src, q0_src); \
+ q2_asub_q1_m = __lsx_vabsd_bu(q2_src, q1_src); \
+ q3_asub_q2_m = __lsx_vabsd_bu(q3_src, q2_src); \
+ p0_asub_q0_m = __lsx_vabsd_bu(p0_src, q0_src); \
+ p1_asub_q1_m = __lsx_vabsd_bu(p1_src, q1_src); \
+ \
+ /* calculation of hev */ \
+ flat_dst = __lsx_vmax_bu(p1_asub_p0_m, q1_asub_q0_m); \
+ hev_dst = __lsx_vslt_bu(thresh_src, flat_dst); \
+ /* calculation of mask */ \
+ p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p0_asub_q0_m); \
+ p1_asub_q1_m = __lsx_vsrli_b(p1_asub_q1_m, 1); \
+ p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p1_asub_q1_m); \
+ mask_dst = __lsx_vslt_bu(b_limit_src, p0_asub_q0_m); \
+ mask_dst = __lsx_vmax_bu(flat_dst, mask_dst); \
+ p3_asub_p2_m = __lsx_vmax_bu(p3_asub_p2_m, p2_asub_p1_m); \
+ mask_dst = __lsx_vmax_bu(p3_asub_p2_m, mask_dst); \
+ q2_asub_q1_m = __lsx_vmax_bu(q2_asub_q1_m, q3_asub_q2_m); \
+ mask_dst = __lsx_vmax_bu(q2_asub_q1_m, mask_dst); \
+ mask_dst = __lsx_vslt_bu(limit_src, mask_dst); \
+ mask_dst = __lsx_vxori_b(mask_dst, 0xff); \
+}
+
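+/* Stores the six filtered bytes of one transposed row: a 32-bit
+ * element (p2 p1 p0 q0) from in0, then a 16-bit element (q1 q2) from
+ * in1 at byte offset 4 (callers pass 4 as 'stride'). */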
+#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \
+{ \
+ __lsx_vstelm_w(in0, pdst, 0, in0_idx); \
+ __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx); \
+}
+
+#define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \
+{ \
+ __lsx_vstelm_w(in, pdst, 0, idx0); \
+ pdst += stride; \
+ __lsx_vstelm_w(in, pdst, 0, idx1); \
+ pdst += stride; \
+ __lsx_vstelm_w(in, pdst, 0, idx2); \
+ pdst += stride; \
+ __lsx_vstelm_w(in, pdst, 0, idx3); \
+ pdst += stride; \
+}
+
+void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+    /* load vector elements */
+ DUP4_ARG2(__lsx_vld, dst - stride4, 0, dst - stride3, 0, dst - stride2, 0,
+ dst - stride, 0, p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, dst, 0, dst + stride, 0, dst + stride2, 0, dst + stride3, 0,
+ q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+    /* store vector elements */
+ __lsx_vst(p2, dst - stride3, 0);
+ __lsx_vst(p1, dst - stride2, 0);
+ __lsx_vst(p0, dst - stride, 0);
+ __lsx_vst(q0, dst, 0);
+
+ __lsx_vst(q1, dst + stride, 0);
+ __lsx_vst(q2, dst + stride2, 0);
+}
+
+void ff_vp8_v_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ DUP4_ARG2(__lsx_vld, dst_u - stride4, 0, dst_u - stride3, 0, dst_u - stride2, 0,
+ dst_u - stride, 0, p3_u, p2_u, p1_u, p0_u);
+ DUP4_ARG2(__lsx_vld, dst_u, 0, dst_u + stride, 0, dst_u + stride2, 0,
+ dst_u + stride3, 0, q0_u, q1_u, q2_u, q3_u);
+
+ DUP4_ARG2(__lsx_vld, dst_v - stride4, 0, dst_v - stride3, 0, dst_v - stride2, 0,
+ dst_v - stride, 0, p3_v, p2_v, p1_v, p0_v);
+ DUP4_ARG2(__lsx_vld, dst_v, 0, dst_v + stride, 0, dst_v + stride2, 0,
+ dst_v + stride3, 0, q0_v, q1_v, q2_v, q3_v);
+
+    /* right 8 elements of p3 are u pixels and left 8 elements of p3 are v pixels */
+ DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ __lsx_vstelm_d(p2, dst_u - stride3, 0, 0);
+ __lsx_vstelm_d(p1, dst_u - stride2, 0, 0);
+ __lsx_vstelm_d(p0, dst_u - stride , 0, 0);
+ __lsx_vstelm_d(q0, dst_u, 0, 0);
+
+ __lsx_vstelm_d(q1, dst_u + stride, 0, 0);
+ __lsx_vstelm_d(q2, dst_u + stride2, 0, 0);
+
+ __lsx_vstelm_d(p2, dst_v - stride3, 0, 1);
+ __lsx_vstelm_d(p1, dst_v - stride2, 0, 1);
+ __lsx_vstelm_d(p0, dst_v - stride , 0, 1);
+ __lsx_vstelm_d(q0, dst_v, 0, 1);
+
+ __lsx_vstelm_d(q1, dst_v + stride, 0, 1);
+ __lsx_vstelm_d(q2, dst_v + stride2, 0, 1);
+}
+
+void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ temp_src = dst - 4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row0, row1, row2, row3);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row4, row5, row6, row7);
+
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row8, row9, row10, row11);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row12, row13, row14, row15);
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8, row9, row10,
+ row11, row12, row13, row14, row15, p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ tmp0 = __lsx_vilvl_b(p1, p2);
+ tmp1 = __lsx_vilvl_b(q0, p0);
+
+ tmp3 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp4 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp0 = __lsx_vilvh_b(p1, p2);
+ tmp1 = __lsx_vilvh_b(q0, p0);
+
+ tmp6 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp7 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp2 = __lsx_vilvl_b(q2, q1);
+ tmp5 = __lsx_vilvh_b(q2, q1);
+
+ temp_src = dst - 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
+}
+
+void ff_vp8_h_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ temp_src = dst_u - 4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row0, row1, row2, row3);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row4, row5, row6, row7);
+
+ temp_src = dst_v - 4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row8, row9, row10, row11);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row12, row13, row14, row15);
+
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ tmp0 = __lsx_vilvl_b(p1, p2);
+ tmp1 = __lsx_vilvl_b(q0, p0);
+
+ tmp3 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp4 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp0 = __lsx_vilvh_b(p1, p2);
+ tmp1 = __lsx_vilvh_b(q0, p0);
+
+ tmp6 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp7 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp2 = __lsx_vilvl_b(q2, q1);
+ tmp5 = __lsx_vilvh_b(q2, q1);
+
+ dst_u -= 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, dst_u, 4);
+
+ dst_v -= 3;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, dst_v, 4);
+}
+
+void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h)
+{
+ __m128i mask, hev, flat;
+ __m128i thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ /* load vector elements */
+ src -= stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, p3, p2, p1, p0);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, q0, q1, q2, q3);
+ thresh = __lsx_vreplgr2vr_b(h);
+ b_limit = __lsx_vreplgr2vr_b(e);
+ limit = __lsx_vreplgr2vr_b(i);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ __lsx_vst(p1, src - stride2, 0);
+ __lsx_vst(p0, src - stride, 0);
+ __lsx_vst(q0, src, 0);
+ __lsx_vst(q1, src + stride, 0);
+}
+
+void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h)
+{
+ __m128i mask, hev, flat;
+ __m128i thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ src -= 4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp0, tmp1, tmp2, tmp3);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp4, tmp5, tmp6, tmp7);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp8, tmp9, tmp10, tmp11);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp12, tmp13, tmp14, tmp15);
+ src -= 3 * stride4;
+
+ LSX_TRANSPOSE16x8_B(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
+ tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(h);
+ b_limit = __lsx_vreplgr2vr_b(e);
+ limit = __lsx_vreplgr2vr_b(i);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+
+ src += 2;
+ ST_W4(tmp2, 0, 1, 2, 3, src, stride);
+ ST_W4(tmp3, 0, 1, 2, 3, src, stride);
+
+ DUP2_ARG2(__lsx_vilvh_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+
+ ST_W4(tmp2, 0, 1, 2, 3, src, stride);
+ ST_W4(tmp3, 0, 1, 2, 3, src, stride);
+ src -= 4 * stride4;
+}
diff --git a/libavcodec/loongarch/vp8_mc_lsx.c b/libavcodec/loongarch/vp8_mc_lsx.c
new file mode 100644
index 0000000000..80c4f87e80
--- /dev/null
+++ b/libavcodec/loongarch/vp8_mc_lsx.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "libavcodec/vp8dsp.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+#include "vp8dsp_loongarch.h"
+
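+/* __lsx_vshuf_b control vectors: the first row gathers the overlapping
+ * source byte pairs (i, i+1) feeding filter taps 0-1; adding 2 and 4
+ * to every index (see the vaddi_bu calls below) shifts the window for
+ * taps 2-3 and 4-5. */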
+static const uint8_t mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
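+/* Signed VP8 six-tap subpel coefficients for fractional positions
+ * 1..7, padded to eight entries so __lsx_vldrepl_h can broadcast the
+ * tap pairs (c0,c1) (c2,c3) (c4,c5) consumed by the vdp2 dot products.
+ * Filtered samples are rounded as (sum + 64) >> 7, done with rounding
+ * shifts on 16-bit accumulators. */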
+static const int8_t subpel_filters_lsx[7][8] = {
+ {-6, 123, 12, -1, 0, 0, 0, 0},
+ {2, -11, 108, 36, -8, 1, 0, 0}, /* New 1/4 pel 6 tap filter */
+ {-9, 93, 50, -6, 0, 0, 0, 0},
+ {3, -16, 77, 77, -16, 3, 0, 0}, /* New 1/2 pel 6 tap filter */
+ {-6, 50, 93, -9, 0, 0, 0, 0},
+ {1, -8, 36, 108, -11, 2, 0, 0}, /* New 1/4 pel 6 tap filter */
+ {-1, 12, 123, -6, 0, 0, 0, 0},
+};
+
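+/* Three-pair multiply-accumulate: __lsx_vdp2_h_b multiplies adjacent
+ * signed bytes and sums each pair into a halfword, so three of them
+ * cover all six filter taps at 16-bit precision. */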
+#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \
+( { \
+ __m128i out0_m; \
+ \
+ out0_m = __lsx_vdp2_h_b(in0, coeff0); \
+ out0_m = __lsx_vdp2add_h_b(out0_m, in1, coeff1); \
+ out0_m = __lsx_vdp2add_h_b(out0_m, in2, coeff2); \
+ \
+ out0_m; \
+} )
+
+#define VSHF_B3_SB(in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
+ out0, out1, out2) \
+{ \
+ DUP2_ARG3(__lsx_vshuf_b, in1, in0, mask0, in3, in2, mask1, \
+ out0, out1); \
+ out2 = __lsx_vshuf_b(in5, in4, mask2); \
+}
+
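+/* One row of horizontal 6-tap filtering: shuffle out the three
+ * byte-pair windows, dot them against the tap pairs, then round
+ * (>> 7) and saturate the halfword results to 8-bit range. */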
+#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, \
+ filt_h0, filt_h1, filt_h2) \
+( { \
+ __m128i vec0_m, vec1_m, vec2_m; \
+ __m128i hz_out_m; \
+ \
+ VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \
+ vec0_m, vec1_m, vec2_m); \
+ hz_out_m = DPADD_SH3_SH(vec0_m, vec1_m, vec2_m, \
+ filt_h0, filt_h1, filt_h2); \
+ \
+ hz_out_m = __lsx_vsrari_h(hz_out_m, 7); \
+ hz_out_m = __lsx_vsat_h(hz_out_m, 7); \
+ \
+ hz_out_m; \
+} )
+
+#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \
+ mask0, mask1, mask2, \
+ filt0, filt1, filt2, \
+ out0, out1, out2, out3) \
+{ \
+ __m128i vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
+ \
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2, src2, \
+ mask0, src3, src3, mask0, vec0_m, vec1_m, vec2_m, vec3_m); \
+ DUP4_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, vec2_m, filt0, \
+ vec3_m, filt0, out0, out1, out2, out3); \
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2, src2, \
+ mask1, src3, src3, mask1, vec0_m, vec1_m, vec2_m, vec3_m); \
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2, src2, \
+ mask2, src3, src3, mask2, vec4_m, vec5_m, vec6_m, vec7_m); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec0_m, filt1, out1, vec1_m, filt1, \
+ out2, vec2_m, filt1, out3, vec3_m, filt1, out0, out1, out2, out3); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec4_m, filt2, out1, vec5_m, filt2, \
+ out2, vec6_m, filt2, out3, vec7_m, filt2, out0, out1, out2, out3); \
+}
+
+#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \
+( { \
+ __m128i tmp0; \
+ \
+ tmp0 = __lsx_vdp2_h_b(vec0, filt0); \
+ tmp0 = __lsx_vdp2add_h_b(tmp0, vec1, filt1); \
+ \
+ tmp0; \
+} )
+
+#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \
+( { \
+ __m128i vec0_m, vec1_m; \
+ __m128i hz_out_m; \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, \
+ vec0_m, vec1_m); \
+ hz_out_m = FILT_4TAP_DPADD_S_H(vec0_m, vec1_m, filt_h0, filt_h1); \
+ \
+ hz_out_m = __lsx_vsrari_h(hz_out_m, 7); \
+ hz_out_m = __lsx_vsat_h(hz_out_m, 7); \
+ \
+ hz_out_m; \
+} )
+
+void ff_put_vp8_epel8_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[mx - 1];
+ __m128i src0, src1, src2, src3, filt0, filt1, filt2;
+ __m128i mask0, mask1, mask2;
+ __m128i out0, out1, out2, out3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 2;
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src += src_stride4;
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ for (loop_cnt = (height >> 2) - 1; loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src += src_stride4;
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+void ff_put_vp8_epel16_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[mx - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1;
+ __m128i filt2, mask0, mask1, mask2;
+ __m128i out0, out1, out2, out3, out4, out5, out6, out7;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 2;
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2,
+                  0, src + src_stride3, 0, src0, src2, src4, src6);
+ DUP4_ARG2(__lsx_vld, src, 8, src + src_stride, 8, src + src_stride2,
+ 8, src + src_stride3, 8, src1, src3, src5, src7);
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src6, 128, src7, 128,
+ src4, src5, src6, src7);
+ src += src_stride4;
+
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2,
+ filt0, filt1, filt2, out4, out5, out6, out7);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(out1, dst, 0);
+ dst += dst_stride;
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out5, out4, 7, out7, out6, 7, out4, out5);
+ DUP2_ARG2(__lsx_vxori_b, out4, 128, out5, 128, out4, out5);
+ __lsx_vst(out4, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(out5, dst, 0);
+ dst += dst_stride;
+ }
+}
+
+void ff_put_vp8_epel8_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src7, src8, src9, src10;
+ __m128i src10_l, src32_l, src76_l, src98_l, src21_l, src43_l, src87_l;
+ __m128i src109_l, filt0, filt1, filt2;
+ __m128i out0_l, out1_l, out2_l, out3_l;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ src -= src_stride2;
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src2, src1, src4,
+ src3, src10_l, src32_l, src21_l, src43_l);
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2,
+ 0, src + src_stride3, 0, src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10,
+ 128, src7, src8, src9, src10);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vilvl_b, src7, src4, src8, src7, src9, src8, src10,
+ src9, src76_l, src87_l, src98_l, src109_l);
+
+ out0_l = DPADD_SH3_SH(src10_l, src32_l, src76_l, filt0, filt1, filt2);
+ out1_l = DPADD_SH3_SH(src21_l, src43_l, src87_l, filt0, filt1, filt2);
+ out2_l = DPADD_SH3_SH(src32_l, src76_l, src98_l, filt0, filt1, filt2);
+ out3_l = DPADD_SH3_SH(src43_l, src87_l, src109_l, filt0, filt1, filt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1_l, out0_l, 7, out3_l, out2_l, 7,
+ out0_l, out1_l);
+ DUP2_ARG2(__lsx_vxori_b, out0_l, 128, out1_l, 128, out0_l, out1_l);
+
+ __lsx_vstelm_d(out0_l, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0_l, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1_l, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1_l, dst, 0, 1);
+ dst += dst_stride;
+
+ src10_l = src76_l;
+ src32_l = src98_l;
+ src21_l = src87_l;
+ src43_l = src109_l;
+ src4 = src10;
+ }
+}
+
+void ff_put_vp8_epel16_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i src10_l, src32_l, src54_l, src76_l, src21_l, src43_l, src65_l, src87_l;
+ __m128i src10_h, src32_h, src54_h, src76_h, src21_h, src43_h, src65_h, src87_h;
+ __m128i filt0, filt1, filt2;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP4_ARG2(__lsx_vld, src - src_stride2, 0, src - src_stride, 0, src, 0,
+ src + src_stride, 0, src0, src1, src2, src3);
+ src4 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src4, src3, src2, src1,
+ src10_l, src32_l, src43_l, src21_l);
+ DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src4, src3, src2, src1,
+ src10_h, src32_h, src43_h, src21_h);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src5, src6, src7, src8);
+ src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128,
+ src5, src6, src7, src8);
+
+ DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_l, src65_l, src76_l, src87_l);
+ DUP4_ARG2(__lsx_vilvh_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_h, src65_h, src76_h, src87_h);
+
+ tmp0 = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2);
+ tmp1 = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2);
+ tmp2 = DPADD_SH3_SH(src10_h, src32_h, src54_h, filt0, filt1, filt2);
+ tmp3 = DPADD_SH3_SH(src21_h, src43_h, src65_h, filt0, filt1, filt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ tmp0 = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2);
+ tmp1 = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2);
+ tmp2 = DPADD_SH3_SH(src32_h, src54_h, src76_h, filt0, filt1, filt2);
+ tmp3 = DPADD_SH3_SH(src43_h, src65_h, src87_h, filt0, filt1, filt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ src10_l = src54_l;
+ src32_l = src76_l;
+ src21_l = src65_l;
+ src43_l = src87_l;
+ src10_h = src54_h;
+ src32_h = src76_h;
+ src21_h = src65_h;
+ src43_h = src87_h;
+ src4 = src8;
+ }
+}
+
+void ff_put_vp8_epel8_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_lsx[mx - 1];
+ const int8_t *filter_vert = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, filt_hz2;
+ __m128i mask0, mask1, mask2, filt_vt0, filt_vt1, filt_vt2;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ __m128i hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= (2 + src_stride2);
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, filt_hz1);
+ filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+              src0, src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, filt_vt1);
+ filt_vt2 = __lsx_vldrepl_h(filter_vert, 4);
+
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out2, hz_out1, hz_out4, hz_out3, out3, out4);
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src5, src6, src7, src8);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128,
+ src5, src6, src7, src8);
+
+ hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out2 = __lsx_vpackev_b(hz_out5, hz_out4);
+        tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out5 = __lsx_vpackev_b(hz_out6, hz_out5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+
+ out7 = __lsx_vpackev_b(hz_out7, hz_out6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out6 = __lsx_vpackev_b(hz_out8, hz_out7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 1);
+ dst += dst_stride;
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out7;
+ out3 = out5;
+ out4 = out6;
+ }
+}
+
+void ff_put_vp8_epel16_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h6v6_lsx(dst, dst_stride, src, src_stride, height, mx, my);
+ src += 8;
+ dst += 8;
+ }
+}
+
+void ff_put_vp8_epel8_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src7, src8, src9, src10;
+ __m128i src10_l, src72_l, src98_l, src21_l, src87_l, src109_l, filt0, filt1;
+ __m128i out0, out1, out2, out3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ src -= src_stride;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src0, src1);
+ src2 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src7, src8, src9, src10);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src2, src8, src7, src9, src8, src10, src9,
+ src72_l, src87_l, src98_l, src109_l);
+
+ out0 = FILT_4TAP_DPADD_S_H(src10_l, src72_l, filt0, filt1);
+ out1 = FILT_4TAP_DPADD_S_H(src21_l, src87_l, filt0, filt1);
+ out2 = FILT_4TAP_DPADD_S_H(src72_l, src98_l, filt0, filt1);
+ out3 = FILT_4TAP_DPADD_S_H(src87_l, src109_l, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ src10_l = src98_l;
+ src21_l = src109_l;
+ src2 = src10;
+ }
+}
+
+void ff_put_vp8_epel16_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i src10_l, src32_l, src54_l, src21_l, src43_l, src65_l, src10_h;
+ __m128i src32_h, src54_h, src21_h, src43_h, src65_h, filt0, filt1;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ src -= src_stride;
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src0, src1);
+ src2 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_l, src21_l);
+ DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_h, src21_h);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2,
+ 0, src + src_stride3, 0, src3, src4, src5, src6);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128,
+ src3, src4, src5, src6);
+ DUP4_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src5, src4, src6,
+ src5, src32_l, src43_l, src54_l, src65_l);
+ DUP4_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src5, src4, src6,
+ src5, src32_h, src43_h, src54_h, src65_h);
+
+ tmp0 = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1);
+ tmp1 = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1);
+ tmp2 = FILT_4TAP_DPADD_S_H(src10_h, src32_h, filt0, filt1);
+ tmp3 = FILT_4TAP_DPADD_S_H(src21_h, src43_h, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ tmp0 = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1);
+ tmp1 = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1);
+ tmp2 = FILT_4TAP_DPADD_S_H(src32_h, src54_h, filt0, filt1);
+ tmp3 = FILT_4TAP_DPADD_S_H(src43_h, src65_h, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ src10_l = src54_l;
+ src21_l = src65_l;
+ src10_h = src54_h;
+ src21_h = src65_h;
+ src2 = src6;
+ }
+}
+
+void ff_put_vp8_epel8_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_lsx[mx - 1];
+ const int8_t *filter_vert = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ __m128i filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3;
+ __m128i tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= (2 + src_stride);
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, filt_hz1);
+ filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src0, src1);
+ src2 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out2, hz_out1, vec0, vec2);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src3, src4, src5, src6);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128,
+ src3, src4, src5, src6);
+
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec1 = __lsx_vpackev_b(hz_out3, hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec3 = __lsx_vpackev_b(hz_out0, hz_out3);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec0 = __lsx_vpackev_b(hz_out1, hz_out0);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1);
+
+ hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out0, hz_out3, hz_out2, hz_out1, vec1, vec2);
+ tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+void ff_put_vp8_epel16_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h6v4_lsx(dst, dst_stride, src, src_stride, height,
+ mx, my);
+ src += 8;
+ dst += 8;
+ }
+}
+
+void ff_put_vp8_epel8_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_lsx[mx - 1];
+ const int8_t *filter_vert = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, mask0, mask1;
+ __m128i filt_vt0, filt_vt1, filt_vt2;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ __m128i out0, out1, out2, out3, out4, out5, out6, out7;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= (1 + src_stride2);
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, filt_hz1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ tmp0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ tmp1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ tmp2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ tmp3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ tmp4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+
+ DUP4_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp2, tmp1,
+ tmp4, tmp3, out0, out1, out3, out4);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, filt_vt1);
+ filt_vt2 = __lsx_vldrepl_h(filter_vert, 4);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src5, src6, src7, src8);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128,
+ src5, src6, src7, src8);
+
+ tmp5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ out2 = __lsx_vpackev_b(tmp5, tmp4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ out5 = __lsx_vpackev_b(tmp6, tmp5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1);
+ out6 = __lsx_vpackev_b(tmp7, tmp6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1);
+ out7 = __lsx_vpackev_b(tmp8, tmp7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 1);
+ dst += dst_stride;
+
+ tmp4 = tmp8;
+ out0 = out2;
+ out1 = out6;
+ out3 = out5;
+ out4 = out7;
+ }
+}
+
+void ff_put_vp8_epel16_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h4v6_lsx(dst, dst_stride, src, src_stride, height,
+ mx, my);
+ src += 8;
+ dst += 8;
+ }
+}
+
+void ff_put_vp8_pixels8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t cnt;
+ __m128i src0, src1, src2, src3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ if (0 == height % 8) {
+ for (cnt = height >> 3; cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+ }
+    } else if (0 == height % 4) {
+ for (cnt = (height >> 2); cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+ }
+ }
+}
+
+void ff_put_vp8_pixels16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t width = 16;
+ int32_t cnt, loop_cnt;
+ uint8_t *src_tmp, *dst_tmp;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ ptrdiff_t dst_stride2 = dst_stride << 1;
+ ptrdiff_t dst_stride3 = dst_stride2 + dst_stride;
+ ptrdiff_t dst_stride4 = dst_stride2 << 1;
+
+ if (0 == height % 8) {
+ for (cnt = (width >> 4); cnt--;) {
+ src_tmp = src;
+ dst_tmp = dst;
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src_tmp, 0, src_tmp + src_stride, 0,
+ src_tmp + src_stride2, 0, src_tmp + src_stride3, 0,
+ src4, src5, src6, src7);
+ src_tmp += src_stride4;
+
+ __lsx_vst(src4, dst_tmp, 0);
+ __lsx_vst(src5, dst_tmp + dst_stride, 0);
+ __lsx_vst(src6, dst_tmp + dst_stride2, 0);
+ __lsx_vst(src7, dst_tmp + dst_stride3, 0);
+ dst_tmp += dst_stride4;
+
+ DUP4_ARG2(__lsx_vld, src_tmp, 0, src_tmp + src_stride, 0,
+ src_tmp + src_stride2, 0, src_tmp + src_stride3, 0,
+ src4, src5, src6, src7);
+ src_tmp += src_stride4;
+
+ __lsx_vst(src4, dst_tmp, 0);
+ __lsx_vst(src5, dst_tmp + dst_stride, 0);
+ __lsx_vst(src6, dst_tmp + dst_stride2, 0);
+ __lsx_vst(src7, dst_tmp + dst_stride3, 0);
+ dst_tmp += dst_stride4;
+ }
+ src += 16;
+ dst += 16;
+ }
+ } else if (0 == height % 4) {
+ for (cnt = (height >> 2); cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+            src += src_stride4;
+
+ __lsx_vst(src0, dst, 0);
+ __lsx_vst(src1, dst + dst_stride, 0);
+ __lsx_vst(src2, dst + dst_stride2, 0);
+ __lsx_vst(src3, dst + dst_stride3, 0);
+ dst += dst_stride4;
+ }
+ }
+}
diff --git a/libavcodec/loongarch/vp8dsp_init_loongarch.c b/libavcodec/loongarch/vp8dsp_init_loongarch.c
new file mode 100644
index 0000000000..63da15b198
--- /dev/null
+++ b/libavcodec/loongarch/vp8dsp_init_loongarch.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP8 compatible video decoder
+ */
+
+#include "libavutil/loongarch/cpu.h"
+#include "libavcodec/vp8dsp.h"
+#include "libavutil/attributes.h"
+#include "vp8dsp_loongarch.h"
+
+#define VP8_MC_LOONGARCH_FUNC(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel##SIZE##_h6_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel##SIZE##_v4_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel##SIZE##_h6v4_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel##SIZE##_v6_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel##SIZE##_h4v6_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel##SIZE##_h6v6_lsx;
+
+#define VP8_MC_LOONGARCH_COPY(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][0] = ff_put_vp8_pixels##SIZE##_lsx; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = ff_put_vp8_pixels##SIZE##_lsx;
+
+av_cold void ff_vp8dsp_init_loongarch(VP8DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_lsx(cpu_flags)) {
+ VP8_MC_LOONGARCH_FUNC(0, 16);
+ VP8_MC_LOONGARCH_FUNC(1, 8);
+
+ VP8_MC_LOONGARCH_COPY(0, 16);
+ VP8_MC_LOONGARCH_COPY(1, 8);
+
+ dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_lsx;
+ dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_lsx;
+ dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_lsx;
+ dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_lsx;
+
+ dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_lsx;
+ dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_lsx;
+ }
+}
diff --git a/libavcodec/loongarch/vp8dsp_loongarch.h b/libavcodec/loongarch/vp8dsp_loongarch.h
new file mode 100644
index 0000000000..87e9509db9
--- /dev/null
+++ b/libavcodec/loongarch/vp8dsp_loongarch.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_LOONGARCH_VP8DSP_LOONGARCH_H
+#define AVCODEC_LOONGARCH_VP8DSP_LOONGARCH_H
+
+#include "libavcodec/vp8dsp.h"
+
+void ff_put_vp8_pixels8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int x, int y);
+void ff_put_vp8_pixels16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int x, int y);
+
+void ff_put_vp8_epel16_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+
+void ff_put_vp8_epel8_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+
+void ff_put_vp8_epel8_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+
+/* loop filter */
+void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h);
+void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h);
+
+void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_v_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+
+#endif // #ifndef AVCODEC_LOONGARCH_VP8DSP_LOONGARCH_H
diff --git a/libavcodec/vp8dsp.c b/libavcodec/vp8dsp.c
index 4ff63d0784..732a483b62 100644
--- a/libavcodec/vp8dsp.c
+++ b/libavcodec/vp8dsp.c
@@ -743,5 +743,7 @@ av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
         ff_vp8dsp_init_x86(dsp);
     if (ARCH_MIPS)
         ff_vp8dsp_init_mips(dsp);
+    if (ARCH_LOONGARCH)
+        ff_vp8dsp_init_loongarch(dsp);
 }
 #endif /* CONFIG_VP8_DECODER */
diff --git a/libavcodec/vp8dsp.h b/libavcodec/vp8dsp.h
index cfe1524b0b..7c6208df39 100644
--- a/libavcodec/vp8dsp.h
+++ b/libavcodec/vp8dsp.h
@@ -101,6 +101,7 @@ void ff_vp8dsp_init_aarch64(VP8DSPContext *c);
 void ff_vp8dsp_init_arm(VP8DSPContext *c);
 void ff_vp8dsp_init_x86(VP8DSPContext *c);
 void ff_vp8dsp_init_mips(VP8DSPContext *c);
+void ff_vp8dsp_init_loongarch(VP8DSPContext *c);
 
 #define IS_VP7 1
 #define IS_VP8 0
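For readers who want the arithmetic behind the LSX macros without decoding the intrinsics, below is a minimal scalar sketch of the VP8 normal (inner) loop filter that VP8_LPF_FILTER4_4W vectorizes. It follows the reference in RFC 6386 rather than any FFmpeg code; clamp8() is a hypothetical helper, pixels are assumed already biased to signed (x ^ 0x80), and mask/hev are 0 or -1 per lane as produced by LPF_MASK_HEV.

    #include <stdint.h>

    /* hypothetical helper: saturate an int to [-128, 127] */
    static int8_t clamp8(int v)
    {
        return v < -128 ? -128 : (v > 127 ? 127 : v);
    }

    static void vp8_normal_filter(int8_t *p1, int8_t *p0, int8_t *q0,
                                  int8_t *q1, int8_t mask, int8_t hev)
    {
        int8_t f, f1, f2;

        f   = clamp8(*p1 - *q1) & hev;            /* outer-tap term, hev lanes only */
        f   = clamp8(f + 3 * (*q0 - *p0)) & mask;
        f1  = clamp8(f + 4) >> 3;                 /* adjustment for q0 */
        f2  = clamp8(f + 3) >> 3;                 /* adjustment for p0 */
        *q0 = clamp8(*q0 - f1);
        *p0 = clamp8(*p0 + f2);
        f   = ((f1 + 1) >> 1) & ~hev;             /* outer pixels, non-hev only */
        *q1 = clamp8(*q1 - f);
        *p1 = clamp8(*p1 + f);
    }

The SIMD version performs exactly these steps on 16 pixels per call; the only twist is that the 3 * (q0 - p0) product exceeds 8 bits, so it is evaluated in two 16-bit halves and narrowed back with saturating pack instructions.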