/*
 * VC1 NEON optimisations
 *
 * Copyright (c) 2010 Rob Clark <rob@ti.com>
 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

#include "config.h"

@ Transpose rows into columns of a matrix of 16-bit elements. For a 4x4 matrix,
@ pass double-word registers; for an 8x4 matrix, pass quad-word registers.
.macro transpose16 r0, r1, r2, r3
        @ At this point:
        @   row[0]  r0
        @   row[1]  r1
        @   row[2]  r2
        @   row[3]  r3

        vtrn.16         \r0,  \r1         @ first and second row
        vtrn.16         \r2,  \r3         @ third and fourth row
        vtrn.32         \r0,  \r2         @ first and third row
        vtrn.32         \r1,  \r3         @ second and fourth row

        @ At this point, if registers are quad-word:
        @   column[0]   d0
        @   column[1]   d2
        @   column[2]   d4
        @   column[3]   d6
        @   column[4]   d1
        @   column[5]   d3
        @   column[6]   d5
        @   column[7]   d7

        @ At this point, if registers are double-word:
        @   column[0]   d0
        @   column[1]   d1
        @   column[2]   d2
        @   column[3]   d3
.endm
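
@ Worked example for the double-word (4x4) case, with placeholder element
@ names: starting from the rows
@   d0 = [a0 a1 a2 a3], d1 = [b0 b1 b2 b3], d2 = [c0 c1 c2 c3], d3 = [e0 e1 e2 e3]
@ the two vtrn.16 steps give d0 = [a0 b0 a2 b2], d1 = [a1 b1 a3 b3],
@ d2 = [c0 e0 c2 e2], d3 = [c1 e1 c3 e3], and the two vtrn.32 steps then give
@ the columns d0 = [a0 b0 c0 e0], d1 = [a1 b1 c1 e1], d2 = [a2 b2 c2 e2],
@ d3 = [a3 b3 c3 e3].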

@ ff_vc1_inv_trans_{4,8}x{4,8}_neon and overflow: The input values in a file
@ are supposed to be within a range that allows 16-bit math without
@ overflowing, but some files contain values just large enough to
@ overflow in vadd instructions like:
@
@   vadd.i16  q0, q8, q10
@   vshr.s16  q0, q0, #\rshift
@
@ To prevent these borderline cases from overflowing, we just need one more
@ bit of precision, which is accomplished by replacing the sequence above with:
@
@   vhadd.s16 q0, q8, q10
@   vshr.s16  q0, q0, #(\rshift -1)
@
@ This works because vhadd adds with enough internal headroom and shifts the
@ sum right once before writing the result to the destination register, so the
@ intermediate sum never has to fit in 16 bits.
@
@ Even with this workaround, there were still some files that caused overflows
@ in ff_vc1_inv_trans_8x8_neon. See the comments in ff_vc1_inv_trans_8x8_neon
@ for the additional workaround.
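@
@ As a scalar sketch of the same trick (purely illustrative, not code that
@ exists elsewhere):
@
@   before:  dst = (int16_t)(t1 + t3) >> rshift;    // t1 + t3 can wrap around
@   after:   dst = vhadd(t1, t3) >> (rshift - 1);   // (t1 + t3) >> 1 is formed
@                                                   // with 17-bit headroom
@
@ Both give the same result whenever t1 + t3 fits in 16 bits; only the second
@ is safe when it does not.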

@ Takes 4 columns of 8 values each and operates on them. Modeled after the first
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns: q0 q1 q2 q3
@ Output columns: q0 q1 q2 q3
@ Trashes: r12 q8 q9 q10 q11 q12 q13
.macro vc1_inv_trans_4x8_helper add rshift
        @ Compute temp1 and temp2, and set up the scalars #17, #22 and #10
        vadd.i16        q12,   q0,  q2              @ temp1 = src[0] + src[2]
        movw            r12,   #17
        vsub.i16        q13,   q0,  q2              @ temp2 = src[0] - src[2]
        movt            r12,   #22
        vmov.32         d0[0], r12
        movw            r12,   #10
        vmov.16         d1[0], r12

        vmov.i16        q8,  #\add                  @ t1 will accumulate here
        vmov.i16        q9,  #\add                  @ t2 will accumulate here

        vmul.i16        q10, q1,  d0[1]             @ t3 = 22 * (src[1])
        vmul.i16        q11, q3,  d0[1]             @ t4 = 22 * (src[3])

        vmla.i16        q8,  q12, d0[0]             @ t1 = 17 * (temp1) + add
        vmla.i16        q9,  q13, d0[0]             @ t2 = 17 * (temp2) + add

        vmla.i16        q10, q3,  d1[0]             @ t3 += 10 * src[3]
        vmls.i16        q11, q1,  d1[0]             @ t4 -= 10 * src[1]

        vhadd.s16       q0,  q8,  q10               @ dst[0] = (t1 + t3) >> 1
        vhsub.s16       q3,  q8,  q10               @ dst[3] = (t1 - t3) >> 1
        vhsub.s16       q1,  q9,  q11               @ dst[1] = (t2 - t4) >> 1
        vhadd.s16       q2,  q9,  q11               @ dst[2] = (t2 + t4) >> 1

        @ Halving add/sub above already did one shift
        vshr.s16        q0,  q0,  #(\rshift - 1)    @ dst[0] >>= (rshift - 1)
        vshr.s16        q3,  q3,  #(\rshift - 1)    @ dst[3] >>= (rshift - 1)
        vshr.s16        q1,  q1,  #(\rshift - 1)    @ dst[1] >>= (rshift - 1)
        vshr.s16        q2,  q2,  #(\rshift - 1)    @ dst[2] >>= (rshift - 1)
.endm
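
@ For reference, the per-column arithmetic in vc1_inv_trans_4x8_helper above
@ corresponds to this C-like sketch (illustrative, put together from the
@ instruction comments rather than copied from vc1_inv_trans_4x8_c):
@
@   t1 = 17 * (src[0] + src[2]) + add;
@   t2 = 17 * (src[0] - src[2]) + add;
@   t3 = 22 * src[1] + 10 * src[3];
@   t4 = 22 * src[3] - 10 * src[1];
@
@   dst[0] = (t1 + t3) >> rshift;
@   dst[1] = (t2 - t4) >> rshift;
@   dst[2] = (t2 + t4) >> rshift;
@   dst[3] = (t1 - t3) >> rshift;
@
@ with each (x +/- y) >> rshift implemented as a halving add/sub followed by
@ >> (rshift - 1), as described above.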

@ Takes 8 columns of 4 values each and operates on them. Modeled after the second
@ for loop in vc1_inv_trans_4x8_c.
@ Input columns: d0 d2 d4 d6 d1 d3 d5 d7
@ Output columns: d16 d17 d18 d19 d21 d20 d23 d22
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x4_helper add add1beforeshift rshift
        @ At this point:
        @   src[0]      d0 overwritten later
        @   src[8]      d2
        @   src[16]     d4 overwritten later
        @   src[24]     d6
        @   src[32]     d1 overwritten later
        @   src[40]     d3
        @   src[48]     d5 overwritten later
        @   src[56]     d7

        movw            r12,   #12
        vmov.i16        q14,   #\add            @ t1|t2 will accumulate here
        movt            r12,   #6

        vadd.i16        d20,   d0,  d1          @ temp1 = src[0] + src[32]
        vsub.i16        d21,   d0,  d1          @ temp2 = src[0] - src[32]
        vmov.i32        d0[0], r12              @ 16-bit: d0[0] = #12, d0[1] = #6

        vshl.i16        q15,   q2,  #4          @ t3|t4 = 16 * (src[16]|src[48])
        vswp            d4,    d5               @ q2 = src[48]|src[16]
        vmla.i16        q14,   q10, d0[0]       @ t1|t2 = 12 * (temp1|temp2) + 64
        movw            r12,   #15
        movt            r12,   #9
        vmov.i32        d0[1], r12              @ 16-bit: d0[2] = #15, d0[3] = #9
        vneg.s16        d31,   d31              @ t4 = -t4
        vmla.i16        q15,   q2,  d0[1]       @ t3|t4 += 6 * (src[48]|src[16])

        @ At this point:
        @   d0[2]   #15
        @   d0[3]   #9
        @   q1      src[8]|src[40]
        @   q3      src[24]|src[56]
        @   q14     old t1|t2
        @   q15     old t3|t4

        vshl.i16        q8,  q1,  #4            @ t1|t2 = 16 * (src[8]|src[40])
        vswp            d2,  d3                 @ q1 = src[40]|src[8]
        vshl.i16        q12, q3,  #4            @ temp3a|temp4a = 16 * src[24]|src[56]
        vswp            d6,  d7                 @ q3 = src[56]|src[24]
        vshl.i16        q13, q1,  #2            @ temp3b|temp4b = 4 * (src[40]|src[8])
        vshl.i16        q2,  q3,  #2            @ temp1|temp2 = 4 * (src[56]|src[24])
        vswp            d3,  d6                 @ q1 = src[40]|src[56], q3 = src[8]|src[24]
        vsub.i16        q9,  q13, q12           @ t3|t4 = - (temp3a|temp4a) + (temp3b|temp4b)
        vadd.i16        q8,  q8,  q2            @ t1|t2 += temp1|temp2
        vmul.i16        q12, q3,  d0[3]         @ temp3|temp4 = 9 * src[8]|src[24]
        vmla.i16        q8,  q1,  d0[3]         @ t1|t2 += 9 * (src[40]|src[56])
        vswp            d6,  d7                 @ q3 = src[24]|src[8]
        vswp            d2,  d3                 @ q1 = src[56]|src[40]

        vsub.i16        q11, q14, q15           @ t8|t7 = old t1|t2 - old t3|t4
        vadd.i16        q10, q14, q15           @ t5|t6 = old t1|t2 + old t3|t4
  .if \add1beforeshift
        vmov.i16        q15, #1
  .endif

        vadd.i16        d18, d18, d24           @ t3 += temp3
        vsub.i16        d19, d19, d25           @ t4 -= temp4

        vswp            d22, d23                @ q11 = t7|t8

        vneg.s16        d17, d17                @ t2 = -t2
        vmla.i16        q9,  q1,  d0[2]         @ t3|t4 += 15 * src[56]|src[40]
        vmla.i16        q8,  q3,  d0[2]         @ t1|t2 += 15 * src[24]|src[8]

        @ At this point:
        @   t1  d16
        @   t2  d17
        @   t3  d18
        @   t4  d19
        @   t5  d20
        @   t6  d21
        @   t7  d22
        @   t8  d23
        @   #1  q15

  .if \add1beforeshift
        vadd.i16        q3,  q15, q10           @ line[7,6] = t5|t6 + 1
        vadd.i16        q2,  q15, q11           @ line[5,4] = t7|t8 + 1
  .endif

        @ Sometimes this overflows, so to get one additional bit of precision, use
        @ a single instruction that both adds and shifts right (halving).
        vhadd.s16       q1,  q9,  q11           @ line[2,3] = (t3|t4 + t7|t8) >> 1
        vhadd.s16       q0,  q8,  q10           @ line[0,1] = (t1|t2 + t5|t6) >> 1
  .if \add1beforeshift
        vhsub.s16       q2,  q2,  q9            @ line[5,4] = (t7|t8 - t3|t4 + 1) >> 1
        vhsub.s16       q3,  q3,  q8            @ line[7,6] = (t5|t6 - t1|t2 + 1) >> 1
  .else
        vhsub.s16       q2,  q11, q9            @ line[5,4] = (t7|t8 - t3|t4) >> 1
        vhsub.s16       q3,  q10, q8            @ line[7,6] = (t5|t6 - t1|t2) >> 1
  .endif

        vshr.s16        q9,  q1,  #(\rshift - 1)    @ one shift is already done by vhadd/vhsub above
        vshr.s16        q8,  q0,  #(\rshift - 1)
        vshr.s16        q10, q2,  #(\rshift - 1)
        vshr.s16        q11, q3,  #(\rshift - 1)

        @ At this point:
        @   dst[0]   d16
        @   dst[1]   d17
        @   dst[2]   d18
        @   dst[3]   d19
        @   dst[4]   d21
        @   dst[5]   d20
        @   dst[6]   d23
        @   dst[7]   d22
.endm
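
@ For reference, the per-column arithmetic in vc1_inv_trans_8x4_helper above
@ corresponds to this C-like sketch (illustrative, put together from the
@ instruction comments; src indices follow the comments at the top of the
@ macro):
@
@   t1 = 12 * (src[0] + src[32]) + add;    t3 = 16 * src[16] +  6 * src[48];
@   t2 = 12 * (src[0] - src[32]) + add;    t4 =  6 * src[16] - 16 * src[48];
@   t5 = t1 + t3;  t6 = t2 + t4;  t7 = t2 - t4;  t8 = t1 - t3;
@   t1 = 16 * src[8] + 15 * src[24] +  9 * src[40] +  4 * src[56];
@   t2 = 15 * src[8] -  4 * src[24] - 16 * src[40] -  9 * src[56];
@   t3 =  9 * src[8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
@   t4 =  4 * src[8] -  9 * src[24] + 15 * src[40] - 16 * src[56];
@   dst[0] = (t5 + t1) >> rshift;    dst[7] = (t5 - t1 [+ 1]) >> rshift;
@   dst[1] = (t6 + t2) >> rshift;    dst[6] = (t6 - t2 [+ 1]) >> rshift;
@   dst[2] = (t7 + t3) >> rshift;    dst[5] = (t7 - t3 [+ 1]) >> rshift;
@   dst[3] = (t8 + t4) >> rshift;    dst[4] = (t8 - t4 [+ 1]) >> rshift;
@
@ where the optional [+ 1] is applied only when add1beforeshift is set, and
@ each shift is again split into a halving add/sub plus >> (rshift - 1).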

@ This is modeled after the first and second for loops in vc1_inv_trans_8x8_c.
@ Input columns:  q8, q9, q10, q11, q12, q13, q14, q15
@ Output columns: q8, q9, q10, q11, q12, q13, q14, q15
@ Trashes all NEON registers (and r12) except for: q4 q5 q6 q7
.macro vc1_inv_trans_8x8_helper add add1beforeshift rshift
        @ This actually computes half of t1, t2, t3, t4, as explained below
        @ near `tNhalf`.
        vmov.i16        q0,    #(6 / 2)         @ q0 = #6/2
        vshl.i16        q1,    q10, #3          @ t3 = 16/2 * src[16]
        vshl.i16        q3,    q14, #3          @ temp4 = 16/2 * src[48]
        vmul.i16        q2,    q10, q0          @ t4 = 6/2 * src[16]
        vmla.i16        q1,    q14, q0          @ t3 += 6/2 * src[48]
        @ unused: q0, q10, q14
        vmov.i16        q0,    #(12 / 2)        @ q0 = #12/2
        vadd.i16        q10,   q8,  q12         @ temp1 = src[0] + src[32]
        vsub.i16        q14,   q8,  q12         @ temp2 = src[0] - src[32]
        @ unused: q8, q12
        vmov.i16        q8,    #(\add / 2)      @ t1 will accumulate here
        vmov.i16        q12,   #(\add / 2)      @ t2 will accumulate here
        movw            r12,   #15
        vsub.i16        q2,    q2,  q3          @ t4 = 6/2 * src[16] - 16/2 * src[48]
        movt            r12,   #9
        @ unused: q3
        vmla.i16        q8,    q10, q0          @ t1 = 12/2 * temp1 + add
        vmla.i16        q12,   q14, q0          @ t2 = 12/2 * temp2 + add
        vmov.i32        d0[0], r12
        @ unused: q3, q10, q14

        @ At this point:
        @   q0          d0=#15|#9
        @   q1  old t3
        @   q2  old t4
        @   q3
        @   q8  old t1
        @   q9          src[8]
        @   q10
        @   q11         src[24]
        @   q12 old t2
        @   q13         src[40]
        @   q14
        @   q15         src[56]

        @ unused: q3, q10, q14
        movw            r12,   #16
        vshl.i16        q3,    q9,  #4          @ t1 = 16 * src[8]
        movt            r12,   #4
        vshl.i16        q10,   q9,  #2          @ t4 = 4 * src[8]
        vmov.i32        d1[0], r12
        vmul.i16        q14,   q9,  d0[0]       @ t2 = 15 * src[8]
        vmul.i16        q9,    q9,  d0[1]       @ t3 = 9 * src[8]
        @ unused: none
        vmla.i16        q3,    q11, d0[0]       @ t1 += 15 * src[24]
        vmls.i16        q10,   q11, d0[1]       @ t4 -= 9 * src[24]
        vmls.i16        q14,   q11, d1[1]       @ t2 -= 4 * src[24]
        vmls.i16        q9,    q11, d1[0]       @ t3 -= 16 * src[24]
        @ unused: q11
        vmla.i16        q3,    q13, d0[1]       @ t1 += 9 * src[40]
        vmla.i16        q10,   q13, d0[0]       @ t4 += 15 * src[40]
        vmls.i16        q14,   q13, d1[0]       @ t2 -= 16 * src[40]
        vmla.i16        q9,    q13, d1[1]       @ t3 += 4 * src[40]
        @ unused: q11, q13

        @ Compute t5, t6, t7, t8 from old t1, t2, t3, t4. Actually, it computes
        @ half of t5, t6, t7, t8 since t1, t2, t3, t4 are halved.
        vadd.i16        q11,   q8,  q1          @ t5 = t1 + t3
        vsub.i16        q1,    q8,  q1          @ t8 = t1 - t3
        vadd.i16        q13,   q12, q2          @ t6 = t2 + t4
        vsub.i16        q2,    q12, q2          @ t7 = t2 - t4
        @ unused: q8, q12

  .if \add1beforeshift
        vmov.i16        q12,   #1
  .endif

        @ unused: q8
        vmla.i16        q3,    q15, d1[1]       @ t1 += 4 * src[56]
        vmls.i16        q14,   q15, d0[1]       @ t2 -= 9 * src[56]
        vmla.i16        q9,    q15, d0[0]       @ t3 += 15 * src[56]
        vmls.i16        q10,   q15, d1[0]       @ t4 -= 16 * src[56]
        @ unused: q0, q8, q15

        @ At this point:
        @   t1      q3
        @   t2      q14
        @   t3      q9
        @   t4      q10
        @   t5half  q11
        @   t6half  q13
        @   t7half  q2
        @   t8half  q1
        @   #1      q12
        @
        @ tNhalf is half of the value of tN (as described in vc1_inv_trans_8x8_c).
        @ This is done because some files have input that makes tN + tM overflow.
        @ To avoid that, we compute tNhalf, then tNhalf + tM (which does not
        @ overflow), and finally use vhadd to compute (tNhalf + (tNhalf + tM)) >> 1,
        @ which does not overflow because vhadd keeps the extra bit of the sum
        @ internally before halving.

        @ For each pair of tN and tM, do:
        @   lineA = t5half + t1
        @   if add1beforeshift:  t1 -= 1
        @   lineA = (t5half + lineA) >> 1
        @   lineB = t5half - t1
        @   lineB = (t5half + lineB) >> 1
        @   lineA >>= rshift - 1
        @   lineB >>= rshift - 1
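        @
        @ Working through line[0] as a concrete example: t5half + (t5half + t1)
        @ equals t5 + t1 exactly (the halved constants 12/2, 16/2, 6/2 and the
        @ halved add are exact, since the originals are all even), so
        @   vhadd(t5half, t5half + t1) >> (rshift - 1)  ==  (t5 + t1) >> rshift
        @ i.e. the same value the C code computes, but with no 16-bit
        @ intermediate that can wrap around.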

        vadd.i16        q8,  q11, q3                @ q8 = t5half + t1
  .if \add1beforeshift
        vsub.i16        q3,  q3,  q12               @ q3 = t1 - 1
  .endif

        vadd.i16        q0,  q13, q14               @ q0  = t6half + t2
  .if \add1beforeshift
        vsub.i16        q14, q14, q12               @ q14 = t2 - 1
  .endif

        vadd.i16        q15, q2,  q9                @ q15 = t7half + t3
  .if \add1beforeshift
        vsub.i16        q9,  q9,  q12               @ q9  = t3 - 1
  .endif
        @ unused: none

        vhadd.s16       q8,  q11, q8                @ q8  = (t5half + t5half + t1) >> 1
        vsub.i16        q3,  q11, q3                @ q3  = t5half - t1 + 1

        vhadd.s16       q0,  q13, q0                @ q0  = (t6half + t6half + t2) >> 1
        vsub.i16        q14, q13, q14               @ q14 = t6half - t2 + 1

        vhadd.s16       q15, q2,  q15               @ q15 = (t7half + t7half + t3) >> 1
        vsub.i16        q9,  q2,  q9                @ q9  = t7half - t3 + 1

        vhadd.s16       q3,  q11, q3                @ q3  = (t5half + t5half - t1 + 1) >> 1
        @ unused: q11

        vadd.i16        q11, q1,  q10               @ q11 = t8half + t4
  .if \add1beforeshift
        vsub.i16        q10, q10, q12               @ q10 = t4 - 1
  .endif
        @ unused: q12

        vhadd.s16       q14, q13, q14               @ q14 = (t6half + t6half - t2 + 1) >> 1
        @ unused: q12, q13
        vhadd.s16       q13, q2,  q9                @ q13 = (t7half + t7half - t3 + 1) >> 1
        @ unused: q12, q2, q9

        vsub.i16        q10, q1,  q10               @ q10 = t8half - t4 + 1
        vhadd.s16       q11, q1,  q11               @ q11 = (t8half + t8half + t4) >> 1

        vshr.s16        q8,  q8,  #(\rshift - 1)    @ q8  = line[0]
        vhadd.s16       q12, q1,  q10               @ q12 = (t8half + t8half - t4 + 1) >> 1
        vshr.s16        q9,  q0,  #(\rshift - 1)    @ q9  = line[1]
        vshr.s16        q10, q15, #(\rshift - 1)    @ q10 = line[2]
        vshr.s16        q11, q11, #(\rshift - 1)    @ q11 = line[3]
        vshr.s16        q12, q12, #(\rshift - 1)    @ q12 = line[4]
        vshr.s16        q13, q13, #(\rshift - 1)    @ q13 = line[5]
        vshr.s16        q14, q14, #(\rshift - 1)    @ q14 = line[6]
        vshr.s16        q15, q3,  #(\rshift - 1)    @ q15 = line[7]
.endm

@ (int16_t *block [r0])
function ff_vc1_inv_trans_8x8_neon, export=1
        vld1.64         {q8-q9},   [r0,:128]!
        vld1.64         {q10-q11}, [r0,:128]!
        vld1.64         {q12-q13}, [r0,:128]!
        vld1.64         {q14-q15}, [r0,:128]
        sub             r0, r0, #(16 * 2 * 3)   @ restore r0

        @ At this point:
        @   src[0]  q8
        @   src[8]  q9
        @   src[16] q10
        @   src[24] q11
        @   src[32] q12
        @   src[40] q13
        @   src[48] q14
        @   src[56] q15

        vc1_inv_trans_8x8_helper add=4, add1beforeshift=0, rshift=3

        @ Transpose the 8x8 result matrix
        swap4           d17, d19, d21, d23, d24, d26, d28, d30
        transpose16_4x4 q8,  q9,  q10, q11, q12, q13, q14, q15

        vc1_inv_trans_8x8_helper add=64, add1beforeshift=1, rshift=7

        vst1.64         {q8-q9},   [r0,:128]!
        vst1.64         {q10-q11}, [r0,:128]!
        vst1.64         {q12-q13}, [r0,:128]!
        vst1.64         {q14-q15}, [r0,:128]

        bx              lr
endfunc

@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_8x4_neon, export=1
        vld1.64         {q0-q1}, [r2,:128]!     @ load 8 * 4 * 2 = 64 bytes / 16 bytes per quad = 4 quad registers
        vld1.64         {q2-q3}, [r2,:128]

        transpose16     q0, q1, q2, q3          @ transpose rows to columns

        @ At this point:
        @   src[0]   d0
        @   src[1]   d2
        @   src[2]   d4
        @   src[3]   d6
        @   src[4]   d1
        @   src[5]   d3
        @   src[6]   d5
        @   src[7]   d7

        vc1_inv_trans_8x4_helper    add=4, add1beforeshift=0, rshift=3

        @ Move output to more standardized registers
        vmov        d0, d16
        vmov        d2, d17
        vmov        d4, d18
        vmov        d6, d19
        vmov        d1, d21
        vmov        d3, d20
        vmov        d5, d23
        vmov        d7, d22

        @ At this point:
        @   dst[0]   d0
        @   dst[1]   d2
        @   dst[2]   d4
        @   dst[3]   d6
        @   dst[4]   d1
        @   dst[5]   d3
        @   dst[6]   d5
        @   dst[7]   d7

        transpose16     q0, q1, q2, q3   @ turn columns into rows

        @ At this point:
        @   row[0] q0
        @   row[1] q1
        @   row[2] q2
        @   row[3] q3

        vc1_inv_trans_4x8_helper    add=64, rshift=7

        @ At this point:
        @   line[0].l   d0
        @   line[0].h   d1
        @   line[1].l   d2
        @   line[1].h   d3
        @   line[2].l   d4
        @   line[2].h   d5
        @   line[3].l   d6
        @   line[3].h   d7

        @ unused registers: q12, q13, q14, q15

        vld1.64         {d28}, [r0,:64], r1     @ read dest
        vld1.64         {d29}, [r0,:64], r1
        vld1.64         {d30}, [r0,:64], r1
        vld1.64         {d31}, [r0,:64], r1
        sub             r0,  r0,  r1, lsl #2    @ restore original r0 value

        vaddw.u8        q0,  q0,  d28           @ line[0] += dest[0]
        vaddw.u8        q1,  q1,  d29           @ line[1] += dest[1]
        vaddw.u8        q2,  q2,  d30           @ line[2] += dest[2]
        vaddw.u8        q3,  q3,  d31           @ line[3] += dest[3]

        vqmovun.s16     d0,  q0                 @ line[0]
        vqmovun.s16     d1,  q1                 @ line[1]
        vqmovun.s16     d2,  q2                 @ line[2]
        vqmovun.s16     d3,  q3                 @ line[3]

        vst1.64         {d0},  [r0,:64], r1     @ write dest
        vst1.64         {d1},  [r0,:64], r1
        vst1.64         {d2},  [r0,:64], r1
        vst1.64         {d3},  [r0,:64]

        bx              lr
endfunc

@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x8_neon, export=1
        mov             r12, #(8 * 2)  @ 8 elements per line, each element 2 bytes
        vld4.16         {d0[],  d2[],  d4[],  d6[]},  [r2,:64], r12     @ read each column into a q register
        vld4.16         {d0[1], d2[1], d4[1], d6[1]}, [r2,:64], r12
        vld4.16         {d0[2], d2[2], d4[2], d6[2]}, [r2,:64], r12
        vld4.16         {d0[3], d2[3], d4[3], d6[3]}, [r2,:64], r12
        vld4.16         {d1[],  d3[],  d5[],  d7[]},  [r2,:64], r12
        vld4.16         {d1[1], d3[1], d5[1], d7[1]}, [r2,:64], r12
        vld4.16         {d1[2], d3[2], d5[2], d7[2]}, [r2,:64], r12
        vld4.16         {d1[3], d3[3], d5[3], d7[3]}, [r2,:64]

        vc1_inv_trans_4x8_helper    add=4, rshift=3

        @ At this point:
        @   dst[0] = q0
        @   dst[1] = q1
        @   dst[2] = q2
        @   dst[3] = q3

        transpose16     q0, q1, q2, q3  @ Transpose rows (registers) into columns

        vc1_inv_trans_8x4_helper    add=64, add1beforeshift=1, rshift=7

        vld1.32         {d28[]},  [r0,:32], r1  @ read dest
        vld1.32         {d28[1]}, [r0,:32], r1
        vld1.32         {d29[]},  [r0,:32], r1
        vld1.32         {d29[1]}, [r0,:32], r1

        vld1.32         {d30[]},  [r0,:32], r1
        vld1.32         {d30[0]}, [r0,:32], r1
        vld1.32         {d31[]},  [r0,:32], r1
        vld1.32         {d31[0]}, [r0,:32], r1
        sub             r0,  r0,  r1, lsl #3    @ restore original r0 value

        vaddw.u8        q8,  q8,  d28           @ line[0,1] += dest[0,1]
        vaddw.u8        q9,  q9,  d29           @ line[2,3] += dest[2,3]
        vaddw.u8        q10, q10, d30           @ line[5,4] += dest[5,4]
        vaddw.u8        q11, q11, d31           @ line[7,6] += dest[7,6]

        vqmovun.s16     d16, q8                 @ clip(line[0,1])
        vqmovun.s16     d18, q9                 @ clip(line[2,3])
        vqmovun.s16     d20, q10                @ clip(line[5,4])
        vqmovun.s16     d22, q11                @ clip(line[7,6])

        vst1.32         {d16[0]}, [r0,:32], r1  @ write dest
        vst1.32         {d16[1]}, [r0,:32], r1
        vst1.32         {d18[0]}, [r0,:32], r1
        vst1.32         {d18[1]}, [r0,:32], r1

        vst1.32         {d20[1]}, [r0,:32], r1
        vst1.32         {d20[0]}, [r0,:32], r1
        vst1.32         {d22[1]}, [r0,:32], r1
        vst1.32         {d22[0]}, [r0,:32]

        bx              lr
endfunc

@ Set up the constants in registers that are used by vc1_inv_trans_4x4_helper
.macro vc1_inv_trans_4x4_helper_setup
        vmov.i16        q13, #17
        vmov.i16        q14, #22
        vmov.i16        d30, #10                @ only need double-word, not quad-word
.endm

@ This is modeled after the first for loop in vc1_inv_trans_4x4_c.
.macro vc1_inv_trans_4x4_helper add rshift
        vmov.i16        q2,  #\add              @ t1|t2 will accumulate here

        vadd.i16        d16, d0,  d1            @ temp1 = src[0] + src[2]
        vsub.i16        d17, d0,  d1            @ temp2 = src[0] - src[2]
        vmul.i16        q3,  q14, q1            @ t3|t4 = 22 * (src[1]|src[3])
        vmla.i16        q2,  q13, q8            @ t1|t2 = 17 * (temp1|temp2) + add
        vmla.i16        d6,  d30, d3            @ t3 += 10 * src[3]
        vmls.i16        d7,  d30, d2            @ t4 -= 10 * src[1]

        vadd.i16        q0,  q2,  q3            @ dst[0,2] = (t1|t2 + t3|t4)
        vsub.i16        q1,  q2,  q3            @ dst[3,1] = (t1|t2 - t3|t4)
        vshr.s16        q0,  q0,  #\rshift      @ dst[0,2] >>= rshift
        vshr.s16        q1,  q1,  #\rshift      @ dst[3,1] >>= rshift
.endm
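
@ The arithmetic here is the same 17/22/10 four-point transform as in
@ vc1_inv_trans_4x8_helper (see the reference sketch there), only on
@ double-word columns and with plain add/sub plus a full rshift instead of the
@ halving workaround.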

@ (uint8_t *dest [r0], ptrdiff_t stride [r1], int16_t *block [r2])
function ff_vc1_inv_trans_4x4_neon, export=1
        mov             r12, #(8 * 2)  @ 8 elements per line, each element 2 bytes
        vld4.16         {d0[],  d1[],  d2[],  d3[]},  [r2,:64], r12     @ read each column into a register
        vld4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r12
        vld4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r12
        vld4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64]

        vswp            d1,  d2         @ so that we can later access column 1 and column 3 as a single q1 register

        vc1_inv_trans_4x4_helper_setup

        @ At this point:
        @   src[0] = d0
        @   src[1] = d2
        @   src[2] = d1
        @   src[3] = d3

        vc1_inv_trans_4x4_helper add=4, rshift=3     @ compute t1, t2, t3, t4 and combine them into dst[0-3]

        @ At this point:
        @   dst[0] = d0
        @   dst[1] = d3
        @   dst[2] = d1
        @   dst[3] = d2

        transpose16     d0, d3, d1, d2  @ Transpose rows (registers) into columns

        @ At this point:
        @   src[0]  = d0
        @   src[8]  = d3
        @   src[16] = d1
        @   src[24] = d2

        vswp            d2,  d3         @ so that we can later access column 1 and column 3 in order as a single q1 register

        @ At this point:
        @   src[0]  = d0
        @   src[8]  = d2
        @   src[16] = d1
        @   src[24] = d3

        vc1_inv_trans_4x4_helper add=64, rshift=7             @ compute t1, t2, t3, t4 and combine them into dst[0-3]

        @ At this point:
        @   line[0] = d0
        @   line[1] = d3
        @   line[2] = d1
        @   line[3] = d2

        vld1.32         {d18[]},  [r0,:32], r1  @ read dest
        vld1.32         {d19[]},  [r0,:32], r1
        vld1.32         {d18[1]}, [r0,:32], r1
        vld1.32         {d19[0]}, [r0,:32], r1
        sub             r0,  r0,  r1, lsl #2    @ restore original r0 value

        vaddw.u8        q0,  q0,  d18           @ line[0,2] += dest[0,2]
        vaddw.u8        q1,  q1,  d19           @ line[3,1] += dest[3,1]

        vqmovun.s16     d0,  q0                 @ clip(line[0,2])
        vqmovun.s16     d1,  q1                 @ clip(line[3,1])

        vst1.32         {d0[0]},  [r0,:32], r1  @ write dest
        vst1.32         {d1[1]},  [r0,:32], r1
        vst1.32         {d0[1]},  [r0,:32], r1
        vst1.32         {d1[0]},  [r0,:32]

        bx              lr
endfunc

@ The absolute values of the multiplication constants from vc1_mspel_filter and vc1_mspel_{ver,hor}_filter_16bits.
@ The sign is embedded in the code below that carries out the multiplication (mspel_filter{,.16}).
#define MSPEL_MODE_1_MUL_CONSTANTS  4, 53, 18, 3
#define MSPEL_MODE_2_MUL_CONSTANTS  1, 9,  9,  1
#define MSPEL_MODE_3_MUL_CONSTANTS  3, 18, 53, 4

@ These constants come from reading the source code of vc1_mspel_mc: the first is the value that
@ is added to `rnd` to form the variable `r`, the second is the value of the variable `shift`.
#define MSPEL_MODES_11_ADDSHIFT_CONSTANTS   15, 5
#define MSPEL_MODES_12_ADDSHIFT_CONSTANTS   3,  3
#define MSPEL_MODES_13_ADDSHIFT_CONSTANTS   15, 5
#define MSPEL_MODES_21_ADDSHIFT_CONSTANTS   MSPEL_MODES_12_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_22_ADDSHIFT_CONSTANTS   0,  1
#define MSPEL_MODES_23_ADDSHIFT_CONSTANTS   3,  3
#define MSPEL_MODES_31_ADDSHIFT_CONSTANTS   MSPEL_MODES_13_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_32_ADDSHIFT_CONSTANTS   MSPEL_MODES_23_ADDSHIFT_CONSTANTS
#define MSPEL_MODES_33_ADDSHIFT_CONSTANTS   15, 5

@ The addition and shift constants from vc1_mspel_filter.
#define MSPEL_MODE_1_ADDSHIFT_CONSTANTS     32, 6
#define MSPEL_MODE_2_ADDSHIFT_CONSTANTS     8,  4
#define MSPEL_MODE_3_ADDSHIFT_CONSTANTS     32, 6
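
@ Putting the two tables together, each filtered sample is computed roughly as
@ (an illustrative sketch assembled from the comments in mspel_filter below,
@ with A, B, C, D being the MUL_CONSTANTS of the chosen mode):
@
@   dst = clip_uint8((-A * src[-stride] + B * src[0]
@                     + C * src[stride] - D * src[stride * 2] + r) >> shift)
@
@ where r and shift come from the ADDSHIFT tables together with the rnd
@ adjustments made in the function prologues (and the intermediate pass of the
@ two-pass functions skips the clipping).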

@ Set up constants in registers for subsequent use by mspel_filter{,.16}.
.macro mspel_constants typesize reg_a reg_b reg_c reg_d filter_a filter_b filter_c filter_d reg_add filter_add_register
  @ Typesize should be i8 or i16.

  @ Only set a register if its constant is actually needed (i.e. not 1) and is not a duplicate of another
  .if \filter_a != 1
        vmov.\typesize  \reg_a,  #\filter_a          @ reg_a = filter_a
  .endif
        vmov.\typesize  \reg_b,  #\filter_b          @ reg_b = filter_b
  .if \filter_b != \filter_c
        vmov.\typesize  \reg_c,  #\filter_c          @ reg_c = filter_c
  .endif
  .if \filter_d != 1
        vmov.\typesize  \reg_d,  #\filter_d          @ reg_d = filter_d
  .endif
  @ vdup with double the element size of typesize, since the accumulator is widened
  .ifc \typesize,i8
        vdup.16         \reg_add,  \filter_add_register     @ reg_add = filter_add_register
  .else
        vdup.32         \reg_add,  \filter_add_register     @ reg_add = filter_add_register
  .endif
.endm

@ After mspel_constants has been used, do the filtering.
.macro mspel_filter acc dest src0 src1 src2 src3 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift narrow=1
  .if \filter_a != 1
        @ If filter_a != 1, then we need a vmov plus a multiply-subtract
        vmov            \acc,  \reg_add                     @ acc = reg_add
        vmlsl.u8        \acc,  \reg_a,  \src0               @ acc -= filter_a * src[-stride]
  .else
        @ If filter_a is 1, then just subtract without an extra move
        vsubw.u8        \acc,  \reg_add,  \src0             @ acc = reg_add - src[-stride]      @ since filter_a == 1
  .endif
        vmlal.u8        \acc,  \reg_b,  \src1               @ acc += filter_b * src[0]
  .if \filter_b != \filter_c
        vmlal.u8        \acc,  \reg_c,  \src2               @ acc += filter_c * src[stride]
  .else
        @ If filter_b is the same as filter_c, use the same reg_b register
        vmlal.u8        \acc,  \reg_b,  \src2               @ acc += filter_c * src[stride]     @ where filter_c == filter_b
  .endif
  .if \filter_d != 1
        @ If filter_d != 1, then do a multiply-subtract
        vmlsl.u8        \acc,  \reg_d,  \src3               @ acc -= filter_d * src[stride * 2]
  .else
        @ If filter_d is 1, then just do a subtract
        vsubw.u8        \acc,  \acc,    \src3               @ acc -= src[stride * 2]            @ since filter_d == 1
  .endif
  .if \narrow
        vqshrun.s16     \dest, \acc,    #\filter_shift      @ dest = clip_uint8(acc >> filter_shift)
  .else
        vshr.s16        \dest, \acc,    #\filter_shift      @ dest = acc >> filter_shift
  .endif
.endm

@ This is similar to mspel_filter, but the input is 16-bit instead of 8-bit and narrow=0 is not supported.
.macro mspel_filter.16 acc0 acc1 acc0_0 acc0_1 dest src0 src1 src2 src3 src4 src5 src6 src7 filter_a filter_b filter_c filter_d reg_a reg_b reg_c reg_d reg_add filter_shift
  .if \filter_a != 1
        vmov            \acc0,  \reg_add
        vmov            \acc1,  \reg_add
        vmlsl.s16       \acc0,  \reg_a,  \src0
        vmlsl.s16       \acc1,  \reg_a,  \src1
  .else
        vsubw.s16       \acc0,  \reg_add,  \src0
        vsubw.s16       \acc1,  \reg_add,  \src1
  .endif
        vmlal.s16       \acc0,  \reg_b,  \src2
        vmlal.s16       \acc1,  \reg_b,  \src3
  .if \filter_b != \filter_c
        vmlal.s16       \acc0,  \reg_c,  \src4
        vmlal.s16       \acc1,  \reg_c,  \src5
  .else
        vmlal.s16       \acc0,  \reg_b,  \src4
        vmlal.s16       \acc1,  \reg_b,  \src5
  .endif
  .if \filter_d != 1
        vmlsl.s16       \acc0,  \reg_d,  \src6
        vmlsl.s16       \acc1,  \reg_d,  \src7
  .else
        vsubw.s16       \acc0,  \acc0,   \src6
        vsubw.s16       \acc1,  \acc1,   \src7
  .endif
        @ Use acc0_0 and acc0_1 as temp space
        vqshrun.s32     \acc0_0, \acc0,  #\filter_shift     @ Shift and narrow with saturation from s32 to u16
        vqshrun.s32     \acc0_1, \acc1,  #\filter_shift
        vqmovn.u16      \dest,  \acc0                       @ Narrow with saturation from u16 to u8
.endm

@ Register usage for put_vc1_mspel_mc functions. Registers marked 'hv' are only used in put_vc1_mspel_mc_hv.
@
@   r0        adjusted dst
@   r1        adjusted src
@   r2        stride
@   r3        adjusted rnd
@   r4 [hv]   tmp
@   r11 [hv]  sp saved
@   r12       loop counter
@   d0        src[-stride]
@   d1        src[0]
@   d2        src[stride]
@   d3        src[stride * 2]
@   q0 [hv]   src[-stride]
@   q1 [hv]   src[0]
@   q2 [hv]   src[stride]
@   q3 [hv]   src[stride * 2]
@   d21       often result from mspel_filter
@   q11       accumulator 0
@   q12 [hv]  accumulator 1
@   q13       accumulator initial value
@   d28       filter_a
@   d29       filter_b
@   d30       filter_c
@   d31       filter_d

@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_hv hmode vmode filter_h_a filter_h_b filter_h_c filter_h_d filter_v_a filter_v_b filter_v_c filter_v_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()\vmode\()_neon, export=1
        push            {r4, r11, lr}
        mov             r11, sp                 @ r11 = stack pointer before realignment
A       bic             sp,  sp,  #15           @ sp = round down to multiple of 16 bytes
T       bic             r4,  r11, #15
T       mov             sp,  r4
        sub             sp,  sp,  #(8*2*16)     @ make space for 8 rows * 2 bytes per element * 16 elements per row (to fit 11 actual elements per row)
        mov             r4,  sp                 @ r4 = int16_t tmp[8 * 16]

        sub             r1,  r1,  #1            @ src -= 1
  .if \filter_add != 0
        add             r3,  r3,  #\filter_add  @ r3 = filter_add + rnd
  .endif
        mov             r12, #8                 @ loop counter
        sub             r1,  r1,  r2            @ r1 = &src[-stride]      @ slide back

        @ Do vertical filtering from src into tmp
        mspel_constants i8, d28, d29, d30, d31, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, q13, r3

        vld1.64         {d0,d1}, [r1], r2
        vld1.64         {d2,d3}, [r1], r2
        vld1.64         {d4,d5}, [r1], r2

1:
        subs            r12,  r12,  #4

        vld1.64         {d6,d7}, [r1], r2
        mspel_filter    q11, q11, d0, d2, d4, d6, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d1, d3, d5, d7, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        vld1.64         {d0,d1}, [r1], r2
        mspel_filter    q11, q11, d2, d4, d6, d0, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d3, d5, d7, d1, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        vld1.64         {d2,d3}, [r1], r2
        mspel_filter    q11, q11, d4, d6, d0, d2, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d5, d7, d1, d3, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        vld1.64         {d4,d5}, [r1], r2
        mspel_filter    q11, q11, d6, d0, d2, d4, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        mspel_filter    q12, q12, d7, d1, d3, d5, \filter_v_a, \filter_v_b, \filter_v_c, \filter_v_d, d28, d29, d30, d31, q13, \filter_shift, narrow=0
        vst1.64         {q11,q12}, [r4,:128]!   @ store and increment

        bne             1b

        rsb             r3,   r3,  #(64 + \filter_add)      @ r3 = (64 + filter_add) - r3 = 64 - rnd
        mov             r12,  #8                @ loop counter
        mov             r4,   sp                @ r4 = tmp

        @ Do horizontal filtering from tmp to dst
        mspel_constants i16, d28, d29, d30, d31, \filter_h_a, \filter_h_b, \filter_h_c, \filter_h_d, q13, r3

2:
        subs            r12,  r12,  #1

        vld1.64         {q0,q1}, [r4,:128]!     @ read one line of tmp
        vext.16         q2,   q0,   q1,  #2
        vext.16         q3,   q0,   q1,  #3
        vext.16         q1,   q0,   q1,  #1     @ do last because it writes to q1 which is read by the other vext instructions

        mspel_filter.16 q11, q12, d22, d23, d21, d0, d1, d2, d3, d4, d5, d6, d7, \filter_h_a, \filter_h_b, \filter_h_c, \filter_h_d, d28, d29, d30, d31, q13, 7

        vst1.64         {d21}, [r0,:64], r2     @ store and increment dst

        bne             2b

        mov             sp,  r11
        pop             {r4, r11, pc}
endfunc
.endm
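
@ In outline, each function generated from this macro does roughly the
@ following (an illustrative sketch, not a verbatim transcription of
@ vc1_mspel_mc):
@
@   int16_t tmp[8 * 16];            // only 11 of the 16 columns are used
@   // pass 1: vertical 4-tap filter over 11 input rows -> 8 rows of tmp,
@   //         rounded with rnd + filter_add, shifted by filter_shift,
@   //         kept as 16-bit values (narrow=0)
@   // pass 2: horizontal 4-tap filter over each row of tmp, rounded with
@   //         64 - rnd, shifted by 7, clipped to 8 bits and stored to dst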

@ Use C preprocessor and assembler macros to expand to functions for horizontal and vertical filtering.
#define PUT_VC1_MSPEL_MC_HV(hmode, vmode)   \
    put_vc1_mspel_mc_hv hmode, vmode, \
        MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS, \
        MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS, \
        MSPEL_MODES_ ## hmode ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_HV(1, 1)
PUT_VC1_MSPEL_MC_HV(1, 2)
PUT_VC1_MSPEL_MC_HV(1, 3)
PUT_VC1_MSPEL_MC_HV(2, 1)
PUT_VC1_MSPEL_MC_HV(2, 2)
PUT_VC1_MSPEL_MC_HV(2, 3)
PUT_VC1_MSPEL_MC_HV(3, 1)
PUT_VC1_MSPEL_MC_HV(3, 2)
PUT_VC1_MSPEL_MC_HV(3, 3)

#undef PUT_VC1_MSPEL_MC_HV

.macro  put_vc1_mspel_mc_h_only hmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc\hmode\()0_neon, export=1
        rsb             r3,   r3,   #\filter_add        @ r3 = filter_add - r = filter_add - rnd
        mov             r12,  #8                        @ loop counter
        sub             r1,   r1,   #1                  @ slide back, using immediate

        mspel_constants i8, d28, d29, d30, d31, \filter_a, \filter_b, \filter_c, \filter_d, q13, r3

1:
        subs            r12,  r12,  #1

        vld1.64         {d0,d1}, [r1], r2               @ read 16 bytes even though we only need 11, also src += stride
        vext.8          d2,   d0,   d1,  #2
        vext.8          d3,   d0,   d1,  #3
        vext.8          d1,   d0,   d1,  #1             @ do last because it writes to d1 which is read by the other vext instructions

        mspel_filter    q11, d21, d0, d1, d2, d3, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift

        vst1.64         {d21}, [r0,:64], r2             @ store and increment dst

        bne             1b

        bx              lr
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for horizontal only filtering.
#define PUT_VC1_MSPEL_MC_H_ONLY(hmode) \
        put_vc1_mspel_mc_h_only hmode, MSPEL_MODE_ ## hmode ## _MUL_CONSTANTS, MSPEL_MODE_ ## hmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_H_ONLY(1)
PUT_VC1_MSPEL_MC_H_ONLY(2)
PUT_VC1_MSPEL_MC_H_ONLY(3)

#undef PUT_VC1_MSPEL_MC_H_ONLY

@ (uint8_t *dst [r0], const uint8_t *src [r1], ptrdiff_t stride [r2], int rnd [r3])
.macro put_vc1_mspel_mc_v_only vmode filter_a filter_b filter_c filter_d filter_add filter_shift
function ff_put_vc1_mspel_mc0\vmode\()_neon, export=1
        add             r3,   r3,   #\filter_add - 1    @ r3 = filter_add - r = filter_add - (1 - rnd) = filter_add - 1 + rnd
        mov             r12,  #8                        @ loop counter
        sub             r1,   r1,   r2                  @ r1 = &src[-stride]      @ slide back

        mspel_constants i8, d28, d29, d30, d31, \filter_a, \filter_b, \filter_c, \filter_d, q13, r3

        vld1.64         {d0},  [r1], r2                 @ d0 = src[-stride]
        vld1.64         {d1},  [r1], r2                 @ d1 = src[0]
        vld1.64         {d2},  [r1], r2                 @ d2 = src[stride]

1:
        subs            r12,  r12,  #4

        vld1.64         {d3},  [r1], r2                 @ d3 = src[stride * 2]
        mspel_filter    q11, d21, d0, d1, d2, d3, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2             @ store and increment dst

        vld1.64         {d0},  [r1], r2                 @ d0 = next line
        mspel_filter    q11, d21, d1, d2, d3, d0, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2             @ store and increment dst

        vld1.64         {d1},  [r1], r2                 @ d1 = next line
        mspel_filter    q11, d21, d2, d3, d0, d1, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2             @ store and increment dst

        vld1.64         {d2},  [r1], r2                 @ d2 = next line
        mspel_filter    q11, d21, d3, d0, d1, d2, \filter_a, \filter_b, \filter_c, \filter_d, d28, d29, d30, d31, q13, \filter_shift
        vst1.64         {d21}, [r0,:64], r2             @ store and increment dst

        bne             1b

        bx              lr
endfunc
.endm

@ Use C preprocessor and assembler macros to expand to functions for vertical only filtering.
#define PUT_VC1_MSPEL_MC_V_ONLY(vmode) \
        put_vc1_mspel_mc_v_only vmode, MSPEL_MODE_ ## vmode ## _MUL_CONSTANTS, MSPEL_MODE_ ## vmode ## _ADDSHIFT_CONSTANTS

PUT_VC1_MSPEL_MC_V_ONLY(1)
PUT_VC1_MSPEL_MC_V_ONLY(2)
PUT_VC1_MSPEL_MC_V_ONLY(3)

#undef PUT_VC1_MSPEL_MC_V_ONLY

function ff_put_pixels8x8_neon, export=1
        vld1.64         {d0}, [r1], r2
        vld1.64         {d1}, [r1], r2
        vld1.64         {d2}, [r1], r2
        vld1.64         {d3}, [r1], r2
        vld1.64         {d4}, [r1], r2
        vld1.64         {d5}, [r1], r2
        vld1.64         {d6}, [r1], r2
        vld1.64         {d7}, [r1]
        vst1.64         {d0}, [r0,:64], r2
        vst1.64         {d1}, [r0,:64], r2
        vst1.64         {d2}, [r0,:64], r2
        vst1.64         {d3}, [r0,:64], r2
        vst1.64         {d4}, [r0,:64], r2
        vst1.64         {d5}, [r0,:64], r2
        vst1.64         {d6}, [r0,:64], r2
        vst1.64         {d7}, [r0,:64]
        bx              lr
endfunc

function ff_vc1_inv_trans_8x8_dc_neon, export=1
        ldrsh           r2, [r2]              @ int dc = block[0];

        vld1.64         {d0},  [r0,:64], r1
        vld1.64         {d1},  [r0,:64], r1
        vld1.64         {d4},  [r0,:64], r1
        vld1.64         {d5},  [r0,:64], r1

        add             r2, r2, r2, lsl #1    @ dc = (3 * dc +  1) >> 1;
        vld1.64         {d6},  [r0,:64], r1
        add             r2, r2, #1
        vld1.64         {d7},  [r0,:64], r1
        vld1.64         {d16}, [r0,:64], r1
        vld1.64         {d17}, [r0,:64], r1
        asr             r2, r2, #1

        sub             r0,  r0,  r1, lsl #3  @ restore r0 to original value

        add             r2, r2, r2, lsl #1    @ dc = (3 * dc + 16) >> 5;
        add             r2, r2, #16
        asr             r2, r2, #5

        vdup.16         q1,  r2               @ dc

        vaddw.u8        q9,   q1,  d0
        vaddw.u8        q10,  q1,  d1
        vaddw.u8        q11,  q1,  d4
        vaddw.u8        q12,  q1,  d5
        vqmovun.s16     d0,  q9
        vqmovun.s16     d1,  q10
        vqmovun.s16     d4,  q11
        vst1.64         {d0},  [r0,:64], r1
        vqmovun.s16     d5,  q12
        vst1.64         {d1},  [r0,:64], r1
        vaddw.u8        q13,  q1,  d6
        vst1.64         {d4},  [r0,:64], r1
        vaddw.u8        q14,  q1,  d7
        vst1.64         {d5},  [r0,:64], r1
        vaddw.u8        q15,  q1,  d16
        vaddw.u8        q1,   q1,  d17        @ this destroys q1
        vqmovun.s16     d6,  q13
        vqmovun.s16     d7,  q14
        vqmovun.s16     d16, q15
        vqmovun.s16     d17, q1
        vst1.64         {d6},  [r0,:64], r1
        vst1.64         {d7},  [r0,:64], r1
        vst1.64         {d16}, [r0,:64], r1
        vst1.64         {d17}, [r0,:64]
        bx              lr
endfunc

function ff_vc1_inv_trans_8x4_dc_neon, export=1
        ldrsh           r2, [r2]              @ int dc = block[0];

        vld1.64         {d0},  [r0,:64], r1
        vld1.64         {d1},  [r0,:64], r1
        vld1.64         {d4},  [r0,:64], r1
        vld1.64         {d5},  [r0,:64], r1

        add             r2, r2, r2, lsl #1    @ dc = ( 3 * dc +  1) >> 1;

        sub             r0,  r0,  r1, lsl #2  @ restore r0 to original value

        add             r2, r2, #1
        asr             r2, r2, #1

        add             r2, r2, r2, lsl #4    @ dc = (17 * dc + 64) >> 7;
        add             r2, r2, #64
        asr             r2, r2, #7

        vdup.16         q1,  r2               @ dc

        vaddw.u8        q3,  q1,  d0
        vaddw.u8        q8,  q1,  d1
        vaddw.u8        q9,  q1,  d4
        vaddw.u8        q10, q1,  d5
        vqmovun.s16     d0,  q3
        vqmovun.s16     d1,  q8
        vqmovun.s16     d4,  q9
        vst1.64         {d0},  [r0,:64], r1
        vqmovun.s16     d5,  q10
        vst1.64         {d1},  [r0,:64], r1
        vst1.64         {d4},  [r0,:64], r1
        vst1.64         {d5},  [r0,:64]
        bx              lr
endfunc

function ff_vc1_inv_trans_4x8_dc_neon, export=1
        ldrsh           r2, [r2]              @ int dc = block[0];

        vld1.32         {d0[]},   [r0,:32], r1
        vld1.32         {d1[]},   [r0,:32], r1
        vld1.32         {d0[1]},  [r0,:32], r1
        vld1.32         {d1[1]},  [r0,:32], r1

        add             r2, r2, r2, lsl #4    @ dc = (17 * dc +  4) >> 3;
        vld1.32         {d4[]},   [r0,:32], r1
        add             r2, r2, #4
        vld1.32         {d5[]},   [r0,:32], r1
        vld1.32         {d4[1]},  [r0,:32], r1
        asr             r2, r2, #3
        vld1.32         {d5[1]},  [r0,:32], r1

        add             r2, r2, r2, lsl #1    @ dc = (12 * dc + 64) >> 7;

        sub             r0,  r0,  r1, lsl #3  @ restore r0 to original value

        lsl             r2, r2, #2
        add             r2, r2, #64
        asr             r2, r2, #7

        vdup.16         q1,  r2               @ dc

        vaddw.u8        q3,  q1,  d0
        vaddw.u8        q8,  q1,  d1
        vaddw.u8        q9,  q1,  d4
        vaddw.u8        q10, q1,  d5
        vqmovun.s16     d0,  q3
        vst1.32         {d0[0]},  [r0,:32], r1
        vqmovun.s16     d1,  q8
        vst1.32         {d1[0]},  [r0,:32], r1
        vqmovun.s16     d4,  q9
        vst1.32         {d0[1]},  [r0,:32], r1
        vqmovun.s16     d5,  q10
        vst1.32         {d1[1]},  [r0,:32], r1
        vst1.32         {d4[0]},  [r0,:32], r1
        vst1.32         {d5[0]},  [r0,:32], r1
        vst1.32         {d4[1]},  [r0,:32], r1
        vst1.32         {d5[1]},  [r0,:32]
        bx              lr
endfunc

function ff_vc1_inv_trans_4x4_dc_neon, export=1
        ldrsh           r2, [r2]              @ int dc = block[0];

        vld1.32         {d0[]},   [r0,:32], r1
        vld1.32         {d1[]},   [r0,:32], r1
        vld1.32         {d0[1]},  [r0,:32], r1
        vld1.32         {d1[1]},  [r0,:32], r1

        add             r2, r2, r2, lsl #4    @ dc = (17 * dc +  4) >> 3;

        sub             r0,  r0,  r1, lsl #2  @ restore r0 to original value

        add             r2, r2, #4
        asr             r2, r2, #3

        add             r2, r2, r2, lsl #4    @ dc = (17 * dc + 64) >> 7;
        add             r2, r2, #64
        asr             r2, r2, #7

        vdup.16         q1,  r2               @ dc

        vaddw.u8        q2,  q1,  d0
        vaddw.u8        q3,  q1,  d1
        vqmovun.s16     d0,  q2
        vst1.32         {d0[0]},  [r0,:32], r1
        vqmovun.s16     d1,  q3
        vst1.32         {d1[0]},  [r0,:32], r1
        vst1.32         {d0[1]},  [r0,:32], r1
        vst1.32         {d1[1]},  [r0,:32]
        bx              lr
endfunc

@ VC-1 in-loop deblocking filter for 4 pixel pairs at boundary of vertically-neighbouring blocks
@ On entry:
@   r0 -> top-left pel of lower block
@   r1 = row stride, bytes
@   r2 = PQUANT bitstream parameter
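@
@ A rough per-pair sketch of the decision implemented below (illustrative,
@ assembled from the instruction comments; P1..P8 are the 8 pixels straddling
@ the edge, P4/P5 being the pair that may be modified):
@
@   a0   = (2*P3 - 5*P4 + 5*P5 - 2*P6 + 4) >> 3
@   a1   = |(2*P1 - 5*P2 + 5*P3 - 2*P4 + 4) >> 3|
@   a2   = |(2*P5 - 5*P6 + 5*P7 - 2*P8 + 4) >> 3|
@   a3   = min(a1, a2)
@   clip = |P4 - P5| >> 1
@   d    = min((5 * (|a0| - a3)) >> 3, clip)
@   if (clip != 0 && |a0| < pq && a3 < |a0|)
@       P4 -= s * d;  P5 += s * d      // s = clip_sign - a0_sign, so s is 0
@                                      // when the two signs match
@
@ with the tst/bne below gating the store of all 4 pairs on one pair's result.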
function ff_vc1_v_loop_filter4_neon, export=1
        sub             r3, r0, r1, lsl #2
        vldr            d0, .Lcoeffs
        vld1.32         {d1[0]}, [r0], r1       @ P5
        vld1.32         {d2[0]}, [r3], r1       @ P1
        vld1.32         {d3[0]}, [r3], r1       @ P2
        vld1.32         {d4[0]}, [r0], r1       @ P6
        vld1.32         {d5[0]}, [r3], r1       @ P3
        vld1.32         {d6[0]}, [r0], r1       @ P7
        vld1.32         {d7[0]}, [r3]           @ P4
        vld1.32         {d16[0]}, [r0]          @ P8
        vshll.u8        q9, d1, #1              @ 2*P5
        vdup.16         d17, r2                 @ pq
        vshll.u8        q10, d2, #1             @ 2*P1
        vmovl.u8        q11, d3                 @ P2
        vmovl.u8        q1, d4                  @ P6
        vmovl.u8        q12, d5                 @ P3
        vmls.i16        d20, d22, d0[1]         @ 2*P1-5*P2
        vmovl.u8        q11, d6                 @ P7
        vmls.i16        d18, d2, d0[1]          @ 2*P5-5*P6
        vshll.u8        q2, d5, #1              @ 2*P3
        vmovl.u8        q3, d7                  @ P4
        vmla.i16        d18, d22, d0[1]         @ 2*P5-5*P6+5*P7
        vmovl.u8        q11, d16                @ P8
        vmla.u16        d20, d24, d0[1]         @ 2*P1-5*P2+5*P3
        vmovl.u8        q12, d1                 @ P5
        vmls.u16        d4, d6, d0[1]           @ 2*P3-5*P4
        vmls.u16        d18, d22, d0[0]         @ 2*P5-5*P6+5*P7-2*P8
        vsub.i16        d1, d6, d24             @ P4-P5
        vmls.i16        d20, d6, d0[0]          @ 2*P1-5*P2+5*P3-2*P4
        vmla.i16        d4, d24, d0[1]          @ 2*P3-5*P4+5*P5
        vmls.i16        d4, d2, d0[0]           @ 2*P3-5*P4+5*P5-2*P6
        vabs.s16        d2, d1
        vrshr.s16       d3, d18, #3
        vrshr.s16       d5, d20, #3
        vshr.s16        d2, d2, #1              @ clip
        vrshr.s16       d4, d4, #3
        vabs.s16        d3, d3                  @ a2
        vshr.s16        d1, d1, #8              @ clip_sign
        vabs.s16        d5, d5                  @ a1
        vceq.i16        d7, d2, #0              @ test clip == 0
        vabs.s16        d16, d4                 @ a0
        vshr.s16        d4, d4, #8              @ a0_sign
        vcge.s16        d18, d5, d3             @ test a1 >= a2
        vcge.s16        d17, d16, d17           @ test a0 >= pq
        vbsl            d18, d3, d5             @ a3
        vsub.i16        d1, d1, d4              @ clip_sign - a0_sign
        vorr            d3, d7, d17             @ test clip == 0 || a0 >= pq
        vqsub.u16       d4, d16, d18            @ a0 >= a3 ? a0-a3 : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the absolute value)
        vcge.s16        d5, d18, d16            @ test a3 >= a0
        vmul.i16        d0, d4, d0[1]           @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            d4, d3, d5              @ test clip == 0 || a0 >= pq || a3 >= a0
        vmov.32         r0, d4[1]               @ move to gp reg
        vshr.u16        d0, d0, #3              @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vcge.s16        d4, d0, d2
        tst             r0, #1
        bne             1f                      @ none of the 4 pixel pairs should be updated if this one is not filtered
        vbsl            d4, d2, d0              @ FFMIN(d, clip)
        vbic            d0, d4, d3              @ set each d to zero if it should not be filtered because clip == 0 || a0 >= pq (a3 > a0 case already zeroed by saturating sub)
        vmls.i16        d6, d0, d1              @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vmla.i16        d24, d0, d1             @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vqmovun.s16     d0, q3
        vqmovun.s16     d1, q12
        vst1.32         {d0[0]}, [r3], r1
        vst1.32         {d1[0]}, [r3]
1:      bx              lr
endfunc

@ VC-1 in-loop deblocking filter for 4 pixel pairs at boundary of horizontally-neighbouring blocks
@ On entry:
@   r0 -> top-left pel of right block
@   r1 = row stride, bytes
@   r2 = PQUANT bitstream parameter
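@ The per-pair arithmetic matches ff_vc1_v_loop_filter4_neon above; only the
@ load/store pattern differs, since the pixels sit across rows and are
@ transposed into position first.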
function ff_vc1_h_loop_filter4_neon, export=1
        sub             r3, r0, #4              @ where to start reading
        vldr            d0, .Lcoeffs
        vld1.32         {d2}, [r3], r1
        sub             r0, r0, #1              @ where to start writing
        vld1.32         {d4}, [r3], r1
        vld1.32         {d3}, [r3], r1
        vld1.32         {d5}, [r3]
        vdup.16         d1, r2                  @ pq
        vtrn.8          q1, q2
        vtrn.16         d2, d3                  @ P1, P5, P3, P7
        vtrn.16         d4, d5                  @ P2, P6, P4, P8
        vshll.u8        q3, d2, #1              @ 2*P1, 2*P5
        vmovl.u8        q8, d4                  @ P2, P6
        vmovl.u8        q9, d3                  @ P3, P7
        vmovl.u8        q2, d5                  @ P4, P8
        vmls.i16        q3, q8, d0[1]           @ 2*P1-5*P2, 2*P5-5*P6
        vshll.u8        q10, d3, #1             @ 2*P3, 2*P7
        vmovl.u8        q1, d2                  @ P1, P5
        vmla.i16        q3, q9, d0[1]           @ 2*P1-5*P2+5*P3, 2*P5-5*P6+5*P7
        vmls.i16        q3, q2, d0[0]           @ 2*P1-5*P2+5*P3-2*P4, 2*P5-5*P6+5*P7-2*P8
        vmov            d2, d3                  @ needs to be in an even-numbered vector for when we come to narrow it later
        vmls.i16        d20, d4, d0[1]          @ 2*P3-5*P4
        vmla.i16        d20, d3, d0[1]          @ 2*P3-5*P4+5*P5
        vsub.i16        d3, d4, d2              @ P4-P5
        vmls.i16        d20, d17, d0[0]         @ 2*P3-5*P4+5*P5-2*P6
        vrshr.s16       q3, q3, #3
        vabs.s16        d5, d3
        vshr.s16        d3, d3, #8              @ clip_sign
        vrshr.s16       d16, d20, #3
        vabs.s16        q3, q3                  @ a1, a2
        vshr.s16        d5, d5, #1              @ clip
        vabs.s16        d17, d16                @ a0
        vceq.i16        d18, d5, #0             @ test clip == 0
        vshr.s16        d16, d16, #8            @ a0_sign
        vcge.s16        d19, d6, d7             @ test a1 >= a2
        vcge.s16        d1, d17, d1             @ test a0 >= pq
        vsub.i16        d16, d3, d16            @ clip_sign - a0_sign
        vbsl            d19, d7, d6             @ a3
        vorr            d1, d18, d1             @ test clip == 0 || a0 >= pq
        vqsub.u16       d3, d17, d19            @ a0 >= a3 ? a0-a3 : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the absolute value)
        vcge.s16        d6,  d19, d17           @ test a3 >= a0
        vmul.i16        d0, d3, d0[1]           @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            d3, d1, d6              @ test clip == 0 || a0 >= pq || a3 >= a0
        vmov.32         r2, d3[1]               @ move to gp reg
        vshr.u16        d0, d0, #3              @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vcge.s16        d3, d0, d5
        tst             r2, #1
        bne             1f                      @ none of the 4 pixel pairs should be updated if this one is not filtered
        vbsl            d3, d5, d0              @ FFMIN(d, clip)
        vbic            d0, d3, d1              @ set each d to zero if it should not be filtered because clip == 0 || a0 >= pq (a3 > a0 case already zeroed by saturating sub)
        vmla.i16        d2, d0, d16             @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vmls.i16        d4, d0, d16             @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vqmovun.s16     d1, q1
        vqmovun.s16     d0, q2
        vst2.8          {d0[0], d1[0]}, [r0], r1
        vst2.8          {d0[1], d1[1]}, [r0], r1
        vst2.8          {d0[2], d1[2]}, [r0], r1
        vst2.8          {d0[3], d1[3]}, [r0]
1:      bx              lr
endfunc

@ VC-1 in-loop deblocking filter for 8 pixel pairs at boundary of vertically-neighbouring blocks
@ On entry:
@   r0 -> top-left pel of lower block
@   r1 = row stride, bytes
@   r2 = PQUANT bitstream parameter
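@
@ Overview: each filtered column is handled roughly like the scalar sketch
@ below, reconstructed from the instruction comments in this file (it is not
@ copied verbatim from the C reference).  P1..P4 are the pels above the edge,
@ P5..P8 the pels below:
@
@   a0 = (2*P3 - 5*P4 + 5*P5 - 2*P6 + 4) >> 3
@   a1 = (2*P1 - 5*P2 + 5*P3 - 2*P4 + 4) >> 3
@   a2 = (2*P5 - 5*P6 + 5*P7 - 2*P8 + 4) >> 3
@   a3 = FFMIN(|a1|, |a2|)
@   clip = |P4 - P5| >> 1
@   if (clip != 0 && |a0| < pq && a3 < |a0|) {
@       d    = FFMIN((5 * (|a0| - a3)) >> 3, clip);
@       sign = clip_sign - a0_sign;  /* clip_sign = (P4 < P5) ? -1 : 0,
@                                       a0_sign  = (a0 < 0)  ? -1 : 0 */
@       P4  -= d * sign;
@       P5  += d * sign;
@   }
@
@ As in the 4-pair filter above, the store decisions are taken per group of
@ four pixel pairs, keyed off the flags computed for that group's third pair
@ (hence the scalar tst/bne tests on single lanes).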
function ff_vc1_v_loop_filter8_neon, export=1
        sub             r3, r0, r1, lsl #2
        vldr            d0, .Lcoeffs
        vld1.32         {d1}, [r0, :64], r1     @ P5
        vld1.32         {d2}, [r3, :64], r1     @ P1
        vld1.32         {d3}, [r3, :64], r1     @ P2
        vld1.32         {d4}, [r0, :64], r1     @ P6
        vld1.32         {d5}, [r3, :64], r1     @ P3
        vld1.32         {d6}, [r0, :64], r1     @ P7
        vshll.u8        q8, d1, #1              @ 2*P5
        vshll.u8        q9, d2, #1              @ 2*P1
        vld1.32         {d7}, [r3, :64]         @ P4
        vmovl.u8        q1, d3                  @ P2
        vld1.32         {d20}, [r0, :64]        @ P8
        vmovl.u8        q11, d4                 @ P6
        vdup.16         q12, r2                 @ pq
        vmovl.u8        q13, d5                 @ P3
        vmls.i16        q9, q1, d0[1]           @ 2*P1-5*P2
        vmovl.u8        q1, d6                  @ P7
        vshll.u8        q2, d5, #1              @ 2*P3
        vmls.i16        q8, q11, d0[1]          @ 2*P5-5*P6
        vmovl.u8        q3, d7                  @ P4
        vmovl.u8        q10, d20                @ P8
        vmla.i16        q8, q1, d0[1]           @ 2*P5-5*P6+5*P7
        vmovl.u8        q1, d1                  @ P5
        vmla.i16        q9, q13, d0[1]          @ 2*P1-5*P2+5*P3
        vsub.i16        q13, q3, q1             @ P4-P5
        vmls.i16        q2, q3, d0[1]           @ 2*P3-5*P4
        vmls.i16        q8, q10, d0[0]          @ 2*P5-5*P6+5*P7-2*P8
        vabs.s16        q10, q13
        vshr.s16        q13, q13, #8            @ clip_sign
        vmls.i16        q9, q3, d0[0]           @ 2*P1-5*P2+5*P3-2*P4
        vshr.s16        q10, q10, #1            @ clip
        vmla.i16        q2, q1, d0[1]           @ 2*P3-5*P4+5*P5
        vrshr.s16       q8, q8, #3
        vmls.i16        q2, q11, d0[0]          @ 2*P3-5*P4+5*P5-2*P6
        vceq.i16        q11, q10, #0            @ test clip == 0
        vrshr.s16       q9, q9, #3
        vabs.s16        q8, q8                  @ a2
        vabs.s16        q9, q9                  @ a1
        vrshr.s16       q2, q2, #3
        vcge.s16        q14, q9, q8             @ test a1 >= a2
        vabs.s16        q15, q2                 @ a0
        vshr.s16        q2, q2, #8              @ a0_sign
        vbsl            q14, q8, q9             @ a3
        vcge.s16        q8, q15, q12            @ test a0 >= pq
        vsub.i16        q2, q13, q2             @ clip_sign - a0_sign
        vqsub.u16       q9, q15, q14            @ a0 >= a3 ? a0-a3 : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the abs)
        vcge.s16        q12, q14, q15           @ test a3 >= a0
        vorr            q8, q11, q8             @ test clip == 0 || a0 >= pq
        vmul.i16        q0, q9, d0[1]           @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            q9, q8, q12             @ test clip == 0 || a0 >= pq || a3 >= a0
        vshl.i64        q11, q9, #16
        vmov.32         r0, d18[1]              @ move to gp reg
        vshr.u16        q0, q0, #3              @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vmov.32         r2, d19[1]
        vshr.s64        q9, q11, #48
        vcge.s16        q11, q0, q10
        vorr            q8, q8, q9
        and             r0, r0, r2
        vbsl            q11, q10, q0            @ FFMIN(d, clip)
        tst             r0, #1
        bne             1f                      @ none of the 8 pixel pairs should be updated in this case
        vbic            q0, q11, q8             @ set each d to zero if it should not be filtered
        vmls.i16        q3, q0, q2              @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vmla.i16        q1, q0, q2              @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vqmovun.s16     d0, q3
        vqmovun.s16     d1, q1
        vst1.32         {d0}, [r3, :64], r1
        vst1.32         {d1}, [r3, :64]
1:      bx              lr
endfunc

.align  5
.Lcoeffs:
.quad   0x00050002
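@ After "vldr d0, .Lcoeffs", d0[0] = 2 and d0[1] = 5: the two multipliers
@ used by the vmla/vmls filter taps above and below.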

@ VC-1 in-loop deblocking filter for 8 pixel pairs at boundary of horizontally-neighbouring blocks
@ On entry:
@   r0 -> top-left pel of right block
@   r1 = row stride, bytes
@   r2 = PQUANT bitstream parameter
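@
@ This is the transposed variant of the vertical filter above: the edge is
@ vertical, so eight rows of eight bytes are loaded starting four bytes to
@ the left of it, and the vtrn.8/.16/.32 sequence below rearranges them so
@ that each d register holds one of P1..P8 for all eight rows.  The filter
@ arithmetic is then the same as in ff_vc1_v_loop_filter8_neon, and the
@ updated P4/P5 pairs are written back with per-lane vst2.8 stores, one
@ group of four rows at a time.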
function ff_vc1_h_loop_filter8_neon, export=1
        push            {lr}
        sub             r3, r0, #4              @ where to start reading
        vldr            d0, .Lcoeffs
        vld1.32         {d2}, [r3], r1          @ P1[0], P2[0]...
        sub             r0, r0, #1              @ where to start writing
        vld1.32         {d4}, [r3], r1
        add             r12, r0, r1, lsl #2
        vld1.32         {d3}, [r3], r1
        vld1.32         {d5}, [r3], r1
        vld1.32         {d6}, [r3], r1
        vld1.32         {d16}, [r3], r1
        vld1.32         {d7}, [r3], r1
        vld1.32         {d17}, [r3]
        vtrn.8          q1, q2                  @ P1[0], P1[1], P3[0]... P1[2], P1[3], P3[2]... P2[0], P2[1], P4[0]... P2[2], P2[3], P4[2]...
        vdup.16         q9, r2                  @ pq
        vtrn.16         d2, d3                  @ P1[0], P1[1], P1[2], P1[3], P5[0]... P3[0], P3[1], P3[2], P3[3], P7[0]...
        vtrn.16         d4, d5                  @ P2[0], P2[1], P2[2], P2[3], P6[0]... P4[0], P4[1], P4[2], P4[3], P8[0]...
        vtrn.8          q3, q8                  @ P1[4], P1[5], P3[4]... P1[6], P1[7], P3[6]... P2[4], P2[5], P4[4]... P2[6], P2[7], P4[6]...
        vtrn.16         d6, d7                  @ P1[4], P1[5], P1[6], P1[7], P5[4]... P3[4], P3[5], P3[6], P3[7], P7[4]...
        vtrn.16         d16, d17                @ P2[4], P2[5], P2[6], P2[7], P6[4]... P4[4], P4[5], P4[6], P4[7], P8[4]...
        vtrn.32         d2, d6                  @ P1, P5
        vtrn.32         d4, d16                 @ P2, P6
        vtrn.32         d3, d7                  @ P3, P7
        vtrn.32         d5, d17                 @ P4, P8
        vshll.u8        q10, d2, #1             @ 2*P1
        vshll.u8        q11, d6, #1             @ 2*P5
        vmovl.u8        q12, d4                 @ P2
        vmovl.u8        q13, d16                @ P6
        vmovl.u8        q14, d3                 @ P3
        vmls.i16        q10, q12, d0[1]         @ 2*P1-5*P2
        vmovl.u8        q12, d7                 @ P7
        vshll.u8        q1, d3, #1              @ 2*P3
        vmls.i16        q11, q13, d0[1]         @ 2*P5-5*P6
        vmovl.u8        q2, d5                  @ P4
        vmovl.u8        q8, d17                 @ P8
        vmla.i16        q11, q12, d0[1]         @ 2*P5-5*P6+5*P7
        vmovl.u8        q3, d6                  @ P5
        vmla.i16        q10, q14, d0[1]         @ 2*P1-5*P2+5*P3
        vsub.i16        q12, q2, q3             @ P4-P5
        vmls.i16        q1, q2, d0[1]           @ 2*P3-5*P4
        vmls.i16        q11, q8, d0[0]          @ 2*P5-5*P6+5*P7-2*P8
        vabs.s16        q8, q12
        vshr.s16        q12, q12, #8            @ clip_sign
        vmls.i16        q10, q2, d0[0]          @ 2*P1-5*P2+5*P3-2*P4
        vshr.s16        q8, q8, #1              @ clip
        vmla.i16        q1, q3, d0[1]           @ 2*P3-5*P4+5*P5
        vrshr.s16       q11, q11, #3
        vmls.i16        q1, q13, d0[0]          @ 2*P3-5*P4+5*P5-2*P6
        vceq.i16        q13, q8, #0             @ test clip == 0
        vrshr.s16       q10, q10, #3
        vabs.s16        q11, q11                @ a2
        vabs.s16        q10, q10                @ a1
        vrshr.s16       q1, q1, #3
        vcge.s16        q14, q10, q11           @ test a1 >= a2
        vabs.s16        q15, q1                 @ a0
        vshr.s16        q1, q1, #8              @ a0_sign
        vbsl            q14, q11, q10           @ a3
        vcge.s16        q9, q15, q9             @ test a0 >= pq
        vsub.i16        q1, q12, q1             @ clip_sign - a0_sign
        vqsub.u16       q10, q15, q14           @ a0 >= a3 ? a0-a3 : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the abs)
        vcge.s16        q11, q14, q15           @ test a3 >= a0
        vorr            q9, q13, q9             @ test clip == 0 || a0 >= pq
        vmul.i16        q0, q10, d0[1]          @ a0 >= a3 ? 5*(a0-a3) : 0
        vorr            q10, q9, q11            @ test clip == 0 || a0 >= pq || a3 >= a0
        vmov.32         r2, d20[1]              @ move to gp reg
        vshr.u16        q0, q0, #3              @ a0 >= a3 ? (5*(a0-a3))>>3 : 0
        vmov.32         r3, d21[1]
        vcge.s16        q10, q0, q8
        and             r14, r2, r3
        vbsl            q10, q8, q0             @ FFMIN(d, clip)
        tst             r14, #1
        bne             2f                      @ none of the 8 pixel pairs should be updated in this case
        vbic            q0, q10, q9             @ set each d to zero if it should not be filtered because clip == 0 || a0 >= pq (a3 > a0 case already zeroed by saturating sub)
        vmla.i16        q3, q0, q1              @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P5
        vmls.i16        q2, q0, q1              @ invert d depending on clip_sign & a0_sign, or zero it if they match, and accumulate into P4
        vqmovun.s16     d1, q3
        vqmovun.s16     d0, q2
        tst             r2, #1
        bne             1f                      @ none of the first 4 pixel pairs should be updated if so
        vst2.8          {d0[0], d1[0]}, [r0], r1
        vst2.8          {d0[1], d1[1]}, [r0], r1
        vst2.8          {d0[2], d1[2]}, [r0], r1
        vst2.8          {d0[3], d1[3]}, [r0]
1:      tst             r3, #1
        bne             2f                      @ none of the second 4 pixel pairs should be updated if so
        vst2.8          {d0[4], d1[4]}, [r12], r1
        vst2.8          {d0[5], d1[5]}, [r12], r1
        vst2.8          {d0[6], d1[6]}, [r12], r1
        vst2.8          {d0[7], d1[7]}, [r12]
2:      pop             {pc}
endfunc

@ VC-1 in-loop deblocking filter for 16 pixel pairs at boundary of vertically-neighbouring blocks
@ On entry:
@   r0 -> top-left pel of lower block
@   r1 = row stride, bytes
@   r2 = PQUANT bitstream parameter
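@
@ Same arithmetic as ff_vc1_v_loop_filter8_neon, but the sixteen columns are
@ processed as two interleaved 8-wide halves ([0..7] and [8..15]).  This
@ needs the full q0-q15 register set, hence the vpush/vpop of d8-d15; the
@ early-out branch is only taken when every group of four pixel pairs is
@ left unfiltered.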
function ff_vc1_v_loop_filter16_neon, export=1
        vpush           {d8-d15}
        sub             r3, r0, r1, lsl #2
        vldr            d0, .Lcoeffs
        vld1.64         {q1}, [r0, :128], r1    @ P5
        vld1.64         {q2}, [r3, :128], r1    @ P1
        vld1.64         {q3}, [r3, :128], r1    @ P2
        vld1.64         {q4}, [r0, :128], r1    @ P6
        vld1.64         {q5}, [r3, :128], r1    @ P3
        vld1.64         {q6}, [r0, :128], r1    @ P7
        vshll.u8        q7, d2, #1              @ 2*P5[0..7]
        vshll.u8        q8, d4, #1              @ 2*P1[0..7]
        vld1.64         {q9}, [r3, :128]        @ P4
        vmovl.u8        q10, d6                 @ P2[0..7]
        vld1.64         {q11}, [r0, :128]       @ P8
        vmovl.u8        q12, d8                 @ P6[0..7]
        vdup.16         q13, r2                 @ pq
        vshll.u8        q2, d5, #1              @ 2*P1[8..15]
        vmls.i16        q8, q10, d0[1]          @ 2*P1[0..7]-5*P2[0..7]
        vshll.u8        q10, d3, #1             @ 2*P5[8..15]
        vmovl.u8        q3, d7                  @ P2[8..15]
        vmls.i16        q7, q12, d0[1]          @ 2*P5[0..7]-5*P6[0..7]
        vmovl.u8        q4, d9                  @ P6[8..15]
        vmovl.u8        q14, d10                @ P3[0..7]
        vmovl.u8        q15, d12                @ P7[0..7]
        vmls.i16        q2, q3, d0[1]           @ 2*P1[8..15]-5*P2[8..15]
        vshll.u8        q3, d10, #1             @ 2*P3[0..7]
        vmls.i16        q10, q4, d0[1]          @ 2*P5[8..15]-5*P6[8..15]
        vmovl.u8        q6, d13                 @ P7[8..15]
        vmla.i16        q8, q14, d0[1]          @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]
        vmovl.u8        q14, d18                @ P4[0..7]
        vmovl.u8        q9, d19                 @ P4[8..15]
        vmla.i16        q7, q15, d0[1]          @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]
        vmovl.u8        q15, d11                @ P3[8..15]
        vshll.u8        q5, d11, #1             @ 2*P3[8..15]
        vmls.i16        q3, q14, d0[1]          @ 2*P3[0..7]-5*P4[0..7]
        vmla.i16        q2, q15, d0[1]          @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]
        vmovl.u8        q15, d22                @ P8[0..7]
        vmovl.u8        q11, d23                @ P8[8..15]
        vmla.i16        q10, q6, d0[1]          @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]
        vmovl.u8        q6, d2                  @ P5[0..7]
        vmovl.u8        q1, d3                  @ P5[8..15]
        vmls.i16        q5, q9, d0[1]           @ 2*P3[8..15]-5*P4[8..15]
        vmls.i16        q8, q14, d0[0]          @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]-2*P4[0..7]
        vmls.i16        q7, q15, d0[0]          @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]-2*P8[0..7]
        vsub.i16        q15, q14, q6            @ P4[0..7]-P5[0..7]
        vmla.i16        q3, q6, d0[1]           @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]
        vrshr.s16       q8, q8, #3
        vmls.i16        q2, q9, d0[0]           @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]-2*P4[8..15]
        vrshr.s16       q7, q7, #3
        vmls.i16        q10, q11, d0[0]         @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]-2*P8[8..15]
        vabs.s16        q11, q15
        vabs.s16        q8, q8                  @ a1[0..7]
        vmla.i16        q5, q1, d0[1]           @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]
        vshr.s16        q15, q15, #8            @ clip_sign[0..7]
        vrshr.s16       q2, q2, #3
        vmls.i16        q3, q12, d0[0]          @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]-2*P6[0..7]
        vabs.s16        q7, q7                  @ a2[0..7]
        vrshr.s16       q10, q10, #3
        vsub.i16        q12, q9, q1             @ P4[8..15]-P5[8..15]
        vshr.s16        q11, q11, #1            @ clip[0..7]
        vmls.i16        q5, q4, d0[0]           @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]-2*P6[8..15]
        vcge.s16        q4, q8, q7              @ test a1[0..7] >= a2[0..7]
        vabs.s16        q2, q2                  @ a1[8..15]
        vrshr.s16       q3, q3, #3
        vabs.s16        q10, q10                @ a2[8..15]
        vbsl            q4, q7, q8              @ a3[0..7]
        vabs.s16        q7, q12
        vshr.s16        q8, q12, #8             @ clip_sign[8..15]
        vrshr.s16       q5, q5, #3
        vcge.s16        q12, q2, q10            @ test a1[8..15] >= a2[8..15]
        vshr.s16        q7, q7, #1              @ clip[8..15]
        vbsl            q12, q10, q2            @ a3[8..15]
        vabs.s16        q2, q3                  @ a0[0..7]
        vceq.i16        q10, q11, #0            @ test clip[0..7] == 0
        vshr.s16        q3, q3, #8              @ a0_sign[0..7]
        vsub.i16        q3, q15, q3             @ clip_sign[0..7] - a0_sign[0..7]
        vcge.s16        q15, q2, q13            @ test a0[0..7] >= pq
        vorr            q10, q10, q15           @ test clip[0..7] == 0 || a0[0..7] >= pq
        vqsub.u16       q15, q2, q4             @ a0[0..7] >= a3[0..7] ? a0[0..7]-a3[0..7] : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the abs)
        vcge.s16        q2, q4, q2              @ test a3[0..7] >= a0[0..7]
        vabs.s16        q4, q5                  @ a0[8..15]
        vshr.s16        q5, q5, #8              @ a0_sign[8..15]
        vmul.i16        q15, q15, d0[1]         @ a0[0..7] >= a3[0..7] ? 5*(a0[0..7]-a3[0..7]) : 0
        vcge.s16        q13, q4, q13            @ test a0[8..15] >= pq
        vorr            q2, q10, q2             @ test clip[0..7] == 0 || a0[0..7] >= pq || a3[0..7] >= a0[0..7]
        vsub.i16        q5, q8, q5              @ clip_sign[8..15] - a0_sign[8..15]
        vceq.i16        q8, q7, #0              @ test clip[8..15] == 0
        vshr.u16        q15, q15, #3            @ a0[0..7] >= a3[0..7] ? (5*(a0[0..7]-a3[0..7]))>>3 : 0
        vmov.32         r0, d4[1]               @ move to gp reg
        vorr            q8, q8, q13             @ test clip[8..15] == 0 || a0[8..15] >= pq
        vqsub.u16       q13, q4, q12            @ a0[8..15] >= a3[8..15] ? a0[8..15]-a3[8..15] : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the abs)
        vmov.32         r2, d5[1]
        vcge.s16        q4, q12, q4             @ test a3[8..15] >= a0[8..15]
        vshl.i64        q2, q2, #16
        vcge.s16        q12, q15, q11
        vmul.i16        q0, q13, d0[1]          @ a0[8..15] >= a3[8..15] ? 5*(a0[8..15]-a3[8..15]) : 0
        vorr            q4, q8, q4              @ test clip[8..15] == 0 || a0[8..15] >= pq || a3[8..15] >= a0[8..15]
        vshr.s64        q2, q2, #48
        and             r0, r0, r2
        vbsl            q12, q11, q15           @ FFMIN(d[0..7], clip[0..7])
        vshl.i64        q11, q4, #16
        vmov.32         r2, d8[1]
        vshr.u16        q0, q0, #3              @ a0[8..15] >= a3[8..15] ? (5*(a0[8..15]-a3[8..15]))>>3 : 0
        vorr            q2, q10, q2
        vmov.32         r12, d9[1]
        vshr.s64        q4, q11, #48
        vcge.s16        q10, q0, q7
        vbic            q2, q12, q2             @ set each d[0..7] to zero if it should not be filtered because clip[0..7] == 0 || a0[0..7] >= pq (a3 > a0 case already zeroed by saturating sub)
        vorr            q4, q8, q4
        and             r2, r2, r12
        vbsl            q10, q7, q0             @ FFMIN(d[8..15], clip[8..15])
        vmls.i16        q14, q2, q3             @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P4[0..7]
        and             r0, r0, r2
        vbic            q0, q10, q4             @ set each d[8..15] to zero if it should not be filtered because clip[8..15] == 0 || a0[8..15] >= pq (a3 > a0 case already zeroed by saturating sub)
        tst             r0, #1
        bne             1f                      @ none of the 16 pixel pairs should be updated in this case
        vmla.i16        q6, q2, q3              @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P5[0..7]
        vmls.i16        q9, q0, q5              @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P4[8..15]
        vqmovun.s16     d4, q14
        vmla.i16        q1, q0, q5              @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P5[8..15]
        vqmovun.s16     d0, q6
        vqmovun.s16     d5, q9
        vqmovun.s16     d1, q1
        vst1.64         {q2}, [r3, :128], r1
        vst1.64         {q0}, [r3, :128]
1:      vpop            {d8-d15}
        bx              lr
endfunc

@ VC-1 in-loop deblocking filter for 16 pixel pairs at boundary of horizontally-neighbouring blocks
@ On entry:
@   r0 -> top-left pel of right block
@   r1 = row stride, bytes
@   r2 = PQUANT bitstream parameter
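@
@ Transposed 16-row variant: sixteen rows of eight bytes are loaded starting
@ four bytes to the left of the edge and transposed into P1..P8 form for two
@ 8-row halves, the filter arithmetic matches the other functions, and the
@ updated P4/P5 pairs are then stored back four rows at a time, each group
@ gated by its own "should not be updated" flag (r2, r3, r5, r6).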
function ff_vc1_h_loop_filter16_neon, export=1
        push            {r4-r6,lr}
        vpush           {d8-d15}
        sub             r3, r0, #4              @ where to start reading
        vldr            d0, .Lcoeffs
        vld1.32         {d2}, [r3], r1          @ P1[0], P2[0]...
        sub             r0, r0, #1              @ where to start writing
        vld1.32         {d3}, [r3], r1
        add             r4, r0, r1, lsl #2
        vld1.32         {d10}, [r3], r1
        vld1.32         {d11}, [r3], r1
        vld1.32         {d16}, [r3], r1
        vld1.32         {d4}, [r3], r1
        vld1.32         {d8}, [r3], r1
        vtrn.8          d2, d3                  @ P1[0], P1[1], P3[0]... P2[0], P2[1], P4[0]...
        vld1.32         {d14}, [r3], r1
        vld1.32         {d5}, [r3], r1
        vtrn.8          d10, d11                @ P1[2], P1[3], P3[2]... P2[2], P2[3], P4[2]...
        vld1.32         {d6}, [r3], r1
        vld1.32         {d12}, [r3], r1
        vtrn.8          d16, d4                 @ P1[4], P1[5], P3[4]... P2[4], P2[5], P4[4]...
        vld1.32         {d13}, [r3], r1
        vtrn.16         d2, d10                 @ P1[0], P1[1], P1[2], P1[3], P5[0]... P3[0], P3[1], P3[2], P3[3], P7[0]...
        vld1.32         {d1}, [r3], r1
        vtrn.8          d8, d14                 @ P1[6], P1[7], P3[6]... P2[6], P2[7], P4[6]...
        vld1.32         {d7}, [r3], r1
        vtrn.16         d3, d11                 @ P2[0], P2[1], P2[2], P2[3], P6[0]... P4[0], P4[1], P4[2], P4[3], P8[0]...
        vld1.32         {d9}, [r3], r1
        vtrn.8          d5, d6                  @ P1[8], P1[9], P3[8]... P2[8], P2[9], P4[8]...
        vld1.32         {d15}, [r3]
        vtrn.16         d16, d8                 @ P1[4], P1[5], P1[6], P1[7], P5[4]... P3[4], P3[5], P3[6], P3[7], P7[4]...
        vtrn.16         d4, d14                 @ P2[4], P2[5], P2[6], P2[7], P6[4]... P4[4], P4[5], P4[6], P4[7], P8[4]...
        vtrn.8          d12, d13                @ P1[10], P1[11], P3[10]... P2[10], P2[11], P4[10]...
        vdup.16         q9, r2                  @ pq
        vtrn.8          d1, d7                  @ P1[12], P1[13], P3[12]... P2[12], P2[13], P4[12]...
        vtrn.32         d2, d16                 @ P1[0..7], P5[0..7]
        vtrn.16         d5, d12                 @ P1[8], P1[9], P1[10], P1[11], P5[8]... P3[8], P3[9], P3[10], P3[11], P7[8]...
        vtrn.16         d6, d13                 @ P2[8], P2[9], P2[10], P2[11], P6[8]... P4[8], P4[9], P4[10], P4[11], P8[8]...
        vtrn.8          d9, d15                 @ P1[14], P1[15], P3[14]... P2[14], P2[15], P4[14]...
        vtrn.32         d3, d4                  @ P2[0..7], P6[0..7]
        vshll.u8        q10, d2, #1             @ 2*P1[0..7]
        vtrn.32         d10, d8                 @ P3[0..7], P7[0..7]
        vshll.u8        q11, d16, #1            @ 2*P5[0..7]
        vtrn.32         d11, d14                @ P4[0..7], P8[0..7]
        vtrn.16         d1, d9                  @ P1[12], P1[13], P1[14], P1[15], P5[12]... P3[12], P3[13], P3[14], P3[15], P7[12]...
        vtrn.16         d7, d15                 @ P2[12], P2[13], P2[14], P2[15], P6[12]... P4[12], P4[13], P4[14], P4[15], P8[12]...
        vmovl.u8        q1, d3                  @ P2[0..7]
        vmovl.u8        q12, d4                 @ P6[0..7]
        vtrn.32         d5, d1                  @ P1[8..15], P5[8..15]
        vtrn.32         d6, d7                  @ P2[8..15], P6[8..15]
        vtrn.32         d12, d9                 @ P3[8..15], P7[8..15]
        vtrn.32         d13, d15                @ P4[8..15], P8[8..15]
        vmls.i16        q10, q1, d0[1]          @ 2*P1[0..7]-5*P2[0..7]
        vmovl.u8        q1, d10                 @ P3[0..7]
        vshll.u8        q2, d5, #1              @ 2*P1[8..15]
        vshll.u8        q13, d1, #1             @ 2*P5[8..15]
        vmls.i16        q11, q12, d0[1]         @ 2*P5[0..7]-5*P6[0..7]
        vmovl.u8        q14, d6                 @ P2[8..15]
        vmovl.u8        q3, d7                  @ P6[8..15]
        vmovl.u8        q15, d8                 @ P7[0..7]
        vmla.i16        q10, q1, d0[1]          @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]
        vmovl.u8        q1, d12                 @ P3[8..15]
        vmls.i16        q2, q14, d0[1]          @ 2*P1[8..15]-5*P2[8..15]
        vmovl.u8        q4, d9                  @ P7[8..15]
        vshll.u8        q14, d10, #1            @ 2*P3[0..7]
        vmls.i16        q13, q3, d0[1]          @ 2*P5[8..15]-5*P6[8..15]
        vmovl.u8        q5, d11                 @ P4[0..7]
        vmla.i16        q11, q15, d0[1]         @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]
        vshll.u8        q15, d12, #1            @ 2*P3[8..15]
        vmovl.u8        q6, d13                 @ P4[8..15]
        vmla.i16        q2, q1, d0[1]           @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]
        vmovl.u8        q1, d14                 @ P8[0..7]
        vmovl.u8        q7, d15                 @ P8[8..15]
        vmla.i16        q13, q4, d0[1]          @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]
        vmovl.u8        q4, d16                 @ P5[0..7]
        vmovl.u8        q8, d1                  @ P5[8..15]
        vmls.i16        q14, q5, d0[1]          @ 2*P3[0..7]-5*P4[0..7]
        vmls.i16        q15, q6, d0[1]          @ 2*P3[8..15]-5*P4[8..15]
        vmls.i16        q10, q5, d0[0]          @ 2*P1[0..7]-5*P2[0..7]+5*P3[0..7]-2*P4[0..7]
        vmls.i16        q11, q1, d0[0]          @ 2*P5[0..7]-5*P6[0..7]+5*P7[0..7]-2*P8[0..7]
        vsub.i16        q1, q5, q4              @ P4[0..7]-P5[0..7]
        vmls.i16        q2, q6, d0[0]           @ 2*P1[8..15]-5*P2[8..15]+5*P3[8..15]-2*P4[8..15]
        vrshr.s16       q10, q10, #3
        vmls.i16        q13, q7, d0[0]          @ 2*P5[8..15]-5*P6[8..15]+5*P7[8..15]-2*P8[8..15]
        vsub.i16        q7, q6, q8              @ P4[8..15]-P5[8..15]
        vrshr.s16       q11, q11, #3
        vmla.i16        q14, q4, d0[1]          @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]
        vrshr.s16       q2, q2, #3
        vmla.i16        q15, q8, d0[1]          @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]
        vabs.s16        q10, q10                @ a1[0..7]
        vrshr.s16       q13, q13, #3
        vmls.i16        q15, q3, d0[0]          @ 2*P3[8..15]-5*P4[8..15]+5*P5[8..15]-2*P6[8..15]
        vabs.s16        q3, q11                 @ a2[0..7]
        vabs.s16        q2, q2                  @ a1[8..15]
        vmls.i16        q14, q12, d0[0]         @ 2*P3[0..7]-5*P4[0..7]+5*P5[0..7]-2*P6[0..7]
        vabs.s16        q11, q1
        vabs.s16        q12, q13                @ a2[8..15]
        vcge.s16        q13, q10, q3            @ test a1[0..7] >= a2[0..7]
        vshr.s16        q1, q1, #8              @ clip_sign[0..7]
        vrshr.s16       q15, q15, #3
        vshr.s16        q11, q11, #1            @ clip[0..7]
        vrshr.s16       q14, q14, #3
        vbsl            q13, q3, q10            @ a3[0..7]
        vcge.s16        q3, q2, q12             @ test a1[8..15] >= a2[8..15]
        vabs.s16        q10, q15                @ a0[8..15]
        vshr.s16        q15, q15, #8            @ a0_sign[8..15]
        vbsl            q3, q12, q2             @ a3[8..15]
        vabs.s16        q2, q14                 @ a0[0..7]
        vabs.s16        q12, q7
        vshr.s16        q7, q7, #8              @ clip_sign[8..15]
        vshr.s16        q14, q14, #8            @ a0_sign[0..7]
        vshr.s16        q12, q12, #1            @ clip[8..15]
        vsub.i16        q7, q7, q15             @ clip_sign[8..15] - a0_sign[8..15]
        vqsub.u16       q15, q10, q3            @ a0[8..15] >= a3[8..15] ? a0[8..15]-a3[8..15] : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the abs)
        vcge.s16        q3, q3, q10             @ test a3[8..15] >= a0[8..15]
        vcge.s16        q10, q10, q9            @ test a0[8..15] >= pq
        vcge.s16        q9, q2, q9              @ test a0[0..7] >= pq
        vsub.i16        q1, q1, q14             @ clip_sign[0..7] - a0_sign[0..7]
        vqsub.u16       q14, q2, q13            @ a0[0..7] >= a3[0..7] ? a0[0..7]-a3[0..7] : 0  (a0 > a3 in all cases where filtering is enabled, so it makes more sense to subtract this way round than the other way and then take the abs)
        vcge.s16        q2, q13, q2             @ test a3[0..7] >= a0[0..7]
        vmul.i16        q13, q15, d0[1]         @ a0[8..15] >= a3[8..15] ? 5*(a0[8..15]-a3[8..15]) : 0
        vceq.i16        q15, q11, #0            @ test clip[0..7] == 0
        vmul.i16        q0, q14, d0[1]          @ a0[0..7] >= a3[0..7] ? 5*(a0[0..7]-a3[0..7]) : 0
        vorr            q9, q15, q9             @ test clip[0..7] == 0 || a0[0..7] >= pq
        vceq.i16        q14, q12, #0            @ test clip[8..15] == 0
        vshr.u16        q13, q13, #3            @ a0[8..15] >= a3[8..15] ? (5*(a0[8..15]-a3[8..15]))>>3 : 0
        vorr            q2, q9, q2              @ test clip[0..7] == 0 || a0[0..7] >= pq || a3[0..7] >= a0[0..7]
        vshr.u16        q0, q0, #3              @ a0[0..7] >= a3[0..7] ? (5*(a0[0..7]-a3[0..7]))>>3 : 0
        vorr            q10, q14, q10           @ test clip[8..15] == 0 || a0[8..15] >= pq
        vcge.s16        q14, q13, q12
        vmov.32         r2, d4[1]               @ move to gp reg
        vorr            q3, q10, q3             @ test clip[8..15] == 0 || a0[8..15] >= pq || a3[8..15] >= a0[8..15]
        vmov.32         r3, d5[1]
        vcge.s16        q2, q0, q11
        vbsl            q14, q12, q13           @ FFMIN(d[8..15], clip[8..15])
        vbsl            q2, q11, q0             @ FFMIN(d[0..7], clip[0..7])
        vmov.32         r5, d6[1]
        vbic            q0, q14, q10            @ set each d[8..15] to zero if it should not be filtered because clip[8..15] == 0 || a0[8..15] >= pq (a3 > a0 case already zeroed by saturating sub)
        vmov.32         r6, d7[1]
        and             r12, r2, r3
        vbic            q2, q2, q9              @ set each d[0..7] to zero if it should not be filtered because clip[0..7] == 0 || a0[0..7] >= pq (a3 > a0 case already zeroed by saturating sub)
        vmls.i16        q6, q0, q7              @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P4
        vmls.i16        q5, q2, q1              @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P4
        and             r14, r5, r6
        vmla.i16        q4, q2, q1              @ invert d[0..7] depending on clip_sign[0..7] & a0_sign[0..7], or zero it if they match, and accumulate into P5
        and             r12, r12, r14
        vqmovun.s16     d4, q6
        vmla.i16        q8, q0, q7              @ invert d[8..15] depending on clip_sign[8..15] & a0_sign[8..15], or zero it if they match, and accumulate into P5
        tst             r12, #1
        bne             4f                      @ none of the 16 pixel pairs should be updated in this case
        vqmovun.s16     d2, q5
        vqmovun.s16     d3, q4
        vqmovun.s16     d5, q8
        tst             r2, #1
        bne             1f
        vst2.8          {d2[0], d3[0]}, [r0], r1
        vst2.8          {d2[1], d3[1]}, [r0], r1
        vst2.8          {d2[2], d3[2]}, [r0], r1
        vst2.8          {d2[3], d3[3]}, [r0]
1:      add             r0, r4, r1, lsl #2
        tst             r3, #1
        bne             2f
        vst2.8          {d2[4], d3[4]}, [r4], r1
        vst2.8          {d2[5], d3[5]}, [r4], r1
        vst2.8          {d2[6], d3[6]}, [r4], r1
        vst2.8          {d2[7], d3[7]}, [r4]
2:      add             r4, r0, r1, lsl #2
        tst             r5, #1
        bne             3f
        vst2.8          {d4[0], d5[0]}, [r0], r1
        vst2.8          {d4[1], d5[1]}, [r0], r1
        vst2.8          {d4[2], d5[2]}, [r0], r1
        vst2.8          {d4[3], d5[3]}, [r0]
3:      tst             r6, #1
        bne             4f
        vst2.8          {d4[4], d5[4]}, [r4], r1
        vst2.8          {d4[5], d5[5]}, [r4], r1
        vst2.8          {d4[6], d5[6]}, [r4], r1
        vst2.8          {d4[7], d5[7]}, [r4]
4:      vpop            {d8-d15}
        pop             {r4-r6,pc}
endfunc

@ Copy at most the specified number of bytes from source to destination buffer,
@ stopping at a multiple of 16 bytes, none of which are the start of an escape sequence
@ On entry:
@   r0 -> source buffer
@   r1 = max number of bytes to copy
@   r2 -> destination buffer, optimally 8-byte aligned
@ On exit:
@   r0 = number of bytes not copied
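@
@ The rejection test below works on 16-byte chunks: for every 32-bit lane and
@ each of the four byte alignments (the unshifted load plus vext #1..#3), the
@ vbic/veor/vceq sequence checks whether the lane holds the bytes
@ 00 00 03 xx (xx <= 03) in memory order, i.e. the start of an escape
@ sequence.  The vext variants also peek at the first bytes of the following
@ chunk, so a pattern straddling a chunk boundary is caught too.  A rough
@ scalar equivalent of the per-position test, with a made-up helper name,
@ would be:
@
@   static int is_escape_start(const uint8_t *p)
@   {
@       return p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x03 && p[3] <= 0x03;
@   }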
function ff_vc1_unescape_buffer_helper_neon, export=1
        @ Offset by 48 to screen out cases that are too short for us to handle,
        @ and also make it easy to test for loop termination, or to determine
        @ whether we need an odd number of half-iterations of the loop.
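        @ The main loop is unrolled into two 16-byte half-iterations (labels
        @ 2: and 3:).  Within each, the extra-indented instructions operate on
        @ the chunk that has just been loaded, interleaved with the outdented
        @ ones that finish testing (and, if clean, storing) the previous
        @ chunk, presumably to hide load and pipeline latency.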
        subs    r1, r1, #48
        bmi     90f

        @ Set up useful constants
        vmov.i32        q0, #0x3000000
        vmov.i32        q1, #0x30000

        tst             r1, #16
        bne             1f

          vld1.8          {q8, q9}, [r0]!
          vbic            q12, q8, q0
          vext.8          q13, q8, q9, #1
          vext.8          q14, q8, q9, #2
          vext.8          q15, q8, q9, #3
          veor            q12, q12, q1
          vbic            q13, q13, q0
          vbic            q14, q14, q0
          vbic            q15, q15, q0
          vceq.i32        q12, q12, #0
          veor            q13, q13, q1
          veor            q14, q14, q1
          veor            q15, q15, q1
          vceq.i32        q13, q13, #0
          vceq.i32        q14, q14, #0
          vceq.i32        q15, q15, #0
          add             r1, r1, #16
          b               3f

1:      vld1.8          {q10, q11}, [r0]!
        vbic            q12, q10, q0
        vext.8          q13, q10, q11, #1
        vext.8          q14, q10, q11, #2
        vext.8          q15, q10, q11, #3
        veor            q12, q12, q1
        vbic            q13, q13, q0
        vbic            q14, q14, q0
        vbic            q15, q15, q0
        vceq.i32        q12, q12, #0
        veor            q13, q13, q1
        veor            q14, q14, q1
        veor            q15, q15, q1
        vceq.i32        q13, q13, #0
        vceq.i32        q14, q14, #0
        vceq.i32        q15, q15, #0
        @ Drop through...
2:        vmov            q8, q11
          vld1.8          {q9}, [r0]!
        vorr            q13, q12, q13
        vorr            q15, q14, q15
          vbic            q12, q8, q0
        vorr            q3, q13, q15
          vext.8          q13, q8, q9, #1
          vext.8          q14, q8, q9, #2
          vext.8          q15, q8, q9, #3
          veor            q12, q12, q1
        vorr            d6, d6, d7
          vbic            q13, q13, q0
          vbic            q14, q14, q0
          vbic            q15, q15, q0
          vceq.i32        q12, q12, #0
        vmov            r3, r12, d6
          veor            q13, q13, q1
          veor            q14, q14, q1
          veor            q15, q15, q1
          vceq.i32        q13, q13, #0
          vceq.i32        q14, q14, #0
          vceq.i32        q15, q15, #0
        orrs            r3, r3, r12
        bne             90f
        vst1.64         {q10}, [r2]!
3:          vmov            q10, q9
            vld1.8          {q11}, [r0]!
          vorr            q13, q12, q13
          vorr            q15, q14, q15
            vbic            q12, q10, q0
          vorr            q3, q13, q15
            vext.8          q13, q10, q11, #1
            vext.8          q14, q10, q11, #2
            vext.8          q15, q10, q11, #3
            veor            q12, q12, q1
          vorr            d6, d6, d7
            vbic            q13, q13, q0
            vbic            q14, q14, q0
            vbic            q15, q15, q0
            vceq.i32        q12, q12, #0
          vmov            r3, r12, d6
            veor            q13, q13, q1
            veor            q14, q14, q1
            veor            q15, q15, q1
            vceq.i32        q13, q13, #0
            vceq.i32        q14, q14, #0
            vceq.i32        q15, q15, #0
          orrs            r3, r3, r12
          bne             91f
          vst1.64         {q8}, [r2]!
        subs            r1, r1, #32
        bpl             2b

90:     add             r0, r1, #48
        bx              lr

91:     sub             r1, r1, #16
        b               90b
endfunc