+.endm
+
+.macro vzero vz=q15
+ // Set VZ (default q15) to zero.
+ // A vmov with immediate zero clears the whole 128-bit quadword; the
+ // shift macros below rely on this register staying all-bits-zero.
+ vmov.u32 \vz, #0
+.endm
+
+.macro vshl128 vd, vn, nbit, vz=q15
+ // Set VD to VN shifted left by NBIT. Assume VZ (default q15) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+ //
+ // NBIT is measured in bits, so `whole number of bytes' means a
+ // multiple of 8 bits: test the low THREE bits, not the low two.
+ // (With `&3', NBIT = 12 would slip through and `12 >> 3' would
+ // silently shift by one byte instead of raising an error.)
+ .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ // vext.8 concatenates VZ:VN and extracts 16 bytes starting at byte
+ // 16 - NBIT/8, which is VN shifted left with zero fill.
+ vext.8 \vd, \vz, \vn, #16 - (\nbit >> 3)
+.endm
+
+.macro vshr128 vd, vn, nbit, vz=q15
+ // Set VD to VN shifted right by NBIT. Assume VZ (default q15) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+ //
+ // NBIT is in bits; a whole number of bytes is a multiple of 8, so
+ // the guard must test the low three bits (`&7'), matching the
+ // `\nbit >> 3' byte conversion below.
+ .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ // vext.8 concatenates VN:VZ and extracts 16 bytes starting at byte
+ // NBIT/8, which is VN shifted right with zero fill.
+ vext.8 \vd, \vn, \vz, #\nbit >> 3
+.endm
+
+// Apply decoration decor to register name reg.
+// Works by token-pasting the register name onto `_REGFORM_' and letting
+// the per-register macros below supply the type letter and number, since
+// the preprocessor cannot split a token like `d7' arithmetically.
+#define _REGFORM(reg, decor) _GLUE(_REGFORM_, reg)(decor)
+
+// Internal macros: `_REGFORM_r(decor)' applies decoration decor to register
+// name r.
+//
+// One entry per architectural register: s0--s31, d0--d31, q0--q15.
+
+#define _REGFORM_s0(decor) _DECOR(s, decor, 0)
+#define _REGFORM_s1(decor) _DECOR(s, decor, 1)
+#define _REGFORM_s2(decor) _DECOR(s, decor, 2)
+#define _REGFORM_s3(decor) _DECOR(s, decor, 3)
+#define _REGFORM_s4(decor) _DECOR(s, decor, 4)
+#define _REGFORM_s5(decor) _DECOR(s, decor, 5)
+#define _REGFORM_s6(decor) _DECOR(s, decor, 6)
+#define _REGFORM_s7(decor) _DECOR(s, decor, 7)
+#define _REGFORM_s8(decor) _DECOR(s, decor, 8)
+#define _REGFORM_s9(decor) _DECOR(s, decor, 9)
+#define _REGFORM_s10(decor) _DECOR(s, decor, 10)
+#define _REGFORM_s11(decor) _DECOR(s, decor, 11)
+#define _REGFORM_s12(decor) _DECOR(s, decor, 12)
+#define _REGFORM_s13(decor) _DECOR(s, decor, 13)
+#define _REGFORM_s14(decor) _DECOR(s, decor, 14)
+#define _REGFORM_s15(decor) _DECOR(s, decor, 15)
+#define _REGFORM_s16(decor) _DECOR(s, decor, 16)
+#define _REGFORM_s17(decor) _DECOR(s, decor, 17)
+#define _REGFORM_s18(decor) _DECOR(s, decor, 18)
+#define _REGFORM_s19(decor) _DECOR(s, decor, 19)
+#define _REGFORM_s20(decor) _DECOR(s, decor, 20)
+#define _REGFORM_s21(decor) _DECOR(s, decor, 21)
+#define _REGFORM_s22(decor) _DECOR(s, decor, 22)
+#define _REGFORM_s23(decor) _DECOR(s, decor, 23)
+#define _REGFORM_s24(decor) _DECOR(s, decor, 24)
+#define _REGFORM_s25(decor) _DECOR(s, decor, 25)
+#define _REGFORM_s26(decor) _DECOR(s, decor, 26)
+#define _REGFORM_s27(decor) _DECOR(s, decor, 27)
+#define _REGFORM_s28(decor) _DECOR(s, decor, 28)
+#define _REGFORM_s29(decor) _DECOR(s, decor, 29)
+#define _REGFORM_s30(decor) _DECOR(s, decor, 30)
+#define _REGFORM_s31(decor) _DECOR(s, decor, 31)
+
+#define _REGFORM_d0(decor) _DECOR(d, decor, 0)
+#define _REGFORM_d1(decor) _DECOR(d, decor, 1)
+#define _REGFORM_d2(decor) _DECOR(d, decor, 2)
+#define _REGFORM_d3(decor) _DECOR(d, decor, 3)
+#define _REGFORM_d4(decor) _DECOR(d, decor, 4)
+#define _REGFORM_d5(decor) _DECOR(d, decor, 5)
+#define _REGFORM_d6(decor) _DECOR(d, decor, 6)
+#define _REGFORM_d7(decor) _DECOR(d, decor, 7)
+#define _REGFORM_d8(decor) _DECOR(d, decor, 8)
+#define _REGFORM_d9(decor) _DECOR(d, decor, 9)
+#define _REGFORM_d10(decor) _DECOR(d, decor, 10)
+#define _REGFORM_d11(decor) _DECOR(d, decor, 11)
+#define _REGFORM_d12(decor) _DECOR(d, decor, 12)
+#define _REGFORM_d13(decor) _DECOR(d, decor, 13)
+#define _REGFORM_d14(decor) _DECOR(d, decor, 14)
+#define _REGFORM_d15(decor) _DECOR(d, decor, 15)
+#define _REGFORM_d16(decor) _DECOR(d, decor, 16)
+#define _REGFORM_d17(decor) _DECOR(d, decor, 17)
+#define _REGFORM_d18(decor) _DECOR(d, decor, 18)
+#define _REGFORM_d19(decor) _DECOR(d, decor, 19)
+#define _REGFORM_d20(decor) _DECOR(d, decor, 20)
+#define _REGFORM_d21(decor) _DECOR(d, decor, 21)
+#define _REGFORM_d22(decor) _DECOR(d, decor, 22)
+#define _REGFORM_d23(decor) _DECOR(d, decor, 23)
+#define _REGFORM_d24(decor) _DECOR(d, decor, 24)
+#define _REGFORM_d25(decor) _DECOR(d, decor, 25)
+#define _REGFORM_d26(decor) _DECOR(d, decor, 26)
+#define _REGFORM_d27(decor) _DECOR(d, decor, 27)
+#define _REGFORM_d28(decor) _DECOR(d, decor, 28)
+#define _REGFORM_d29(decor) _DECOR(d, decor, 29)
+#define _REGFORM_d30(decor) _DECOR(d, decor, 30)
+#define _REGFORM_d31(decor) _DECOR(d, decor, 31)
+
+#define _REGFORM_q0(decor) _DECOR(q, decor, 0)
+#define _REGFORM_q1(decor) _DECOR(q, decor, 1)
+#define _REGFORM_q2(decor) _DECOR(q, decor, 2)
+#define _REGFORM_q3(decor) _DECOR(q, decor, 3)
+#define _REGFORM_q4(decor) _DECOR(q, decor, 4)
+#define _REGFORM_q5(decor) _DECOR(q, decor, 5)
+#define _REGFORM_q6(decor) _DECOR(q, decor, 6)
+#define _REGFORM_q7(decor) _DECOR(q, decor, 7)
+#define _REGFORM_q8(decor) _DECOR(q, decor, 8)
+#define _REGFORM_q9(decor) _DECOR(q, decor, 9)
+#define _REGFORM_q10(decor) _DECOR(q, decor, 10)
+#define _REGFORM_q11(decor) _DECOR(q, decor, 11)
+#define _REGFORM_q12(decor) _DECOR(q, decor, 12)
+#define _REGFORM_q13(decor) _DECOR(q, decor, 13)
+#define _REGFORM_q14(decor) _DECOR(q, decor, 14)
+#define _REGFORM_q15(decor) _DECOR(q, decor, 15)
+
+// `_LOPART(n)' and `_HIPART(n)' return the numbers of the register halves of
+// register n, i.e., 2*n and 2*n + 1 respectively.
+// The preprocessor cannot do arithmetic on pasted tokens, so the results
+// are table-driven via the `_LOPART_n'/`_HIPART_n' entries below.
+#define _LOPART(n) _GLUE(_LOPART_, n)
+#define _HIPART(n) _GLUE(_HIPART_, n)
+
+// Internal macros: `_LOPART_n' and `_HIPART_n' return the numbers of the
+// register halves of register n, i.e., 2*n and 2*n + 1 respectively.
+
+#define _LOPART_0 0
+#define _HIPART_0 1
+#define _LOPART_1 2
+#define _HIPART_1 3
+#define _LOPART_2 4
+#define _HIPART_2 5
+#define _LOPART_3 6
+#define _HIPART_3 7
+#define _LOPART_4 8
+#define _HIPART_4 9
+#define _LOPART_5 10
+#define _HIPART_5 11
+#define _LOPART_6 12
+#define _HIPART_6 13
+#define _LOPART_7 14
+#define _HIPART_7 15
+#define _LOPART_8 16
+#define _HIPART_8 17
+#define _LOPART_9 18
+#define _HIPART_9 19
+#define _LOPART_10 20
+#define _HIPART_10 21
+#define _LOPART_11 22
+#define _HIPART_11 23
+#define _LOPART_12 24
+#define _HIPART_12 25
+#define _LOPART_13 26
+#define _HIPART_13 27
+#define _LOPART_14 28
+#define _HIPART_14 29
+#define _LOPART_15 30
+#define _HIPART_15 31
+
+// Return the register number of the pair containing register n, i.e.,
+// floor(n/2).
+#define _PAIR(n) _GLUE(_PAIR_, n)
+
+// Internal macros: `_PAIR_n' returns the register number of the pair
+// containing register n, i.e., floor(n/2).
+#define _PAIR_0 0
+#define _PAIR_1 0
+#define _PAIR_2 1
+#define _PAIR_3 1
+#define _PAIR_4 2
+#define _PAIR_5 2
+#define _PAIR_6 3
+#define _PAIR_7 3
+#define _PAIR_8 4
+#define _PAIR_9 4
+#define _PAIR_10 5
+#define _PAIR_11 5
+#define _PAIR_12 6
+#define _PAIR_13 6
+#define _PAIR_14 7
+#define _PAIR_15 7
+#define _PAIR_16 8
+#define _PAIR_17 8
+#define _PAIR_18 9
+#define _PAIR_19 9
+#define _PAIR_20 10
+#define _PAIR_21 10
+#define _PAIR_22 11
+#define _PAIR_23 11
+#define _PAIR_24 12
+#define _PAIR_25 12
+#define _PAIR_26 13
+#define _PAIR_27 13
+#define _PAIR_28 14
+#define _PAIR_29 14
+#define _PAIR_30 15
+#define _PAIR_31 15
+
+// Apply decoration decor to register number n of type ty. Decorations are
+// as follows.
+//
+// decor types meaning
+// Q s, d the NEON qN register containing this one
+// D s the NEON dN register containing this one
+// D0 q the low 64-bit half of this one
+// D1 q the high 64-bit half of this one
+// S0 d, q the first 32-bit piece of this one
+// S1 d, q the second 32-bit piece of this one
+// S2 q the third 32-bit piece of this one
+// S3 q the fourth 32-bit piece of this one
+// Bn q the nth byte of this register, as a scalar
+// Hn q the nth halfword of this register, as a scalar
+// Wn q the nth word of this register, as a scalar
+#define _DECOR(ty, decor, n) _DECOR_##ty##_##decor(n)
+
+// Internal macros: `_DECOR_ty_decor(n)' applies decoration decor to register
+// number n of type ty.
+
+// sN lives in dN/2, which lives in qN/4: hence _PAIR applied twice for Q.
+#define _DECOR_s_Q(n) GLUE(q, _PAIR(_PAIR(n)))
+#define _DECOR_s_D(n) GLUE(d, _PAIR(n))
+
+// dN is half of qN/2; its 32-bit pieces are s(2N) and s(2N + 1).
+#define _DECOR_d_Q(n) GLUE(q, _PAIR(n))
+#define _DECOR_d_S0(n) GLUE(s, _LOPART(n))
+// S1 is the SECOND (high) 32-bit piece, s(2N + 1) -- i.e., _HIPART, not a
+// copy of the S0/_LOPART expansion.
+#define _DECOR_d_S1(n) GLUE(s, _HIPART(n))
+
+// qN splits into d(2N) (low) and d(2N + 1) (high); the sN pieces need two
+// levels of _LOPART/_HIPART. Scalar element references (Bn/Hn/Wn) must be
+// expressed as `dM[i]' because NEON scalars are indexed within d registers.
+#define _DECOR_q_D0(n) GLUE(d, _LOPART(n))
+#define _DECOR_q_D1(n) GLUE(d, _HIPART(n))
+#define _DECOR_q_S0(n) GLUE(s, _LOPART(_LOPART(n)))
+#define _DECOR_q_S1(n) GLUE(s, _HIPART(_LOPART(n)))
+#define _DECOR_q_S2(n) GLUE(s, _LOPART(_HIPART(n)))
+#define _DECOR_q_S3(n) GLUE(s, _HIPART(_HIPART(n)))
+#define _DECOR_q_W0(n) GLUE(d, _LOPART(n))[0]
+#define _DECOR_q_W1(n) GLUE(d, _LOPART(n))[1]
+#define _DECOR_q_W2(n) GLUE(d, _HIPART(n))[0]
+#define _DECOR_q_W3(n) GLUE(d, _HIPART(n))[1]
+#define _DECOR_q_H0(n) GLUE(d, _LOPART(n))[0]
+#define _DECOR_q_H1(n) GLUE(d, _LOPART(n))[1]
+#define _DECOR_q_H2(n) GLUE(d, _LOPART(n))[2]
+#define _DECOR_q_H3(n) GLUE(d, _LOPART(n))[3]
+#define _DECOR_q_H4(n) GLUE(d, _HIPART(n))[0]
+#define _DECOR_q_H5(n) GLUE(d, _HIPART(n))[1]
+#define _DECOR_q_H6(n) GLUE(d, _HIPART(n))[2]
+#define _DECOR_q_H7(n) GLUE(d, _HIPART(n))[3]
+#define _DECOR_q_B0(n) GLUE(d, _LOPART(n))[0]
+#define _DECOR_q_B1(n) GLUE(d, _LOPART(n))[1]
+#define _DECOR_q_B2(n) GLUE(d, _LOPART(n))[2]
+#define _DECOR_q_B3(n) GLUE(d, _LOPART(n))[3]
+#define _DECOR_q_B4(n) GLUE(d, _LOPART(n))[4]
+#define _DECOR_q_B5(n) GLUE(d, _LOPART(n))[5]
+#define _DECOR_q_B6(n) GLUE(d, _LOPART(n))[6]
+#define _DECOR_q_B7(n) GLUE(d, _LOPART(n))[7]
+#define _DECOR_q_B8(n) GLUE(d, _HIPART(n))[0]
+#define _DECOR_q_B9(n) GLUE(d, _HIPART(n))[1]
+#define _DECOR_q_B10(n) GLUE(d, _HIPART(n))[2]
+#define _DECOR_q_B11(n) GLUE(d, _HIPART(n))[3]
+#define _DECOR_q_B12(n) GLUE(d, _HIPART(n))[4]
+#define _DECOR_q_B13(n) GLUE(d, _HIPART(n))[5]
+#define _DECOR_q_B14(n) GLUE(d, _HIPART(n))[6]
+#define _DECOR_q_B15(n) GLUE(d, _HIPART(n))[7]
+
+// Macros for navigating the NEON register hierarchy. These are the public
+// entry points: e.g., `D0(q4)' expands to `d8', `Q(d5)' to `q2'.
+#define S0(reg) _REGFORM(reg, S0)
+#define S1(reg) _REGFORM(reg, S1)
+#define S2(reg) _REGFORM(reg, S2)
+#define S3(reg) _REGFORM(reg, S3)
+#define D(reg) _REGFORM(reg, D)
+#define D0(reg) _REGFORM(reg, D0)
+#define D1(reg) _REGFORM(reg, D1)
+#define Q(reg) _REGFORM(reg, Q)
+
+// Macros for indexing quadword registers: the Ith byte/halfword/word of
+// REG as a scalar reference. I must be a literal so it can paste.
+#define QB(reg, i) _REGFORM(reg, B##i)
+#define QH(reg, i) _REGFORM(reg, H##i)
+#define QW(reg, i) _REGFORM(reg, W##i)
+
+// Macros for converting vldm/vstm ranges: `QQ(q0, q3)' becomes the
+// doubleword range `d0-d7' acceptable to vldm/vstm.
+#define QQ(qlo, qhi) D0(qlo)-D1(qhi)
+
+// Stack management and unwinding.
+
+// Establish FP (default r11) as frame pointer, at OFFSET bytes above sp,
+// and emit the matching ARM EHABI `.setfp' unwind annotation. Also defines
+// a `dropfp' macro which tears the frame down again with the same
+// FP/OFFSET, so callers can't get the two out of sync.
+.macro setfp fp=r11, offset=0
+ .if \offset == 0
+ mov \fp, sp
+ .setfp \fp, sp
+ .else
+ add \fp, sp, #\offset
+ .setfp \fp, sp, #\offset
+ .endif
+ .macro dropfp; _dropfp \fp, \offset; .endm
+ .L$_frameptr_p = -1
+.endm
+
+// Internal: restore sp from the frame pointer set up by `setfp' and retire
+// the one-shot `dropfp' macro.
+.macro _dropfp fp, offset=0
+ .if \offset == 0
+ mov sp, \fp
+ .else
+ sub sp, \fp, #\offset
+ .endif
+ .purgem dropfp
+ .L$_frameptr_p = 0
+.endm
+
+// Allocate N bytes of stack, with unwind annotation.
+.macro stalloc n
+ sub sp, sp, #\n
+ .pad #\n
+.endm
+
+// Free N bytes of stack; a negative `.pad' undoes the earlier annotation.
+.macro stfree n
+ add sp, sp, #\n
+ .pad #-\n
+.endm
+
+// Push core registers RR, recording them for the unwinder with `.save'.
+.macro pushreg rr:vararg
+ push {\rr}
+ .save {\rr}
+.endm
+
+// Pop core registers RR. (EHABI annotations describe the prologue only,
+// so no directive is needed here.)
+.macro popreg rr:vararg
+ pop {\rr}
+.endm
+
+// Push VFP/NEON registers RR, recording them with `.vsave'.
+.macro pushvfp rr:vararg
+ vstmdb sp!, {\rr}
+ .vsave {\rr}
+.endm
+
+// Pop VFP/NEON registers RR.
+.macro popvfp rr:vararg
+ vldmia sp!, {\rr}
+.endm
+
+// No-op on ARM32: kept so shared code can mark the end of the prologue
+// uniformly across architectures.
+.macro endprologue
+.endm
+
+// No need for prologue markers on ARM.
+#define FUNC_POSTHOOK(_) .L$_prologue_p = -1
+
+#endif
+
+///--------------------------------------------------------------------------
+/// AArch64-specific hacking.
+
+#if CPUFAM_ARM64
+
+// Set the function hooks. AArch64 uses DWARF CFI rather than EHABI, so
+// functions open/close a `.cfi_startproc'/`.cfi_endproc' bracket.
+#define FUNC_PREHOOK(_) .balign 4
+#define FUNC_POSTHOOK(_) .cfi_startproc; .L$_prologue_p = -1
+#define ENDFUNC_HOOK(_) .cfi_endproc
+
+// Call external subroutine at ADDR, possibly via PLT.
+// (On AArch64 a plain `bl' is sufficient; the linker interposes a PLT
+// veneer when needed.)
+.macro callext addr
+ bl \addr
+.endm
+
+// Load address of external symbol ADDR into REG: via the GOT when
+// building position-independent code, otherwise with a direct
+// adrp/add pair (PC-relative, +/-4GB reach).
+.macro leaext reg, addr
+#if WANT_PIC
+ adrp \reg, :got:\addr
+ ldr \reg, [\reg, #:got_lo12:\addr]
+#else
+ adrp \reg, \addr
+ add \reg, \reg, #:lo12:\addr
+#endif
+.endm
+
+.macro vzero vz=v31
+ // Set VZ (default v31) to zero.
+ // Duplicating wzr across all four 32-bit lanes clears the whole
+ // 128-bit vector register.
+ dup \vz\().4s, wzr
+.endm
+
+.macro vshl128 vd, vn, nbit, vz=v31
+ // Set VD to VN shifted left by NBIT. Assume VZ (default v31) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+ //
+ // NBIT is in bits; `whole number of bytes' means a multiple of 8,
+ // so test the low three bits (`&7'). The old `&3' test let e.g.
+ // NBIT = 12 through, and `12 >> 3' then shifted by one byte
+ // silently instead of raising the error.
+ .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ // ext concatenates VZ:VN and extracts 16 bytes from offset
+ // 16 - NBIT/8: VN shifted left with zero fill.
+ ext \vd\().16b, \vz\().16b, \vn\().16b, #16 - (\nbit >> 3)
+.endm
+
+.macro vshr128 vd, vn, nbit, vz=v31
+ // Set VD to VN shifted right by NBIT. Assume VZ (default v31) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+ //
+ // As for vshl128: NBIT is in bits, so the whole-bytes guard must
+ // check the low three bits (`&7'), matching `\nbit >> 3' below.
+ .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ // ext concatenates VN:VZ and extracts 16 bytes from offset NBIT/8:
+ // VN shifted right with zero fill.
+ ext \vd\().16b, \vn\().16b, \vz\().16b, #\nbit >> 3
+.endm
+
+// Stack management and unwinding.
+
+// Establish FP (default x29) as frame pointer at sp + OFFSET, updating the
+// CFI state to unwind through FP instead of sp. Defines a matching
+// `dropfp' macro so teardown reuses the same FP/OFFSET.
+.macro setfp fp=x29, offset=0
+ // If you're just going through the motions with a fixed-size stack frame,
+ // then you want to say `add x29, sp, #OFFSET' directly, which will avoid
+ // pointlessly restoring sp later.
+ .if \offset == 0
+ mov \fp, sp
+ .cfi_def_cfa_register \fp
+ .else
+ add \fp, sp, #\offset
+ .cfi_def_cfa_register \fp
+ // FP sits OFFSET above sp, so the CFA is OFFSET closer to FP.
+ .cfi_adjust_cfa_offset -\offset
+ .endif
+ .macro dropfp; _dropfp \fp, \offset; .endm
+ .L$_frameptr_p = -1
+.endm
+
+// Internal: restore sp from FP, switch the CFA back to sp, and retire the
+// one-shot `dropfp' macro.
+.macro _dropfp fp, offset=0
+ .if \offset == 0
+ mov sp, \fp
+ .cfi_def_cfa_register sp
+ .else
+ sub sp, \fp, #\offset
+ .cfi_def_cfa_register sp
+ .cfi_adjust_cfa_offset +\offset
+ .endif
+ .purgem dropfp
+ .L$_frameptr_p = 0
+.endm
+
+// Allocate N bytes of stack, keeping the CFI offset in step.
+.macro stalloc n
+ sub sp, sp, #\n
+ .cfi_adjust_cfa_offset +\n
+.endm
+
+// Free N bytes of stack, keeping the CFI offset in step.
+.macro stfree n
+ add sp, sp, #\n
+ .cfi_adjust_cfa_offset -\n
+.endm
+
+// Push one register X, or a pair X, Y. A full 16-byte slot is reserved
+// either way, since sp must stay 16-byte aligned on AArch64.
+.macro pushreg x, y=nil
+ .ifeqs "\y", "nil"
+ str \x, [sp, #-16]!
+ .cfi_adjust_cfa_offset +16
+ .cfi_rel_offset \x, 0
+ .else
+ stp \x, \y, [sp, #-16]!
+ .cfi_adjust_cfa_offset +16
+ .cfi_rel_offset \x, 0
+ .cfi_rel_offset \y, 8
+ .endif
+.endm
+
+// Pop one register X, or a pair X, Y, undoing `pushreg' and its CFI.
+.macro popreg x, y=nil
+ .ifeqs "\y", "nil"
+ ldr \x, [sp], #16
+ .cfi_restore \x
+ .cfi_adjust_cfa_offset -16
+ .else
+ ldp \x, \y, [sp], #16
+ .cfi_restore \x
+ .cfi_restore \y
+ .cfi_adjust_cfa_offset -16
+ .endif
+.endm
+
+// Store X at [sp, Y], or the pair X, Y at [sp, Z], recording the saved
+// location(s) for the unwinder. In the two-argument form Y is a bare
+// offset (no `#'), since it is reused in the `.cfi_rel_offset'.
+.macro savereg x, y, z=nil
+ .ifeqs "\z", "nil"
+ str \x, [sp, \y]
+ .cfi_rel_offset \x, \y
+ .else
+ stp \x, \y, [sp, #\z]
+ .cfi_rel_offset \x, \z
+ .cfi_rel_offset \y, \z + 8
+ .endif
+.endm
+
+// Reload X from [sp, Y], or the pair X, Y from [sp, Z], undoing `savereg'.
+.macro rstrreg x, y, z=nil
+ .ifeqs "\z", "nil"
+ ldr \x, [sp, \y]
+ .cfi_restore \x
+ .else
+ ldp \x, \y, [sp, #\z]
+ .cfi_restore \x
+ .cfi_restore \y
+ .endif
+.endm
+
+// No-op on AArch64: kept so shared code can mark the end of the prologue
+// uniformly across architectures.
+.macro endprologue
+.endm