+/// AArch64-specific hacking.
+
+#if CPUFAM_ARM64
+
+// Set the function hooks.
+#define FUNC_PREHOOK(_) .balign 4
+#define FUNC_POSTHOOK(_) .cfi_startproc; .L$_prologue_p = -1
+#define ENDFUNC_HOOK(_) .cfi_endproc
+
+// Call external subroutine at ADDR, possibly via PLT.
+.macro callext addr
+ bl \addr
+.endm
+
+// Load address of external symbol ADDR into REG.
+.macro leaext reg, addr
+#if WANT_PIC
+ adrp \reg, :got:\addr
+ ldr \reg, [\reg, #:got_lo12:\addr]
+#else
+ adrp \reg, \addr
+ add \reg, \reg, #:lo12:\addr
+#endif
+.endm
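+
+// For example, to fetch the first word of an external table (`tab' is an
+// illustrative name, not one used by the library):
+//
+//	leaext	x0, tab			// x0 := &tab, via the GOT under PIC
+//	ldr	w1, [x0]		// w1 := first word of the table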
+
+.macro vzero vz=v31
+ // Set VZ (default v31) to zero.
+ dup \vz\().4s, wzr
+.endm
+
+.macro vshl128 vd, vn, nbit, vz=v31
+ // Set VD to VN shifted left by NBIT. Assume VZ (default v31) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+ .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ ext \vd\().16b, \vz\().16b, \vn\().16b, #16 - (\nbit >> 3)
+.endm
+
+.macro vshr128 vd, vn, nbit, vz=v31
+ // Set VD to VN shifted right by NBIT. Assume VZ (default v31) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+ .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ ext \vd\().16b, \vn\().16b, \vz\().16b, #\nbit >> 3
+.endm
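+
+// For example, to shift the 128-bit quantity in v0 as a whole, using the
+// default zero register v31 (a sketch):
+//
+//	vzero				// v31 := 0, once, ahead of time
+//	vshl128	v1, v0, 32		// v1 := v0 << 32, i.e., by four bytes
+//	vshr128	v2, v0, 8		// v2 := v0 >> 8, i.e., by one byte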
+
+// Stack management and unwinding.
+.macro setfp fp=x29, offset=0
+ // Set FP (default x29) to SP + OFFSET and switch the CFA to be tracked
+ // through FP.  If you're just going through the motions with a
+ // fixed-size stack frame, then you may prefer to say `add x29, sp,
+ // #OFFSET' directly, which will avoid pointlessly restoring sp later.
+ .if \offset == 0
+ mov \fp, sp
+ .cfi_def_cfa_register \fp
+ .else
+ add \fp, sp, #\offset
+ .cfi_def_cfa_register \fp
+ .cfi_adjust_cfa_offset -\offset
+ .endif
+ .macro dropfp; _dropfp \fp, \offset; .endm
+ .L$_frameptr_p = -1
+.endm
+
+.macro _dropfp fp, offset=0
+ .if \offset == 0
+ mov sp, \fp
+ .cfi_def_cfa_register sp
+ .else
+ sub sp, \fp, #\offset
+ .cfi_def_cfa_register sp
+ .cfi_adjust_cfa_offset +\offset
+ .endif
+ .purgem dropfp
+ .L$_frameptr_p = 0
+.endm
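+
+// For example, a function which moves the stack pointer around might
+// bracket that work like this (a sketch):
+//
+//	setfp				// x29 := sp; unwinding follows x29
+//	...				// adjust sp freely in here
+//	dropfp				// sp := x29; back to tracking sp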
+
+.macro stalloc n
+ sub sp, sp, #\n
+ .cfi_adjust_cfa_offset +\n
+.endm
+
+.macro stfree n
+ add sp, sp, #\n
+ .cfi_adjust_cfa_offset -\n
+.endm
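+
+// For example, to claim and release 64 bytes of temporary workspace while
+// keeping the unwinding information in step (illustrative):
+//
+//	stalloc	64			// sp -= 64
+//	...
+//	stfree	64			// sp += 64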
+
+.macro pushreg x, y=nil
+ .ifeqs "\y", "nil"
+ str \x, [sp, #-16]!
+ .cfi_adjust_cfa_offset +16
+ .cfi_rel_offset \x, 0
+ .else
+ stp \x, \y, [sp, #-16]!
+ .cfi_adjust_cfa_offset +16
+ .cfi_rel_offset \x, 0
+ .cfi_rel_offset \y, 8
+ .endif
+.endm
+
+.macro popreg x, y=nil
+ .ifeqs "\y", "nil"
+ ldr \x, [sp], #16
+ .cfi_restore \x
+ .cfi_adjust_cfa_offset -16
+ .else
+ ldp \x, \y, [sp], #16
+ .cfi_restore \x
+ .cfi_restore \y
+ .cfi_adjust_cfa_offset -16
+ .endif
+.endm
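+
+// A typical prologue and epilogue built from the macros above might look
+// like this (a sketch, not taken from any particular function):
+//
+//	pushreg	x29, x30		// save frame pointer and link register
+//	setfp				// establish the new frame pointer
+//	endprologue
+//	...
+//	dropfp				// restore sp from the frame pointer
+//	popreg	x29, x30		// recover x29 and x30
+//	ret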
+
+.macro savereg x, y, z=nil
+ .ifeqs "\z", "nil"
+ str \x, [sp, #\y]
+ .cfi_rel_offset \x, \y
+ .else
+ stp \x, \y, [sp, #\z]
+ .cfi_rel_offset \x, \z
+ .cfi_rel_offset \y, \z + 8
+ .endif
+.endm
+
+.macro rstrreg x, y, z=nil
+ .ifeqs "\z", "nil"
+ ldr \x, [sp, #\y]
+ .cfi_restore \x
+ .else
+ ldp \x, \y, [sp, #\z]
+ .cfi_restore \x
+ .cfi_restore \y
+ .endif
+.endm
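+
+// For example, to stash registers at known offsets within a frame that has
+// already been allocated, e.g., with `stalloc' (illustrative):
+//
+//	savereg	x19, x20, 0		// x19 -> [sp, #0], x20 -> [sp, #8]
+//	savereg	x21, 16			// x21 -> [sp, #16]
+//	...
+//	rstrreg	x19, x20, 0
+//	rstrreg	x21, 16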
+
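+// Mark the end of the function prologue; there is nothing to emit here.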
+.macro endprologue
+.endm
+
+// cmov RD, RN, CC: set RD to RN if CC is satisfied, otherwise do nothing
+.macro cmov rd, rn, cc
+ csel \rd, \rn, \rd, \cc
+.endm
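+
+// For example:
+//
+//	cmp	x1, x2
+//	cmov	x0, x1, lo		// x0 := x1 if x1 < x2 (unsigned)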
+
+// Notational improvement: write `csel.CC' etc., rather than `csel ..., CC'.
+#define _COND(_) \
+ _(eq) _(ne) _(cs) _(cc) _(vs) _(vc) _(mi) _(pl) \
+ _(ge) _(lt) _(gt) _(le) _(hi) _(ls) _(al) _(nv) \
+ _(hs) _(lo)
+#define _INST(_) \
+ _(ccmp) _(ccmn) \
+ _(csel) _(cmov) \
+ _(csinc) _(cinc) _(cset) \
+ _(csneg) _(cneg) \
+ _(csinv) _(cinv) _(csetm)
+#define _CONDVAR(cc) _definstvar cc;
+#define _INSTVARS(inst) \
+ .macro _definstvar cc; \
+ .macro inst.\cc args:vararg; inst \args, \cc; .endm; \
+ .endm; \
+ _COND(_CONDVAR); \
+ .purgem _definstvar;
+ _INST(_INSTVARS)
+#undef _COND
+#undef _INST
+#undef _CONDVAR
+#undef _INSTVARS
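+
+// With these definitions in place, one can write, for example,
+//
+//	cmp	x0, x1
+//	cset.ls	w2			// w2 := (x0 <= x1, unsigned) ? 1 : 0
+//	cmov.lo	x0, x1			// x0 := max(x0, x1), unsigned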
+
+// Flag bits for `ccmp' and friends.
+#define CCMP_N 8
+#define CCMP_Z 4
+#define CCMP_C 2
+#define CCMP_V 1
+
+// Flag settings for satisfying conditions.
+#define CCMP_MI CCMP_N
+#define CCMP_PL 0
+#define CCMP_EQ CCMP_Z
+#define CCMP_NE 0
+#define CCMP_CS CCMP_C
+#define CCMP_HS CCMP_C
+#define CCMP_CC 0
+#define CCMP_LO 0
+#define CCMP_VS CCMP_V
+#define CCMP_VC 0
+#define CCMP_HI CCMP_C
+#define CCMP_LS 0
+#define CCMP_LT CCMP_N
+#define CCMP_GE 0
+#define CCMP_LE CCMP_N
+#define CCMP_GT 0
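+
+// For example, to test `x0 == 0 && x1 == 0' with a single branch
+// (illustrative):
+//
+//	cmp	x0, #0
+//	ccmp.eq	x1, #0, #CCMP_NE	// if x0 == 0, compare x1 against
+//					//   zero; otherwise force `ne'
+//	b.eq	1f			// taken only if both are zero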
+
+#endif
+
+///--------------------------------------------------------------------------