/// -*- mode: asm; asm-comment-char: ?/ -*-
///
-/// Fancy SIMD implementation of Salsa20
+/// Common definitions for assembler source files
///
/// (c) 2015 Straylight/Edgeware
///
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.
+#ifndef CATACOMB_ASM_COMMON_H
+#define CATACOMB_ASM_COMMON_H
+
///--------------------------------------------------------------------------
/// General definitions.
# define INTADDR__1(addr, got) addr
#endif
-// Permutations for SIMD instructions. SHUF(D, C, B, A) is an immediate,
-// suitable for use in `pshufd' or `shufpd', which copies element D
-// (0 <= D < 4) of the source to element 3 of the destination, element C to
-// element 2, element B to element 1, and element A to element 0.
-#define SHUF(d, c, b, a) (64*(d) + 16*(c) + 4*(b) + (a))
+// Permutations for SIMD instructions. SHUF(A, B, C, D) is an immediate,
+// suitable for use in `pshufd' or `shufpd', which copies element A
+// (0 <= A < 4) of the source to element 0 of the destination, element B to
+// element 1, element C to element 2, and element D to element 3.
+#define SHUF(a, b, c, d) ((a) + 4*(b) + 16*(c) + 64*(d))
// Map register names to their individual pieces.
# define _DECOR_abcd_q(reg) r##reg##x
#endif
-#define _DECOR_xp_b(reg) reg##l
#define _DECOR_xp_w(reg) reg
#define _DECOR_xp_d(reg) e##reg
#if CPUFAM_AMD64
+# define _DECOR_xp_b(reg) reg##l
# define _DECOR_xp_q(reg) r##reg
#endif
# define _DECOR_rn_r(reg) reg
#endif
+#define _DECOR_mem_b(addr) byte ptr addr
+#define _DECOR_mem_w(addr) word ptr addr
+#define _DECOR_mem_d(addr) dword ptr addr
+#if CPUFAM_AMD64
+# define _DECOR_mem_q(addr) qword ptr addr
+#endif
+
+#define _DECOR_imm_b(imm) byte imm
+#define _DECOR_imm_w(imm) word imm
+#define _DECOR_imm_d(imm) dword imm
+#if CPUFAM_AMD64
+# define _DECOR_imm_q(imm) qword imm
+#endif
+
#if CPUFAM_X86
# define _DECOR_abcd_r(reg) e##reg##x
# define _DECOR_xp_r(reg) e##reg
# define _DECOR_ip_r(reg) e##reg
+# define _DECOR_mem_r(addr) dword ptr addr
+# define _DECOR_imm_r(imm) dword imm
#endif
#if CPUFAM_AMD64
# define _DECOR_abcd_r(reg) r##reg##x
# define _DECOR_xp_r(reg) r##reg
# define _DECOR_ip_r(reg) r##reg
-#endif
-
-#define _DECOR_mem_b(addr) byte ptr addr
-#define _DECOR_mem_w(addr) word ptr addr
-#define _DECOR_mem_d(addr) dword ptr addr
-#if CPUFAM_AMD64
-# define _DECOR_mem_q(addr) qword ptr addr
+# define _DECOR_mem_r(addr) qword ptr addr
+# define _DECOR_imm_r(imm) qword imm
#endif
// R_r(decor) applies decoration decor to register r, which is an internal
// register name. The internal register names are: `ip', `a', `b', `c', `d',
// `si', `di', `bp', `sp', `r8'--`r15'.
+#define R_nil(decor) nil
#define R_ip(decor) _DECOR(ip, decor, ip)
#define R_a(decor) _DECOR(abcd, decor, a)
#define R_b(decor) _DECOR(abcd, decor, b)
// address addr (which should supply its own square-brackets).
#define MEM(decor, addr) _DECOR(mem, decor, addr)
+// Refer to an immediate datum of the type implied by decor.
+#define IMM(decor, imm) _DECOR(imm, decor, imm)
+
// Applies decoration decor to assembler-level register name reg.
#define _REGFORM(reg, decor) _GLUE(_REGFORM_, reg)(decor)
// assembler-level register name, in place of any decoration that register
// name has already.
+#define _REGFORM_nil(decor) R_nil(decor)
+
#define _REGFORM_ip(decor) R_ip(decor)
#define _REGFORM_eip(decor) R_ip(decor)
#endif
#define WHOLE(reg) _REGFORM(reg, r)
+// Macros for some common registers.
+#define AX R_a(r)
+#define BX R_b(r)
+#define CX R_c(r)
+#define DX R_d(r)
+#define SI R_si(r)
+#define DI R_di(r)
+#define BP R_bp(r)
+#define SP R_sp(r)
+
// Stack management and unwinding.
-.macro setfp fp, offset = 0
+.macro setfp fp=BP, offset=0
.if \offset == 0
- mov \fp, R_sp(r)
+ mov \fp, SP
#if __ELF__
.cfi_def_cfa_register \fp
#endif
.seh_setframe \fp, 0
#endif
.else
- lea \fp, [R_sp(r) + \offset]
+ lea \fp, [SP + \offset]
#if __ELF__
.cfi_def_cfa_register \fp
.cfi_adjust_cfa_offset -\offset
.macro dropfp; _dropfp \fp, \offset; .endm
.endm
-.macro _dropfp fp, offset = 0
+.macro _dropfp fp, offset=0
.if \offset == 0
- mov R_sp(r), \fp
+ mov SP, \fp
#if __ELF__
- .cfi_def_cfa_register R_sp(r)
+ .cfi_def_cfa_register SP
#endif
.else
- lea R_sp(r), [\fp - \offset]
+ lea SP, [\fp - \offset]
#if __ELF__
- .cfi_def_cfa_register R_sp(r)
+ .cfi_def_cfa_register SP
.cfi_adjust_cfa_offset +\offset
#endif
.endif
.endm
.macro stalloc n
- sub R_sp(r), \n
+ sub SP, \n
#if __ELF__
.cfi_adjust_cfa_offset +\n
#endif
.endm
.macro stfree n
- add R_sp(r), \n
+ add SP, \n
#if __ELF__
.cfi_adjust_cfa_offset -\n
#endif
.endm
.macro savexmm r, offset
- movdqa [R_sp(r) + \offset], \r
+ movdqa [SP + \offset], \r
#if ABI_WIN && CPUFAM_AMD64
.seh_savexmm \r, \offset
#endif
.endm
.macro rstrxmm r, offset
- movdqa \r, [R_sp(r) + \offset]
+ movdqa \r, [SP + \offset]
.endm
.macro endprologue
#endif
-#if CPUFAM_X86
-
-.macro _reg.0
- // Stash GP registers and establish temporary stack frame.
- pushfd
- push eax
- push ecx
- push edx
- push ebp
- mov ebp, esp
- and esp, ~15
- sub esp, 512
- fxsave [esp]
-.endm
-
-.macro _reg.1
-.endm
-
-.macro _reg.2
-.endm
-
-.macro _reg.3 fmt
- // Print FMT and the other established arguments.
- lea eax, .L$_reg$msg.\@
- push eax
- call printf
- jmp .L$_reg$cont.\@
-.L$_reg$msg.\@:
- .ascii ";; \fmt\n\0"
-.L$_reg$cont.\@:
- mov eax, ebp
- and eax, ~15
- sub eax, 512
- fxrstor [eax]
- mov esp, ebp
- pop ebp
- pop edx
- pop ecx
- pop eax
- popfd
-.endm
-
-.macro msg msg
- _reg.0
- _reg.1
- _reg.2
- _reg.3 "\msg"
-.endm
-
-.macro reg r, msg
- _reg.0
- .ifeqs "\r", "esp"
- lea eax, [ebp + 20]
- push eax
- .else
- .ifeqs "\r", "ebp"
- push [ebp]
- .else
- push \r
- .endif
- .endif
- _reg.1
- _reg.2
- _reg.3 "\msg: \r = %08x"
-.endm
-
-.macro xmmreg r, msg
- _reg.0
- _reg.1
- _reg.2
- movdqu xmm0, \r
- pshufd xmm0, xmm0, 0x1b
- sub esp, 16
- movdqa [esp], xmm0
- _reg.3 "\msg: \r = %08x %08x %08x %08x"
-.endm
-
-.macro mmreg r, msg
- _reg.0
- _reg.1
- _reg.2
- pshufw \r, \r, 0x4e
- sub esp, 8
- movq [esp], \r
- _reg.3 "\msg: \r = %08x %08x"
-.endm
-
-.macro freg i, msg
- _reg.0
- _reg.1
- _reg.2
- finit
- fldt [esp + 32 + 16*\i]
- sub esp, 12
- fstpt [esp]
- _reg.3 "\msg: st(\i) = %.20Lg"
-.endm
-
-.macro fxreg i, msg
- _reg.0
- _reg.1
- _reg.2
- finit
- fldt [esp + 32 + 16*\i]
- sub esp, 12
- fstpt [esp]
- _reg.3 "\msg: st(\i) = %La"
-.endm
-
-#endif
-
///--------------------------------------------------------------------------
/// ARM-specific hacking.
#if WANT_PIC
ldr\cond \reg, .L$_leaextq$\@
.L$_leaextq_pc$\@:
- .if .L$_pcoff == 8
+ .if .L$_pcoff == 8
ldr\cond \reg, [pc, \reg]
- .else
+ .else
add\cond \reg, pc
ldr\cond \reg, [\reg]
- .endif
+ .endif
_LIT
.balign 4
.L$_leaextq$\@:
#endif
.endm
+.macro vzero vz=q15
+ // Set VZ (default q15) to zero.
+ vmov.u32 \vz, #0
+.endm
+
+.macro vshl128 vd, vn, nbit, vz=q15
+ // Set VD to VN shifted left by NBIT. Assume VZ (default q15) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+  .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ vext.8 \vd, \vz, \vn, #16 - (\nbit >> 3)
+.endm
+
+.macro vshr128 vd, vn, nbit, vz=q15
+ // Set VD to VN shifted right by NBIT. Assume VZ (default q15) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+  .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ vext.8 \vd, \vn, \vz, #\nbit >> 3
+.endm
+
// Apply decoration decor to register name reg.
#define _REGFORM(reg, decor) _GLUE(_REGFORM_, reg)(decor)
// Internal macros: `_REGFORM_r(decor)' applies decoration decor to register
// name r.
+#define _REGFORM_nil(decor) nil
+
#define _REGFORM_s0(decor) _DECOR(s, decor, 0)
#define _REGFORM_s1(decor) _DECOR(s, decor, 1)
#define _REGFORM_s2(decor) _DECOR(s, decor, 2)
#define QQ(qlo, qhi) D0(qlo)-D1(qhi)
// Stack management and unwinding.
-.macro setfp fp, offset = 0
+.macro setfp fp=r11, offset=0
.if \offset == 0
mov \fp, sp
.setfp \fp, sp
.L$_frameptr_p = -1
.endm
-.macro _dropfp fp, offset = 0
+.macro _dropfp fp, offset=0
.if \offset == 0
mov sp, \fp
.else
.endm
.macro pushreg rr:vararg
- stmfd sp!, {\rr}
+ push {\rr}
.save {\rr}
.endm
.macro popreg rr:vararg
- ldmfd sp!, {\rr}
+ pop {\rr}
.endm
.macro pushvfp rr:vararg
#endif
///--------------------------------------------------------------------------
+/// AArch64-specific hacking.
+
+#if CPUFAM_ARM64
+
+// Set the function hooks.
+#define FUNC_PREHOOK(_) .balign 4
+#define FUNC_POSTHOOK(_) .cfi_startproc; .L$_prologue_p = -1
+#define ENDFUNC_HOOK(_) .cfi_endproc
+
+// Call external subroutine at ADDR, possibly via PLT.
+.macro callext addr
+ bl \addr
+.endm
+
+// Load address of external symbol ADDR into REG.
+.macro leaext reg, addr
+#if WANT_PIC
+ adrp \reg, :got:\addr
+ ldr \reg, [\reg, #:got_lo12:\addr]
+#else
+ adrp \reg, \addr
+ add \reg, \reg, #:lo12:\addr
+#endif
+.endm
+
+.macro vzero vz=v31
+ // Set VZ (default v31) to zero.
+ dup \vz\().4s, wzr
+.endm
+
+.macro vshl128 vd, vn, nbit, vz=v31
+ // Set VD to VN shifted left by NBIT. Assume VZ (default v31) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+  .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ ext \vd\().16b, \vz\().16b, \vn\().16b, #16 - (\nbit >> 3)
+.endm
+
+.macro vshr128 vd, vn, nbit, vz=v31
+ // Set VD to VN shifted right by NBIT. Assume VZ (default v31) is
+ // all-bits-zero. NBIT must be a multiple of 8.
+  .if \nbit&7 != 0
+ .error "shift quantity must be whole number of bytes"
+ .endif
+ ext \vd\().16b, \vn\().16b, \vz\().16b, #\nbit >> 3
+.endm
+
+// Register class conversions.
+#define _GPNUM_w0 0
+#define _GPNUM_w1 1
+#define _GPNUM_w2 2
+#define _GPNUM_w3 3
+#define _GPNUM_w4 4
+#define _GPNUM_w5 5
+#define _GPNUM_w6 6
+#define _GPNUM_w7 7
+#define _GPNUM_w8 8
+#define _GPNUM_w9 9
+#define _GPNUM_w10 10
+#define _GPNUM_w11 11
+#define _GPNUM_w12 12
+#define _GPNUM_w13 13
+#define _GPNUM_w14 14
+#define _GPNUM_w15 15
+#define _GPNUM_w16 16
+#define _GPNUM_w17 17
+#define _GPNUM_w18 18
+#define _GPNUM_w19 19
+#define _GPNUM_w20 20
+#define _GPNUM_w21 21
+#define _GPNUM_w22 22
+#define _GPNUM_w23 23
+#define _GPNUM_w24 24
+#define _GPNUM_w25 25
+#define _GPNUM_w26 26
+#define _GPNUM_w27 27
+#define _GPNUM_w28 28
+#define _GPNUM_w29 29
+#define _GPNUM_w30 30
+#define _GPNUM_wzr zr
+#define _GPNUM_wsp sp
+
+#define _GPNUM_x0 0
+#define _GPNUM_x1 1
+#define _GPNUM_x2 2
+#define _GPNUM_x3 3
+#define _GPNUM_x4 4
+#define _GPNUM_x5 5
+#define _GPNUM_x6 6
+#define _GPNUM_x7 7
+#define _GPNUM_x8 8
+#define _GPNUM_x9 9
+#define _GPNUM_x10 10
+#define _GPNUM_x11 11
+#define _GPNUM_x12 12
+#define _GPNUM_x13 13
+#define _GPNUM_x14 14
+#define _GPNUM_x15 15
+#define _GPNUM_x16 16
+#define _GPNUM_x17 17
+#define _GPNUM_x18 18
+#define _GPNUM_x19 19
+#define _GPNUM_x20 20
+#define _GPNUM_x21 21
+#define _GPNUM_x22 22
+#define _GPNUM_x23 23
+#define _GPNUM_x24 24
+#define _GPNUM_x25 25
+#define _GPNUM_x26 26
+#define _GPNUM_x27 27
+#define _GPNUM_x28 28
+#define _GPNUM_x29 29
+#define _GPNUM_x30 30
+#define _GPNUM_xzr zr
+#define _GPNUM_sp sp
+#define _GPNUM_xsp sp
+#define xsp sp
+
+#define _VNUM_b0 0
+#define _VNUM_b1 1
+#define _VNUM_b2 2
+#define _VNUM_b3 3
+#define _VNUM_b4 4
+#define _VNUM_b5 5
+#define _VNUM_b6 6
+#define _VNUM_b7 7
+#define _VNUM_b8 8
+#define _VNUM_b9 9
+#define _VNUM_b10 10
+#define _VNUM_b11 11
+#define _VNUM_b12 12
+#define _VNUM_b13 13
+#define _VNUM_b14 14
+#define _VNUM_b15 15
+#define _VNUM_b16 16
+#define _VNUM_b17 17
+#define _VNUM_b18 18
+#define _VNUM_b19 19
+#define _VNUM_b20 20
+#define _VNUM_b21 21
+#define _VNUM_b22 22
+#define _VNUM_b23 23
+#define _VNUM_b24 24
+#define _VNUM_b25 25
+#define _VNUM_b26 26
+#define _VNUM_b27 27
+#define _VNUM_b28 28
+#define _VNUM_b29 29
+#define _VNUM_b30 30
+#define _VNUM_b31 31
+
+#define _VNUM_h0 0
+#define _VNUM_h1 1
+#define _VNUM_h2 2
+#define _VNUM_h3 3
+#define _VNUM_h4 4
+#define _VNUM_h5 5
+#define _VNUM_h6 6
+#define _VNUM_h7 7
+#define _VNUM_h8 8
+#define _VNUM_h9 9
+#define _VNUM_h10 10
+#define _VNUM_h11 11
+#define _VNUM_h12 12
+#define _VNUM_h13 13
+#define _VNUM_h14 14
+#define _VNUM_h15 15
+#define _VNUM_h16 16
+#define _VNUM_h17 17
+#define _VNUM_h18 18
+#define _VNUM_h19 19
+#define _VNUM_h20 20
+#define _VNUM_h21 21
+#define _VNUM_h22 22
+#define _VNUM_h23 23
+#define _VNUM_h24 24
+#define _VNUM_h25 25
+#define _VNUM_h26 26
+#define _VNUM_h27 27
+#define _VNUM_h28 28
+#define _VNUM_h29 29
+#define _VNUM_h30 30
+#define _VNUM_h31 31
+
+#define _VNUM_s0 0
+#define _VNUM_s1 1
+#define _VNUM_s2 2
+#define _VNUM_s3 3
+#define _VNUM_s4 4
+#define _VNUM_s5 5
+#define _VNUM_s6 6
+#define _VNUM_s7 7
+#define _VNUM_s8 8
+#define _VNUM_s9 9
+#define _VNUM_s10 10
+#define _VNUM_s11 11
+#define _VNUM_s12 12
+#define _VNUM_s13 13
+#define _VNUM_s14 14
+#define _VNUM_s15 15
+#define _VNUM_s16 16
+#define _VNUM_s17 17
+#define _VNUM_s18 18
+#define _VNUM_s19 19
+#define _VNUM_s20 20
+#define _VNUM_s21 21
+#define _VNUM_s22 22
+#define _VNUM_s23 23
+#define _VNUM_s24 24
+#define _VNUM_s25 25
+#define _VNUM_s26 26
+#define _VNUM_s27 27
+#define _VNUM_s28 28
+#define _VNUM_s29 29
+#define _VNUM_s30 30
+#define _VNUM_s31 31
+
+#define _VNUM_d0 0
+#define _VNUM_d1 1
+#define _VNUM_d2 2
+#define _VNUM_d3 3
+#define _VNUM_d4 4
+#define _VNUM_d5 5
+#define _VNUM_d6 6
+#define _VNUM_d7 7
+#define _VNUM_d8 8
+#define _VNUM_d9 9
+#define _VNUM_d10 10
+#define _VNUM_d11 11
+#define _VNUM_d12 12
+#define _VNUM_d13 13
+#define _VNUM_d14 14
+#define _VNUM_d15 15
+#define _VNUM_d16 16
+#define _VNUM_d17 17
+#define _VNUM_d18 18
+#define _VNUM_d19 19
+#define _VNUM_d20 20
+#define _VNUM_d21 21
+#define _VNUM_d22 22
+#define _VNUM_d23 23
+#define _VNUM_d24 24
+#define _VNUM_d25 25
+#define _VNUM_d26 26
+#define _VNUM_d27 27
+#define _VNUM_d28 28
+#define _VNUM_d29 29
+#define _VNUM_d30 30
+#define _VNUM_d31 31
+
+#define _VNUM_q0 0
+#define _VNUM_q1 1
+#define _VNUM_q2 2
+#define _VNUM_q3 3
+#define _VNUM_q4 4
+#define _VNUM_q5 5
+#define _VNUM_q6 6
+#define _VNUM_q7 7
+#define _VNUM_q8 8
+#define _VNUM_q9 9
+#define _VNUM_q10 10
+#define _VNUM_q11 11
+#define _VNUM_q12 12
+#define _VNUM_q13 13
+#define _VNUM_q14 14
+#define _VNUM_q15 15
+#define _VNUM_q16 16
+#define _VNUM_q17 17
+#define _VNUM_q18 18
+#define _VNUM_q19 19
+#define _VNUM_q20 20
+#define _VNUM_q21 21
+#define _VNUM_q22 22
+#define _VNUM_q23 23
+#define _VNUM_q24 24
+#define _VNUM_q25 25
+#define _VNUM_q26 26
+#define _VNUM_q27 27
+#define _VNUM_q28 28
+#define _VNUM_q29 29
+#define _VNUM_q30 30
+#define _VNUM_q31 31
+
+#define _VNUM_v0 0
+#define _VNUM_v1 1
+#define _VNUM_v2 2
+#define _VNUM_v3 3
+#define _VNUM_v4 4
+#define _VNUM_v5 5
+#define _VNUM_v6 6
+#define _VNUM_v7 7
+#define _VNUM_v8 8
+#define _VNUM_v9 9
+#define _VNUM_v10 10
+#define _VNUM_v11 11
+#define _VNUM_v12 12
+#define _VNUM_v13 13
+#define _VNUM_v14 14
+#define _VNUM_v15 15
+#define _VNUM_v16 16
+#define _VNUM_v17 17
+#define _VNUM_v18 18
+#define _VNUM_v19 19
+#define _VNUM_v20 20
+#define _VNUM_v21 21
+#define _VNUM_v22 22
+#define _VNUM_v23 23
+#define _VNUM_v24 24
+#define _VNUM_v25 25
+#define _VNUM_v26 26
+#define _VNUM_v27 27
+#define _VNUM_v28 28
+#define _VNUM_v29 29
+#define _VNUM_v30 30
+#define _VNUM_v31 31
+
+#define _VNUM_z0 0
+#define _VNUM_z1 1
+#define _VNUM_z2 2
+#define _VNUM_z3 3
+#define _VNUM_z4 4
+#define _VNUM_z5 5
+#define _VNUM_z6 6
+#define _VNUM_z7 7
+#define _VNUM_z8 8
+#define _VNUM_z9 9
+#define _VNUM_z10 10
+#define _VNUM_z11 11
+#define _VNUM_z12 12
+#define _VNUM_z13 13
+#define _VNUM_z14 14
+#define _VNUM_z15 15
+#define _VNUM_z16 16
+#define _VNUM_z17 17
+#define _VNUM_z18 18
+#define _VNUM_z19 19
+#define _VNUM_z20 20
+#define _VNUM_z21 21
+#define _VNUM_z22 22
+#define _VNUM_z23 23
+#define _VNUM_z24 24
+#define _VNUM_z25 25
+#define _VNUM_z26 26
+#define _VNUM_z27 27
+#define _VNUM_z28 28
+#define _VNUM_z29 29
+#define _VNUM_z30 30
+#define _VNUM_z31 31
+
+#define _RDECOR(cls, pre, r) GLUE(pre, _##cls##NUM_##r)
+#define W(r) _RDECOR(GP, w, r)
+#define X(r) _RDECOR(GP, x, r)
+#define B(r) _RDECOR(V, b, r)
+#define H(r) _RDECOR(V, h, r)
+#define S(r) _RDECOR(V, s, r)
+#define D(r) _RDECOR(V, d, r)
+#define Q(r) _RDECOR(V, q, r)
+#define V(r) _RDECOR(V, v, r)
+#define Z(r) _RDECOR(V, z, r)
+
+// Stack management and unwinding.
+.macro setfp fp=x29, offset=0
+ // If you're just going through the motions with a fixed-size stack frame,
+ // then you want to say `add x29, sp, #OFFSET' directly, which will avoid
+ // pointlessly restoring sp later.
+ .if \offset == 0
+ mov \fp, sp
+ .cfi_def_cfa_register \fp
+ .else
+ add \fp, sp, #\offset
+ .cfi_def_cfa_register \fp
+ .cfi_adjust_cfa_offset -\offset
+ .endif
+ .macro dropfp; _dropfp \fp, \offset; .endm
+ .L$_frameptr_p = -1
+.endm
+
+.macro _dropfp fp, offset=0
+ .if \offset == 0
+ mov sp, \fp
+ .cfi_def_cfa_register sp
+ .else
+ sub sp, \fp, #\offset
+ .cfi_def_cfa_register sp
+ .cfi_adjust_cfa_offset +\offset
+ .endif
+ .purgem dropfp
+ .L$_frameptr_p = 0
+.endm
+
+.macro stalloc n
+ sub sp, sp, #\n
+ .cfi_adjust_cfa_offset +\n
+.endm
+
+.macro stfree n
+ add sp, sp, #\n
+ .cfi_adjust_cfa_offset -\n
+.endm
+
+.macro pushreg x, y=nil
+ .ifeqs "\y", "nil"
+ str \x, [sp, #-16]!
+ .cfi_adjust_cfa_offset +16
+ .cfi_rel_offset \x, 0
+ .else
+ stp \x, \y, [sp, #-16]!
+ .cfi_adjust_cfa_offset +16
+ .cfi_rel_offset \x, 0
+ .cfi_rel_offset \y, 8
+ .endif
+.endm
+
+.macro popreg x, y=nil
+ .ifeqs "\y", "nil"
+ ldr \x, [sp], #16
+ .cfi_restore \x
+ .cfi_adjust_cfa_offset -16
+ .else
+ ldp \x, \y, [sp], #16
+ .cfi_restore \x
+ .cfi_restore \y
+ .cfi_adjust_cfa_offset -16
+ .endif
+.endm
+
+.macro savereg x, y, z=nil
+ .ifeqs "\z", "nil"
+ str \x, [sp, \y]
+ .cfi_rel_offset \x, \y
+ .else
+ stp \x, \y, [sp, #\z]
+ .cfi_rel_offset \x, \z
+ .cfi_rel_offset \y, \z + 8
+ .endif
+.endm
+
+.macro rstrreg x, y, z=nil
+ .ifeqs "\z", "nil"
+ ldr \x, [sp, \y]
+ .cfi_restore \x
+ .else
+ ldp \x, \y, [sp, #\z]
+ .cfi_restore \x
+ .cfi_restore \y
+ .endif
+.endm
+
+.macro endprologue
+.endm
+
+// cmov RD, RN, CC: set RD to RN if CC is satisfied, otherwise do nothing
+.macro cmov rd, rn, cc
+ csel \rd, \rn, \rd, \cc
+.endm
+
+// Notational improvement: write `csel.CC' etc., rather than `csel ..., CC'.
+#define _COND(_) \
+ _(eq) _(ne) _(cs) _(cc) _(vs) _(vc) _(mi) _(pl) \
+ _(ge) _(lt) _(gt) _(le) _(hi) _(ls) _(al) _(nv) \
+ _(hs) _(lo)
+#define _INST(_) \
+ _(ccmp) _(ccmn) \
+ _(csel) _(cmov) \
+ _(csinc) _(cinc) _(cset) \
+ _(csneg) _(cneg) \
+ _(csinv) _(cinv) _(csetm)
+#define _CONDVAR(cc) _definstvar cc;
+#define _INSTVARS(inst) \
+ .macro _definstvar cc; \
+ .macro inst.\cc args:vararg; inst \args, \cc; .endm; \
+ .endm; \
+ _COND(_CONDVAR); \
+ .purgem _definstvar;
+ _INST(_INSTVARS)
+#undef _COND
+#undef _INST
+#undef _CONDVAR
+#undef _INSTVARS
+
+// Flag bits for `ccmp' and friends.
+#define CCMP_N 8
+#define CCMP_Z 4
+#define CCMP_C 2
+#define CCMP_V 1
+
+// Flag settings for satisfying conditions.
+#define CCMP_MI CCMP_N
+#define CCMP_PL 0
+#define CCMP_EQ CCMP_Z
+#define CCMP_NE 0
+#define CCMP_CS CCMP_C
+#define CCMP_HS CCMP_C
+#define CCMP_CC 0
+#define CCMP_LO 0
+#define CCMP_VS CCMP_V
+#define CCMP_VC 0
+#define CCMP_HI CCMP_C
+#define CCMP_LS 0
+#define CCMP_LT CCMP_N
+#define CCMP_GE 0
+#define CCMP_LE CCMP_N
+#define CCMP_GT 0
+
+#endif
+
+///--------------------------------------------------------------------------
/// Final stuff.
// Default values for the various hooks.
#endif
#ifndef F
-# define F(name) name
+# ifdef SYM_USCORE
+# define F(name) _##name
+# else
+# define F(name) name
+# endif
#endif
#ifndef TYPE_FUNC
# define SIZE_OBJ(name)
#endif
-#if __ELF__ && defined(WANT_EXECUTABLE_STACK)
+#if __ELF__ && !defined(WANT_EXECUTABLE_STACK)
.pushsection .note.GNU-stack, "", _SECTTY(progbits)
.popsection
#endif
///----- That's all, folks --------------------------------------------------
+
+#endif