From 26e182fc3ae2a40dc7d52bab2318d8d1837dfeee Mon Sep 17 00:00:00 2001
From: Mark Wooding
Date: Thu, 26 May 2016 09:26:09 +0100
Subject: [PATCH] configure.ac, symm/rijndael*: Use ARMv8 AES instructions
 where available.

This matches the x86 AESNI support, but is less mad.
---
 base/dispatch.c            |  14 +-
 base/dispatch.h            |   3 +-
 configure.ac               |  34 +++++
 symm/Makefile.am           |   5 +
 symm/rijndael-arm-crypto.S | 352 +++++++++++++++++++++++++++++++++++++++++++++
 symm/rijndael-base.c       |   7 +
 symm/rijndael.c            |  12 ++
 7 files changed, 424 insertions(+), 3 deletions(-)
 create mode 100644 symm/rijndael-arm-crypto.S

diff --git a/base/dispatch.c b/base/dispatch.c
index 875c1260..b70d44b6 100644
--- a/base/dispatch.c
+++ b/base/dispatch.c
@@ -229,6 +229,11 @@ struct auxentry { unsigned long type; union auxval value; };
 # define WANT_AT_HWCAP(_) _(AT_HWCAP, u, hwcap)
 #endif
 
+#if defined(AT_HWCAP2) && CPUFAM_ARMEL
+# define WANT_ANY 1
+# define WANT_AT_HWCAP2(_) _(AT_HWCAP2, u, hwcap2)
+#endif
+
 /* If we couldn't find any interesting entries then we can switch all of this
  * machinery off.  Also do that if we have no means for atomic updates.
  */
@@ -264,12 +269,14 @@ static unsigned hwcaps = 0;
  */
 #if CPUFAM_ARMEL
 # define WANTAUX(_)						\
-	WANT_AT_HWCAP(_)
+	WANT_AT_HWCAP(_)					\
+	WANT_AT_HWCAP2(_)
 # define CAPMAP(_)						\
 	_(ARM_VFP, "arm:vfp")					\
 	_(ARM_NEON, "arm:neon")					\
 	_(ARM_V4, "arm:v4")					\
-	_(ARM_D32, "arm:d32")
+	_(ARM_D32, "arm:d32")					\
+	_(ARM_AES, "arm:aes")
 #endif
 
 /* Build the bitmask for `hwcaps' from the `CAPMAP' list. */
@@ -380,6 +387,9 @@ static void probe_hwcaps(void)
   if (probed.hwcap & HWCAP_NEON) hw |= HF_ARM_NEON;
   if (probed.hwcap & HWCAP_VFPD32) hw |= HF_ARM_D32;
   if (probed.hwcap & HWCAP_VFPv4) hw |= HF_ARM_V4;
+# ifdef HWCAP2_AES
+  if (probed.hwcap2 & HWCAP2_AES) hw |= HF_ARM_AES;
+# endif
 #endif
 
   /* Store the bitmask of features we probed for everyone to see. */
diff --git a/base/dispatch.h b/base/dispatch.h
index 1983a5a4..f778068c 100644
--- a/base/dispatch.h
+++ b/base/dispatch.h
@@ -180,7 +180,8 @@ enum {
   CPUFEAT_ARM_NEON,			/* Advanced SIMD (v1 or v2) */
   CPUFEAT_ARM_V4,			/* VFPv4 and/or SIMD v2 */
   CPUFEAT_ARM_D32,			/* 32 double registers, not 16 */
-  CPUFEAT_X86_RDRAND			/* Built-in entropy source */
+  CPUFEAT_X86_RDRAND,			/* Built-in entropy source */
+  CPUFEAT_ARM_AES			/* AES instructions */
 };
 
 extern int cpu_feature_p(int /*feat*/);
diff --git a/configure.ac b/configure.ac
index 1802a8a0..86f7944a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -172,6 +172,40 @@ case $CPUFAM in
 esac
 
 dnl--------------------------------------------------------------------------
+dnl CPU-specific assembler features.
+
+AC_LANG([CPPAS])
+
+case $CPUFAM in
+  armel)
+    AC_CACHE_CHECK(
+      [whether the assembler understands ARMv8 crypto extensions],
+      [mdw_cv_as_armv8_crypto],
+      [AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+	.arch	armv8-a
+	.fpu	crypto-neon-fp-armv8
+
+	.text
+	.globl	foo
+foo:
+	vldmia	r0, {d0-d3}
+	aese.8	q0, q1
+	aesmc.8	q0, q0
+	vstmia	r0, {d0, d1}
+	bx	r14]])],
+      [mdw_cv_as_armv8_crypto=yes],
+      [mdw_cv_as_armv8_crypto=no])])
+    ;;
+esac
+
+: ${mdw_cv_as_armv8_crypto=no}
+AM_CONDITIONAL([HAVE_AS_ARMV8_CRYPTO], [test $mdw_cv_as_armv8_crypto = yes])
+if test $mdw_cv_as_armv8_crypto = yes; then
+  AC_DEFINE([HAVE_AS_ARMV8_CRYPTO], [1],
+	    [Define to 1 if your ARM assembler supports the ARMv8 crypto instructions.])
+fi
+
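+dnl The check above only establishes build-time support: the decision to
+dnl /use/ the AES instructions is made at run time by base/dispatch.c,
+dnl which reads AT_HWCAP2 from the Linux auxiliary vector.  A rough
+dnl userland C sketch of that probe, for reference only (not part of the
+dnl build; getauxval needs glibc 2.16 or later):
+dnl
+dnl	#include <sys/auxv.h>
+dnl	#include <asm/hwcap.h>
+dnl
+dnl	int have_aes = !!(getauxval(AT_HWCAP2) & HWCAP2_AES);
+dnl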
+dnl--------------------------------------------------------------------------
 dnl C programming environment.
 
 AC_LANG([C])
diff --git a/symm/Makefile.am b/symm/Makefile.am
index e56b2a43..4539e0a3 100644
--- a/symm/Makefile.am
+++ b/symm/Makefile.am
@@ -188,6 +188,11 @@ endif
 if CPUFAM_AMD64
 libsymm_la_SOURCES	+= rijndael-x86ish-aesni.S
 endif
+if HAVE_AS_ARMV8_CRYPTO
+if CPUFAM_ARMEL
+libsymm_la_SOURCES	+= rijndael-arm-crypto.S
+endif
+endif
 nodist_libsymm_la_SOURCES += ../precomp/symm/rijndael-tab.c
 PRECOMPS		+= $(precomp)/symm/rijndael-tab.c
 PRECOMP_PROGS		+= rijndael-mktab
diff --git a/symm/rijndael-arm-crypto.S b/symm/rijndael-arm-crypto.S
new file mode 100644
index 00000000..d33cac6b
--- /dev/null
+++ b/symm/rijndael-arm-crypto.S
@@ -0,0 +1,352 @@
+/// -*- mode: asm; asm-comment-char: ?/ -*-
+///
+/// ARM crypto-extension-based implementation of Rijndael
+///
+/// (c) 2016 Straylight/Edgeware
+///
+
+///----- Licensing notice ---------------------------------------------------
+///
+/// This file is part of Catacomb.
+///
+/// Catacomb is free software; you can redistribute it and/or modify
+/// it under the terms of the GNU Library General Public License as
+/// published by the Free Software Foundation; either version 2 of the
+/// License, or (at your option) any later version.
+///
+/// Catacomb is distributed in the hope that it will be useful,
+/// but WITHOUT ANY WARRANTY; without even the implied warranty of
+/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+/// GNU Library General Public License for more details.
+///
+/// You should have received a copy of the GNU Library General Public
+/// License along with Catacomb; if not, write to the Free
+/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+/// MA 02111-1307, USA.
+
+///--------------------------------------------------------------------------
+/// External definitions.
+
+#include "config.h"
+#include "asm-common.h"
+
+	.globl	F(abort)
+	.globl	F(rijndael_rcon)
+
+///--------------------------------------------------------------------------
+/// Main code.
+
+	.arch	armv8-a
+	.fpu	crypto-neon-fp-armv8
+
+/// The ARM crypto extension implements a little-endian version of AES
+/// (though the manual doesn't actually spell this out and you have to
+/// experiment), but Catacomb's internal interface presents as big-endian so
+/// as to work better with things like GCM.  We therefore maintain the round
+/// keys in little-endian form, and have to end-swap blocks in and out.
+///
+/// For added amusement, the crypto extension doesn't implement the larger-
+/// block versions of Rijndael, so we have to end-swap the keys if we're
+/// preparing for one of those.
+
+	// Useful constants.
+	.equ	maxrounds, 16		// maximum number of rounds
+	.equ	maxblksz, 32		// maximum block size, in bytes
+	.equ	kbufsz, maxblksz*(maxrounds + 1) // size of a key-schedule buffer
+
+	// Context structure.
+	.equ	nr, 0			// number of rounds
+	.equ	w, nr + 4		// encryption key words
+	.equ	wi, w + kbufsz		// decryption key words
+
+///--------------------------------------------------------------------------
+/// Key setup.
+
+FUNC(rijndael_setup_arm_crypto)
+
+	// Arguments:
+	//	r0 = pointer to context
+	//	r1 = block size in words
+	//	r2 = pointer to key material
+	//	r3 = key size in words
+
+	stmfd	sp!, {r4-r9, r14}
+
+	// The initial round key material is taken directly from the input
+	// key, so copy it over.  Unfortunately, the key material is not
+	// guaranteed to be aligned in any especially useful way, so we must
+	// sort this out.
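+	//
+	// (For reference, a C sketch of the misaligned path below: with
+	// `p' the key pointer rounded down to a word boundary and `off'
+	// the key's offset in bits -- r4 holds `off' and r5 holds
+	// `32 - off' -- each output word is spliced from two aligned
+	// loads,
+	//
+	//	w[i] = (p[i] >> off) | (p[i + 1] << (32 - off));
+	//
+	// which is right for little-endian loads; `off' is nonzero
+	// whenever we take this path, so both shifts are well defined.)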
+	add	r9, r0, #w
+	mov	r14, r3
+	ands	r4, r2, #3
+	beq	1f
+	mov	r4, r4, lsl #3
+	rsb	r5, r4, #32
+	bic	r2, r2, #3
+	ldr	r6, [r2], #4
+
+0:	ldr	r7, [r2], #4
+	mov	r6, r6, lsr r4
+	orr	r6, r7, lsl r5
+	str	r6, [r9], #4
+	mov	r6, r7
+	subs	r14, r14, #1
+	bhi	0b
+	b	9f
+
+1:	ldr	r6, [r2], #4
+	str	r6, [r9], #4
+	subs	r14, r14, #1
+	bhi	1b
+
+	// Find out other useful things and prepare for the main loop.
+9:	ldr	r7, [r0, #nr]		// number of rounds
+	mla	r2, r1, r7, r1		// total key size in words
+	ldr	r4, [r9, #-4]		// most recent key word
+	leaextq	r5, rijndael_rcon	// round constants
+	sub	r8, r2, r3		// minus what we've copied already
+	veor	q1, q1			// all-zero register for the key
+	add	r8, r9, r8, lsl #2	// limit of the key buffer
+
+	// Main key expansion loop.  The first word of each key-length chunk
+	// needs special treatment.
+9:	ldrb	r14, [r5], #1		// next round constant
+	ldr	r6, [r9, -r3, lsl #2]
+	vdup.32	q0, r4
+	aese.8	q0, q1			// effectively, just SubBytes
+	vmov.32	r4, d0[0]
+	eor	r4, r14, r4, ror #8
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// The next three words are simple.
+	ldr	r6, [r9, -r3, lsl #2]
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// (Word 2...)
+	ldr	r6, [r9, -r3, lsl #2]
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// (Word 3...)
+	ldr	r6, [r9, -r3, lsl #2]
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// Word 4.  If the key is /more/ than 6 words long, then we must
+	// apply a substitution here.
+	cmp	r3, #5
+	bcc	9b
+	ldr	r6, [r9, -r3, lsl #2]
+	cmp	r3, #7
+	bcc	0f
+	vdup.32	q0, r4
+	aese.8	q0, q1			// effectively, just SubBytes
+	vmov.32	r4, d0[0]
+0:	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// (Word 5...)
+	cmp	r3, #6
+	bcc	9b
+	ldr	r6, [r9, -r3, lsl #2]
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// (Word 6...)
+	cmp	r3, #7
+	bcc	9b
+	ldr	r6, [r9, -r3, lsl #2]
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// (Word 7...)
+	cmp	r3, #8
+	bcc	9b
+	ldr	r6, [r9, -r3, lsl #2]
+	eor	r4, r4, r6
+	str	r4, [r9], #4
+	cmp	r9, r8
+	bcs	8f
+
+	// Must be done by now.
+	b	9b
+
+	// Next job is to construct the decryption keys.  The keys for the
+	// first and last rounds don't need to be mangled, but the remaining
+	// ones do -- and they all need to be reordered too.
+	//
+	// The plan of action, then, is to copy the final encryption round's
+	// keys into place first, then to do each of the intermediate rounds
+	// in reverse order, and finally do the first round.
+	//
+	// Do all the heavy lifting with NEON registers.  The order we're
+	// doing this in means that it's OK if we read or write too much, and
+	// there's easily enough buffer space for the over-enthusiastic reads
+	// and writes because the context has space for 32-byte blocks, which
+	// is our maximum and an exact fit for two Q-class registers.
+8:	add	r5, r0, #wi
+	add	r4, r0, #w
+	add	r4, r4, r2, lsl #2
+	sub	r4, r4, r1, lsl #2	// last round's keys
+
+	// Copy the last encryption round's keys.
+	teq	r1, #4
+	vldmiaeq r4, {d0, d1}
+	vldmiane r4, {d0-d3}
+	vstmiaeq r5, {d0, d1}
+	vstmiane r5, {d0-d3}
+
+	// Update the loop variables and stop if we've finished.
+9:	sub	r4, r4, r1, lsl #2
+	add	r5, r5, r1, lsl #2
+	subs	r7, r7, #1
+	beq	0f
+
+	// Do another middle round's keys...
+	teq	r1, #4
+	vldmiaeq r4, {d0, d1}
+	vldmiane r4, {d0-d3}
+	aesimc.8 q0, q0
+	vstmiaeq r5, {d0, d1}
+	beq	9b
+	aesimc.8 q1, q1
+	vstmia	r5, {d0-d3}
+	b	9b
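+
+	// (What this pass computes, as a C-like sketch indexing whole
+	// round keys: for the middle rounds 0 < i < nr,
+	//
+	//	wi[i] = AESIMC(w[nr - i]);
+	//
+	// while wi[0] and wi[nr] are plain copies of w[nr] and w[0].
+	// This is the usual `equivalent inverse cipher' construction:
+	// only the middle rounds' keys pass through InvMixColumns, so
+	// that decryption can reuse the encryption round structure.)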
+
+	// Finally do the first encryption round.
+0:	teq	r1, #4
+	vldmiaeq r4, {d0, d1}
+	vldmiane r4, {d0-d3}
+	vstmiaeq r5, {d0, d1}
+	vstmiane r5, {d0-d3}
+
+	// If the block size is not exactly four words then we must end-swap
+	// everything.  We can use fancy NEON toys for this.
+	beq	0f
+
+	// End-swap the encryption keys.
+	add	r1, r0, #w
+	bl	endswap_block
+
+	// And the decryption keys.
+	add	r1, r0, #wi
+	bl	endswap_block
+
+	// All done.
+0:	ldmfd	sp!, {r4-r9, pc}
+
+endswap_block:
+	// End-swap R2 words starting at R1.  R1 is clobbered; R2 is not.
+	// It's OK to work in 16-byte chunks.
+	mov	r4, r2
+0:	vldmia	r1, {d0, d1}
+	vrev32.8 q0, q0
+	vstmia	r1!, {d0, d1}
+	subs	r4, r4, #4
+	bhi	0b
+	bx	r14
+
+ENDFUNC
+
+///--------------------------------------------------------------------------
+/// Encrypting and decrypting blocks.
+
+FUNC(rijndael_eblk_arm_crypto)
+
+	// Arguments:
+	//	r0 = pointer to context
+	//	r1 = pointer to input block
+	//	r2 = pointer to output block
+
+	// Set things up ready.
+	ldr	r3, [r0, #nr]
+	add	r0, r0, #w
+	vldmia	r1, {d0, d1}
+	vrev32.8 q0, q0
+
+	// Dispatch according to the number of rounds.
+	add	r3, r3, r3, lsl #1
+	rsbs	r3, r3, #3*14
+	addcs	pc, pc, r3, lsl #2
+	callext	F(abort)
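+
+	// (Sketch of the computed branch above: each unrolled round
+	// below is three 4-byte instructions, so the ADDCS amounts to,
+	// roughly,
+	//
+	//	pc += 4*(3*14 - 3*nr);	// skip the first 14 - nr rounds
+	//
+	// leaving nr - 1 full rounds plus the final round to execute;
+	// a round count greater than 14 clears carry and falls through
+	// to abort instead.)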
+
+	// The last round doesn't have MixColumns, so do it separately.
+	.rept	13
+	vldmia	r0!, {d2, d3}
+	aese.8	q0, q1
+	aesmc.8	q0, q0
+	.endr
+
+	// Final round.
+	vldmia	r0!, {d2, d3}
+	aese.8	q0, q1
+
+	// Final whitening.
+	vldmia	r0!, {d2, d3}
+	veor	q0, q1
+
+	// All done.
+	vrev32.8 q0, q0
+	vstmia	r2, {d0, d1}
+	bx	r14
+
+ENDFUNC
+
+FUNC(rijndael_dblk_arm_crypto)
+
+	// Arguments:
+	//	r0 = pointer to context
+	//	r1 = pointer to input block
+	//	r2 = pointer to output block
+
+	// Set things up ready.
+	ldr	r3, [r0, #nr]
+	add	r0, r0, #wi
+	vldmia	r1, {d0, d1}
+	vrev32.8 q0, q0
+
+	// Dispatch according to the number of rounds.
+	add	r3, r3, r3, lsl #1
+	rsbs	r3, r3, #3*14
+	addcs	pc, pc, r3, lsl #2
+	callext	F(abort)
+
+	// The last round doesn't have MixColumns, so do it separately.
+	.rept	13
+	vldmia	r0!, {d2, d3}
+	aesd.8	q0, q1
+	aesimc.8 q0, q0
+	.endr
+
+	// Final round.
+	vldmia	r0!, {d2, d3}
+	aesd.8	q0, q1
+
+	// Final whitening.
+	vldmia	r0!, {d2, d3}
+	veor	q0, q1
+
+	// All done.
+	vrev32.8 q0, q0
+	vstmia	r2, {d0, d1}
+	bx	r14
+
+ENDFUNC
+
+///----- That's all, folks --------------------------------------------------
diff --git a/symm/rijndael-base.c b/symm/rijndael-base.c
index b5490c1d..01f781df 100644
--- a/symm/rijndael-base.c
+++ b/symm/rijndael-base.c
@@ -119,6 +119,9 @@ CPU_DISPATCH(static, EMPTY, void, setup,
 #if CPUFAM_X86 || CPUFAM_AMD64
 extern setup__functype rijndael_setup_x86ish_aesni;
 #endif
+#if CPUFAM_ARMEL && HAVE_AS_ARMV8_CRYPTO
+extern setup__functype rijndael_setup_arm_crypto;
+#endif
 
 static setup__functype *pick_setup(void)
 {
@@ -126,6 +129,10 @@ static setup__functype *pick_setup(void)
   DISPATCH_PICK_COND(rijndael_setup, rijndael_setup_x86ish_aesni,
 		     cpu_feature_p(CPUFEAT_X86_AESNI));
 #endif
+#if CPUFAM_ARMEL && HAVE_AS_ARMV8_CRYPTO
+  DISPATCH_PICK_COND(rijndael_setup, rijndael_setup_arm_crypto,
+		     cpu_feature_p(CPUFEAT_ARM_AES));
+#endif
   DISPATCH_PICK_FALLBACK(rijndael_setup, simple_setup);
 }
 
diff --git a/symm/rijndael.c b/symm/rijndael.c
index 58d896f9..4c8837d2 100644
--- a/symm/rijndael.c
+++ b/symm/rijndael.c
@@ -84,6 +84,10 @@ CPU_DISPATCH(EMPTY, EMPTY, void, rijndael_dblk,
 extern rijndael_eblk__functype rijndael_eblk_x86ish_aesni;
 extern rijndael_dblk__functype rijndael_dblk_x86ish_aesni;
 #endif
+#if CPUFAM_ARMEL && HAVE_AS_ARMV8_CRYPTO
+extern rijndael_eblk__functype rijndael_eblk_arm_crypto;
+extern rijndael_dblk__functype rijndael_dblk_arm_crypto;
+#endif
 
 static rijndael_eblk__functype *pick_eblk(void)
 {
@@ -91,6 +95,10 @@ static rijndael_eblk__functype *pick_eblk(void)
   DISPATCH_PICK_COND(rijndael_eblk, rijndael_eblk_x86ish_aesni,
 		     cpu_feature_p(CPUFEAT_X86_AESNI));
 #endif
+#if CPUFAM_ARMEL && HAVE_AS_ARMV8_CRYPTO
+  DISPATCH_PICK_COND(rijndael_eblk, rijndael_eblk_arm_crypto,
+		     cpu_feature_p(CPUFEAT_ARM_AES));
+#endif
   DISPATCH_PICK_FALLBACK(rijndael_eblk, simple_eblk);
 }
 
@@ -100,6 +108,10 @@ static rijndael_dblk__functype *pick_dblk(void)
   DISPATCH_PICK_COND(rijndael_dblk, rijndael_dblk_x86ish_aesni,
 		     cpu_feature_p(CPUFEAT_X86_AESNI));
 #endif
+#if CPUFAM_ARMEL && HAVE_AS_ARMV8_CRYPTO
+  DISPATCH_PICK_COND(rijndael_dblk, rijndael_dblk_arm_crypto,
+		     cpu_feature_p(CPUFEAT_ARM_AES));
+#endif
   DISPATCH_PICK_FALLBACK(rijndael_dblk, simple_dblk);
 }
 
-- 
2.11.0
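
(For context, not part of the patch: the CPU_DISPATCH machinery extended
above selects an implementation at run time.  A minimal sketch of the
pattern in C, with hypothetical pointer names and an assumed block-function
signature rather than Catacomb's actual macro expansion:

	/* Forward declaration of the one-shot trampoline. */
	static void eblk__dispatch(const rijndael_ctx *k,
				   const uint32 *s, uint32 *d);

	/* Function pointer, initially aimed at the trampoline. */
	static rijndael_eblk__functype *eblk__ptr = eblk__dispatch;

	static void eblk__dispatch(const rijndael_ctx *k,
				   const uint32 *s, uint32 *d)
	{
	  eblk__ptr = pick_eblk();	/* probe CPU features once */
	  eblk__ptr(k, s, d);		/* then call through */
	}

	void rijndael_eblk(const rijndael_ctx *k, const uint32 *s, uint32 *d)
	  { eblk__ptr(k, s, d); }

In this reading, DISPATCH_PICK_COND amounts to `if (cond) return impl;'
inside pick_eblk(), and DISPATCH_PICK_FALLBACK returns the portable C
implementation, so cpu_feature_p(CPUFEAT_ARM_AES) is consulted only on the
first call.)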