# define EFLAGS_ID (1u << 21)
# define CPUID1D_SSE2 (1u << 26)
# define CPUID1D_FXSR (1u << 24)
+# define CPUID1C_PCLMUL (1u << 1)
+# define CPUID1C_SSSE3 (1u << 9)
# define CPUID1C_AESNI (1u << 25)
+# define CPUID1C_AVX (1u << 28)
# define CPUID1C_RDRAND (1u << 30)
struct cpuid { unsigned a, b, c, d; };
_(ARM_NEON, "arm:neon") \
_(ARM_V4, "arm:v4") \
_(ARM_D32, "arm:d32") \
- _(ARM_AES, "arm:aes")
+ _(ARM_AES, "arm:aes") \
+ _(ARM_PMULL, "arm:pmull")
#endif
#if CPUFAM_ARM64
# define WANTAUX(_) \
WANT_AT_HWCAP(_)
# define CAPMAP(_) \
- _(ARM_AES, "arm:aes")
+ _(ARM_NEON, "arm:neon") \
+ _(ARM_AES, "arm:aes") \
+ _(ARM_PMULL, "arm:pmull")
#endif
/* Build the bitmask for `hwcaps' from the `CAPMAP' list. */
# ifdef HWCAP2_AES
if (probed.hwcap2 & HWCAP2_AES) hw |= HF_ARM_AES;
# endif
+# ifdef HWCAP2_PMULL
+ if (probed.hwcap2 & HWCAP2_PMULL) hw |= HF_ARM_PMULL;
+# endif
#endif
#if CPUFAM_ARM64
+ if (probed.hwcap & HWCAP_ASIMD) hw |= HF_ARM_NEON;
if (probed.hwcap & HWCAP_AES) hw |= HF_ARM_AES;
+ if (probed.hwcap & HWCAP_PMULL) hw |= HF_ARM_PMULL;
#endif
/* Store the bitmask of features we probed for everyone to see. */
xmm_registers_available_p());
CASE_CPUFEAT(X86_RDRAND, "x86:rdrand",
cpuid_features_p(0, CPUID1C_RDRAND) && rdrand_works_p());
+ CASE_CPUFEAT(X86_AVX, "x86:avx",
+ cpuid_features_p(0, CPUID1C_AVX) &&
+ xmm_registers_available_p());
+ CASE_CPUFEAT(X86_SSSE3, "x86:ssse3",
+ cpuid_features_p(0, CPUID1C_SSSE3) &&
+ xmm_registers_available_p());
+ CASE_CPUFEAT(X86_PCLMUL, "x86:pclmul",
+ cpuid_features_p(0, CPUID1C_PCLMUL) &&
+ xmm_registers_available_p());
#endif
#ifdef CAPMAP
# define FEATP__CASE(feat, tok) \
AX_CFLAGS_WARN_ALL
AM_PROG_LIBTOOL
mdw_LIBTOOL_VERSION_INFO
+ case $host_os in
+ cygwin* | mingw* | pw32* | os2* | darwin* | cegcc*)
+ TEST_LDFLAGS=-no-fast-install ;;
+ *)
+ TEST_LDFLAGS=-no-install ;;
+ esac
+ AC_SUBST([TEST_LDFLAGS])
AM_PROG_AS
;;m4_define([catacomb_seen_$3/$$2], [t])])])
catacomb_CPU_FAMILIES([_def])
nil) ;;
- *) AC_MSG_ERROR([BUG: unexpected $1 \`$1']) ;;
+ *) AC_MSG_ERROR([BUG: unexpected $1 \`$$1']) ;;
esac])
dnl Now that's out the way, we can explain what we're doing.
*) AC_MSG_RESULT([$CPUFAM/$ABI]) ;;
esac
+dnl Consider enabling support for assembler-level debugging toys.
+AC_ARG_ENABLE([asm-debug],
+ AS_HELP_STRING([--enable-asm-debug],
+ [enable assembler debugging features]),
+ [mdw_asm_debug=$enableval], [mdw_asm_debug=no])
+case $CPUFAM in nil) mdw_asm_debug=no ;; esac
+case $mdw_asm_debug in
+ no) ;;
+ *) AC_DEFINE([ENABLE_ASM_DEBUG], [1],
+ [Define to enable assembler-level debugging.]) ;;
+esac
+AM_CONDITIONAL([ASM_DEBUG], [test x$mdw_asm_debug != xno])
+
+dnl Check for leading underscores on C symbols.
+LT_SYS_SYMBOL_USCORE
+case $sys_symbol_underscore in
+ yes) AC_DEFINE([SYM_USCORE], [1],
+ [Define if C symbols are prefixed with an underscore.]) ;;
+esac
+
dnl--------------------------------------------------------------------------
dnl CPU-specific assembler features.
AC_CHECK_HEADERS([linux/auxvec.h])
AC_CHECK_FUNCS([getauxval])
+dnl Some equipment for measuring CPU performance.
+AC_CHECK_HEADERS([linux/perf_event.h])
+
dnl Find the bit lengths of the obvious integer types. This will be useful
dnl when deciding on a representation for multiprecision integers.
type_bits="" type_bits_sep=""
AC_SUBST([CATACOMB_LIBS])
dnl Necessary support libraries.
-PKG_CHECK_MODULES([mLib], [mLib >= 2.2.2.1])
+PKG_CHECK_MODULES([mLib], [mLib >= 2.3.0])
AM_CFLAGS="$AM_CFLAGS $mLib_CFLAGS"
dnl--------------------------------------------------------------------------
+catacomb (2.5.1) experimental; urgency=medium
+
+ * Merge changes from 2.4.4.
+
+ -- Mark Wooding <mdw@distorted.org.uk> Sun, 29 Sep 2019 17:50:59 +0100
+
+catacomb (2.5.0) experimental; urgency=medium
+
+ * catacomb: MACs based on blockciphers: PMAC1 and CMAC (also known as
+ OMAC).
+ * catacomb: Authenticated Encryption with Additional Data (AEAD)
+ schemes. Some based on blockciphers: CCM, EAX, GCM (with CPU-specific
+ acceleration), OCB1 and OCB3 (OCB2 is broken). Also Salsa20 and
+ ChaCha20 with Poly1305: the RFC7539 scheme, and the NaCl `secret_box'
+ transform.
+ * catacomb: Implement Grantham's Frobenius test. Combine it with
+ Rabin--Miller, as Baillie--PSW, for testing given primes.
+ * catacomb-bin (catcrypt): Support AEAD schemes for bulk crypto.
+ * catacomb-bin (perftest): Options for batching; report cycle counts
+ where available.
+ * Many internal improvements: better documentation, debugging, testing,
+ etc.
+
+ -- Mark Wooding <mdw@distorted.org.uk> Sat, 21 Sep 2019 21:26:44 +0100
+
+catacomb (2.4.5) experimental; urgency=medium
+
+ * catacomb: Fix memory leak in key-file error handling.
+ * catacomb: Don't leak internal `exptime' symbol into the global
+ namespace.
+ * catacomb: Check that the X86 `rdrand' instruction actually works
+ before leaning on it. This is in response to the well-publicized AMD
+ bug which always returns all-bits-set with the carry /set/ (indicating
+ success).
+ * catacomb: Mix in the random pool key during `rand_gate' and
+ `rand_stretch' operations.
+ * catacomb: Fix by-tag key lookups: if the query string looks like a hex
+ number, it's treated as a search by id; but if no such id is found,
+ the search wouldn't continue to look for a key by type or tag.
+ * catacomb: Fix reference leak in `key_split'.
+ * catacomb: Fix bug which completely broke `key_copydata'.
+ * catacomb: Fix segfault from `pgen', if it fails before setting up the
+ prime tester.
+ * catacomb: Propagate failure from `pgen' during Lim--Lee prime
+ generation, rather than immediately retrying.
+ * catacomb: Fix memory leak of factor vector from failed Lim--Lee prime
+ generation.
+ * catacomb: Fix segfault when multiplying the identity elliptic-curve
+ point.
+ * catacomb: Fix the `lcrand' descriptor, so that it's not advertised as
+ being cryptographically strong, and to fix a bias in its output.
+ * catacomb: Fix a memory leak in the error case of KCDSA prime
+ generation.
+ * catacomb-bin: Fix segfault from `pixie', if given an empty passphrase
+ to remember.
+ * catacomb: Check SIMD feature bit on ARM64 before using the optimized
+ code. I don't know of any ARM64 implementations which lack SIMD
+ instructions, but the bit must be there for a reason, so I might as
+ well use it.
+ * catacomb: Support parsing binary-group descriptions. This is a long-
+ standing lacuna that I've only recently noticed.
+
+ -- Mark Wooding <mdw@distorted.org.uk> Sat, 09 May 2020 17:46:24 +0100
+
catacomb (2.4.4) experimental; urgency=medium
* debian: Bump to Debhelper 10.
mpx_mul4_t_CPPFLAGS = \
$(AM_CPPFLAGS) \
-DTEST_MUL4 -DSRCDIR="\"$(srcdir)\""
+ mpx_mul4_t_LDFLAGS = $(TEST_LDFLAGS)
mpx_mul4_t_LDADD = $(top_builddir)/libcatacomb.la $(mLib_LIBS)
EXTRA_DIST += t/mpx-mul4
pkginclude_HEADERS += pgen.h
libmath_la_SOURCES += pgen.c
libmath_la_SOURCES += pgen-gcd.c
+libmath_la_SOURCES += pgen-granfrob.c
libmath_la_SOURCES += pgen-simul.c
libmath_la_SOURCES += pgen-stdev.c
-TESTS += pgen.t$(EXEEXT)
+TESTS += pgen.t$(EXEEXT) pgen-granfrob.t$(EXEEXT)
EXTRA_DIST += t/pgen
## Finding primitive elements in finite fields.
f25519_p10_t_SOURCES = f25519.c
f25519_p10_t_CPPFLAGS = $(AM_CPPFLAGS) -DTEST_RIG -DSRCDIR="\"$(srcdir)\""
f25519_p10_t_CPPFLAGS += -DF25519_IMPL=10
+ f25519_p10_t_LDFLAGS = $(TEST_LDFLAGS)
f25519_p10_t_LDADD = $(TEST_LIBS) $(top_builddir)/libcatacomb.la
f25519_p10_t_LDADD += $(mLib_LIBS) $(CATACOMB_LIBS) $(LIBS)
fgoldi_p12_t_SOURCES = fgoldi.c
fgoldi_p12_t_CPPFLAGS = $(AM_CPPFLAGS) -DTEST_RIG -DSRCDIR="\"$(srcdir)\""
fgoldi_p12_t_CPPFLAGS += -DFGOLDI_IMPL=12
+ fgoldi_p12_t_LDFLAGS = $(TEST_LDFLAGS)
fgoldi_p12_t_LDADD = $(TEST_LIBS) $(top_builddir)/libcatacomb.la
fgoldi_p12_t_LDADD += $(mLib_LIBS) $(CATACOMB_LIBS) $(LIBS)
/// MA 02111-1307, USA.
///--------------------------------------------------------------------------
-/// External definitions.
+/// Preliminaries.
#include "config.h"
#include "asm-common.h"
-///--------------------------------------------------------------------------
-/// Prologue.
-
.arch pentium4
+
.text
///--------------------------------------------------------------------------
.macro mulcore r, i, slo, shi, d0, d1=nil, d2=nil, d3=nil
// Multiply R_I by the expanded operand SLO/SHI, and leave the pieces
// of the product in registers D0, D1, D2, D3.
- pshufd \d0, \r, SHUF(3, \i, 3, \i) // (r_i, ?, r_i, ?)
+ pshufd \d0, \r, SHUF(\i, 3, \i, 3) // (r_i, ?; r_i, ?)
.ifnes "\d1", "nil"
- movdqa \d1, \slo // (s'_0, s'_1, s''_0, s''_1)
+ movdqa \d1, \slo // (s'_0, s'_1; s''_0, s''_1)
.endif
.ifnes "\d3", "nil"
- movdqa \d3, \shi // (s'_2, s'_3, s''_2, s''_3)
+ movdqa \d3, \shi // (s'_2, s'_3; s''_2, s''_3)
.endif
.ifnes "\d1", "nil"
- psrldq \d1, 4 // (s'_1, s''_0, s''_1, 0)
+ psrldq \d1, 4 // (s'_1, s''_0; s''_1, 0)
.endif
.ifnes "\d2", "nil"
- movdqa \d2, \d0 // another copy of (r_i, ?, r_i, ?)
+ movdqa \d2, \d0 // another copy of (r_i, ?; r_i, ?)
.endif
.ifnes "\d3", "nil"
- psrldq \d3, 4 // (s'_3, s''_2, s''_3, 0)
+ psrldq \d3, 4 // (s'_3, s''_2; s''_3, 0)
.endif
.ifnes "\d1", "nil"
- pmuludq \d1, \d0 // (r_i s'_1, r_i s''_1)
+ pmuludq \d1, \d0 // (r_i s'_1; r_i s''_1)
.endif
.ifnes "\d3", "nil"
- pmuludq \d3, \d0 // (r_i s'_3, r_i s''_3)
+ pmuludq \d3, \d0 // (r_i s'_3; r_i s''_3)
.endif
.ifnes "\d2", "nil"
- pmuludq \d2, \shi // (r_i s'_2, r_i s''_2)
+ pmuludq \d2, \shi // (r_i s'_2; r_i s''_2)
.endif
- pmuludq \d0, \slo // (r_i s'_0, r_i s''_0)
+ pmuludq \d0, \slo // (r_i s'_0; r_i s''_0)
.endm
.macro accum c0, c1=nil, c2=nil, c3=nil
// lane 0 or 1 of D; the high two lanes of D are clobbered. On
// completion, XMM3 is clobbered. If CC is `nil', then the
// contribution which would have been added to it is left in C.
- pshufd xmm3, \c, SHUF(2, 3, 3, 3) // (?, ?, ?, t = c'' mod B)
- psrldq xmm3, 12 // (t, 0, 0, 0) = (t, 0)
- pslldq xmm3, 2 // (t b, 0)
- paddq \c, xmm3 // (c' + t b, c'')
+ pshufd xmm3, \c, SHUF(3, 3, 3, 2) // (?, ?; ?, t = c'' mod B)
+ psrldq xmm3, 12 // (t, 0; 0, 0) = (t; 0)
+ pslldq xmm3, 2 // (t b; 0)
+ paddq \c, xmm3 // (c' + t b; c'')
.ifeqs "\pos", "lo"
movdqa \d, \c
.else
// of the value represented in C are written at POS in D, and the
// remaining bits are left at the bottom of T.
movdqa \t, \c
- psllq \t, 16 // (?, c'' b)
- pslldq \c, 8 // (0, c')
- paddq \t, \c // (?, c' + c'' b)
- psrldq \t, 8 // c' + c'' b
+ psllq \t, 16 // (?; c'' b)
+ pslldq \c, 8 // (0; c')
+ paddq \t, \c // (?; c' + c'' b)
+ psrldq \t, 8 // (c' + c'' b; 0) = (c; 0)
.ifeqs "\pos", "lo"
movdqa \d, \t
.else
punpckldq \d, \t
.endif
- psrldq \t, 4 // floor((c' + c'' b)/B)
+ psrldq \t, 4 // (floor(c/B); 0)
.endm
.macro expand z, a, b, c=nil, d=nil
// On entry, A and C hold packed 128-bit values, and Z is zero. On
// exit, A:B and C:D together hold the same values in expanded
// form. If C is `nil', then only expand A to A:B.
- movdqa \b, \a // (a_0, a_1, a_2, a_3)
+ movdqa \b, \a // (a_0, a_1; a_2, a_3)
.ifnes "\c", "nil"
- movdqa \d, \c // (c_0, c_1, c_2, c_3)
+ movdqa \d, \c // (c_0, c_1; c_2, c_3)
.endif
- punpcklwd \a, \z // (a'_0, a''_0, a'_1, a''_1)
- punpckhwd \b, \z // (a'_2, a''_2, a'_3, a''_3)
+ punpcklwd \a, \z // (a'_0, a''_0; a'_1, a''_1)
+ punpckhwd \b, \z // (a'_2, a''_2; a'_3, a''_3)
.ifnes "\c", "nil"
- punpcklwd \c, \z // (c'_0, c''_0, c'_1, c''_1)
- punpckhwd \d, \z // (c'_2, c''_2, c'_3, c''_3)
+ punpcklwd \c, \z // (c'_0, c''_0; c'_1, c''_1)
+ punpckhwd \d, \z // (c'_2, c''_2; c'_3, c''_3)
.endif
- pshufd \a, \a, SHUF(3, 1, 2, 0) // (a'_0, a'_1, a''_0, a''_1)
- pshufd \b, \b, SHUF(3, 1, 2, 0) // (a'_2, a'_3, a''_2, a''_3)
+ pshufd \a, \a, SHUF(0, 2, 1, 3) // (a'_0, a'_1; a''_0, a''_1)
+ pshufd \b, \b, SHUF(0, 2, 1, 3) // (a'_2, a'_3; a''_2, a''_3)
.ifnes "\c", "nil"
- pshufd \c, \c, SHUF(3, 1, 2, 0) // (c'_0, c'_1, c''_0, c''_1)
- pshufd \d, \d, SHUF(3, 1, 2, 0) // (c'_2, c'_3, c''_2, c''_3)
+ pshufd \c, \c, SHUF(0, 2, 1, 3) // (c'_0, c'_1; c''_0, c''_1)
+ pshufd \d, \d, SHUF(0, 2, 1, 3) // (c'_2, c'_3; c''_2, c''_3)
.endif
.endm
// we can do that, we must gather them together.
movdqa \t, \c0
movdqa \u, \c1
- punpcklqdq \t, \c2 // (y'_0, y'_2)
- punpckhqdq \c0, \c2 // (y''_0, y''_2)
- punpcklqdq \u, \c3 // (y'_1, y'_3)
- punpckhqdq \c1, \c3 // (y''_1, y''_3)
+ punpcklqdq \t, \c2 // (y'_0; y'_2)
+ punpckhqdq \c0, \c2 // (y''_0; y''_2)
+ punpcklqdq \u, \c3 // (y'_1; y'_3)
+ punpckhqdq \c1, \c3 // (y''_1; y''_3)
// Now split the double-prime pieces. The high (up to) 48 bits will
// go up; the low 16 bits go down.
movdqa \c3, \c1
psllq \c2, 48
psllq \c3, 48
- psrlq \c0, 16 // high parts of (y''_0, y''_2)
- psrlq \c1, 16 // high parts of (y''_1, y''_3)
- psrlq \c2, 32 // low parts of (y''_0, y''_2)
- psrlq \c3, 32 // low parts of (y''_1, y''_3)
+ psrlq \c0, 16 // high parts of (y''_0; y''_2)
+ psrlq \c1, 16 // high parts of (y''_1; y''_3)
+ psrlq \c2, 32 // low parts of (y''_0; y''_2)
+ psrlq \c3, 32 // low parts of (y''_1; y''_3)
.ifnes "\hi", "nil"
movdqa \hi, \c1
.endif
- pslldq \c1, 8 // high part of (0, y''_1)
+ pslldq \c1, 8 // high part of (0; y''_1)
paddq \t, \c2 // propagate down
paddq \u, \c3
- paddq \t, \c1 // and up: (y_0, y_2)
- paddq \u, \c0 // (y_1, y_3)
+ paddq \t, \c1 // and up: (y_0; y_2)
+ paddq \u, \c0 // (y_1; y_3)
.ifnes "\hi", "nil"
- psrldq \hi, 8 // high part of (y''_3, 0)
+ psrldq \hi, 8 // high part of (y''_3; 0)
.endif
// Finally extract the answer. This complicated dance is better than
// storing to memory and loading, because the piecemeal stores
// inhibit store forwarding.
- movdqa \c3, \t // (y_0, y_1)
- movdqa \lo, \t // (y^*_0, ?, ?, ?)
- psrldq \t, 8 // (y_2, 0)
- psrlq \c3, 32 // (floor(y_0/B), ?)
- paddq \c3, \u // (y_1 + floor(y_0/B), ?)
- movdqa \c1, \c3 // (y^*_1, ?, ?, ?)
- psrldq \u, 8 // (y_3, 0)
- psrlq \c3, 32 // (floor((y_1 B + y_0)/B^2, ?)
- paddq \c3, \t // (y_2 + floor((y_1 B + y_0)/B^2, ?)
- punpckldq \lo, \c3 // (y^*_0, y^*_2, ?, ?)
- psrlq \c3, 32 // (floor((y_2 B^2 + y_1 B + y_0)/B^3, ?)
- paddq \c3, \u // (y_3 + floor((y_2 B^2 + y_1 B + y_0)/B^3, ?)
+ movdqa \c3, \t // (y_0; ?)
+ movdqa \lo, \t // (y^*_0, ?; ?, ?)
+ psrldq \t, 8 // (y_2; 0)
+ psrlq \c3, 32 // (floor(y_0/B); ?)
+ paddq \c3, \u // (y_1 + floor(y_0/B); ?)
+ movdqa \c1, \c3 // (y^*_1, ?; ?, ?)
+ psrldq \u, 8 // (y_3; 0)
+	psrlq	\c3, 32			// (floor((y_1 B + y_0)/B^2); ?)
+	paddq	\c3, \t			// (y_2 + floor((y_1 B + y_0)/B^2); ?)
+ punpckldq \lo, \c3 // (y^*_0, y^*_2; ?, ?)
+	psrlq	\c3, 32			// (floor((y_2 B^2 + y_1 B + y_0)/B^3); ?)
+	paddq	\c3, \u			// (y_3 + floor((y_2 B^2 + y_1 B + y_0)/B^3); ?)
.ifnes "\hi", "nil"
movdqa \t, \c3
pxor \u, \u
.endif
- punpckldq \c1, \c3 // (y^*_1, y^*_3, ?, ?)
+ punpckldq \c1, \c3 // (y^*_1, y^*_3; ?, ?)
.ifnes "\hi", "nil"
psrlq \t, 32 // very high bits of y
paddq \hi, \t
// On exit, the carry registers, including XMM15, are updated to hold
// C + A; XMM0, XMM1, XMM2, and XMM3 are clobbered. The other
// registers are preserved.
- movd xmm0, [rdi + 0] // (a_0, 0)
- movd xmm1, [rdi + 4] // (a_1, 0)
- movd xmm2, [rdi + 8] // (a_2, 0)
- movd xmm15, [rdi + 12] // (a_3, 0)
- paddq xmm12, xmm0 // (c'_0 + a_0, c''_0)
- paddq xmm13, xmm1 // (c'_1 + a_1, c''_1)
- paddq xmm14, xmm2 // (c'_2 + a_2, c''_2 + a_3 b)
+ movd xmm0, [rdi + 0] // (a_0; 0)
+ movd xmm1, [rdi + 4] // (a_1; 0)
+ movd xmm2, [rdi + 8] // (a_2; 0)
+ movd xmm15, [rdi + 12] // (a_3; 0)
+ paddq xmm12, xmm0 // (c'_0 + a_0; c''_0)
+ paddq xmm13, xmm1 // (c'_1 + a_1; c''_1)
+ paddq xmm14, xmm2 // (c'_2 + a_2; c''_2 + a_3 b)
.endm
///--------------------------------------------------------------------------
mulcore xmm7, 1, xmm10, xmm11, xmm0, xmm1, xmm2
accum xmm4, xmm5, xmm6
- punpckldq xmm12, xmm15 // (w_0, 0, w_1, 0)
- punpckhdq xmm14, xmm15 // (w_2, 0, w_3, 0)
+ punpckldq xmm12, xmm15 // (w_0, 0; w_1, 0)
+ punpckhdq xmm14, xmm15 // (w_2, 0; w_3, 0)
mulcore xmm7, 2, xmm10, xmm11, xmm0, xmm1
accum xmm5, xmm6
mulcore xmm7, 3, xmm10, xmm11, xmm0
accum xmm6
- punpckldq xmm12, xmm2 // (w_0, 0, 0, 0)
- punpckldq xmm14, xmm2 // (w_2, 0, 0, 0)
- punpckhdq xmm13, xmm2 // (w_1, 0, 0, 0)
- punpckhdq xmm15, xmm2 // (w_3, 0, 0, 0)
+ punpckldq xmm12, xmm2 // (w_0, 0; 0, 0)
+ punpckldq xmm14, xmm2 // (w_2, 0; 0, 0)
+ punpckhdq xmm13, xmm2 // (w_1, 0; 0, 0)
+ punpckhdq xmm15, xmm2 // (w_3, 0; 0, 0)
// That's lots of pieces. Now we have to assemble the answer.
squash xmm3, xmm4, xmm5, xmm6, xmm0, xmm1, xmm10
mulcore xmm7, 1, xmm8, xmm9, xmm0, xmm1, xmm2
accum xmm4, xmm5, xmm6
- punpckldq xmm12, xmm15 // (w_0, 0, w_1, 0)
- punpckhdq xmm14, xmm15 // (w_2, 0, w_3, 0)
+ punpckldq xmm12, xmm15 // (w_0, 0; w_1, 0)
+ punpckhdq xmm14, xmm15 // (w_2, 0; w_3, 0)
mulcore xmm7, 2, xmm8, xmm9, xmm0, xmm1
accum xmm5, xmm6
mulcore xmm7, 3, xmm8, xmm9, xmm0
accum xmm6
- punpckldq xmm12, xmm2 // (w_0, 0, 0, 0)
- punpckldq xmm14, xmm2 // (w_2, 0, 0, 0)
- punpckhdq xmm13, xmm2 // (w_1, 0, 0, 0)
- punpckhdq xmm15, xmm2 // (w_3, 0, 0, 0)
+ punpckldq xmm12, xmm2 // (w_0, 0; 0, 0)
+ punpckldq xmm14, xmm2 // (w_2, 0; 0, 0)
+ punpckhdq xmm13, xmm2 // (w_1, 0; 0, 0)
+ punpckhdq xmm15, xmm2 // (w_3, 0; 0, 0)
// That's lots of pieces. Now we have to assemble the answer.
squash xmm3, xmm4, xmm5, xmm6, xmm0, xmm1, xmm10
///--------------------------------------------------------------------------
/// Bulk multipliers.
+FUNC(mpx_umul4_amd64_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ .arch pentium4
+ENDFUNC
+
FUNC(mpx_umul4_amd64_sse2)
// void mpx_umul4_amd64_sse2(mpw *dv, const mpw *av, const mpw *avl,
// const mpw *bv, const mpw *bvl);
ENDFUNC
+FUNC(mpxmont_mul4_amd64_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ .arch pentium4
+ENDFUNC
+
FUNC(mpxmont_mul4_amd64_sse2)
// void mpxmont_mul4_amd64_sse2(mpw *dv, const mpw *av, const mpw *bv,
// const mpw *nv, size_t n, const mpw *mi);
ENDFUNC
+FUNC(mpxmont_redc4_amd64_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ .arch pentium4
+ENDFUNC
+
FUNC(mpxmont_redc4_amd64_sse2)
// void mpxmont_redc4_amd64_sse2(mpw *dv, mpw *dvl, const mpw *nv,
// size_t n, const mpw *mi);
.endm
.macro testldcarry
- movdqu xmm12, [rcx + 0] // (c'_0, c''_0)
- movdqu xmm13, [rcx + 16] // (c'_1, c''_1)
- movdqu xmm14, [rcx + 32] // (c'_2, c''_2)
+ movdqu xmm12, [rcx + 0] // (c'_0; c''_0)
+ movdqu xmm13, [rcx + 16] // (c'_1; c''_1)
+ movdqu xmm14, [rcx + 32] // (c'_2; c''_2)
.endm
.macro testtop u=nil
testepilogue
ENDFUNC
+ FUNC(test_mul4zc)
+ testprologue smul
+ testldcarry
+ testtop nil
+ call mul4zc
+ testtail
+ testcarryout
+ testepilogue
+ ENDFUNC
+
FUNC(test_mla4)
testprologue smul
testldcarry
testepilogue
ENDFUNC
+ FUNC(test_mla4zc)
+ testprologue smul
+ testldcarry
+ testtop nil
+ call mla4zc
+ testtail
+ testcarryout
+ testepilogue
+ ENDFUNC
+
FUNC(test_mmul4)
testprologue mmul
testtop r11
/// MA 02111-1307, USA.
///--------------------------------------------------------------------------
-/// External definitions.
+/// Preliminaries.
#include "config.h"
#include "asm-common.h"
-///--------------------------------------------------------------------------
-/// Prologue.
-
.arch pentium4
+
.text
///--------------------------------------------------------------------------
.macro mulcore r, s, d0, d1=nil, d2=nil, d3=nil
// Load a word r_i from R, multiply by the expanded operand [S], and
// leave the pieces of the product in registers D0, D1, D2, D3.
- movd \d0, \r // (r_i, 0, 0, 0)
+ movd \d0, \r // (r_i, 0; 0, 0)
.ifnes "\d1", "nil"
- movdqa \d1, [\s] // (s'_0, s'_1, s''_0, s''_1)
+ movdqa \d1, [\s] // (s'_0, s'_1; s''_0, s''_1)
.endif
.ifnes "\d3", "nil"
- movdqa \d3, [\s + 16] // (s'_2, s'_3, s''_2, s''_3)
+ movdqa \d3, [\s + 16] // (s'_2, s'_3; s''_2, s''_3)
.endif
- pshufd \d0, \d0, SHUF(3, 0, 3, 0) // (r_i, ?, r_i, ?)
+ pshufd \d0, \d0, SHUF(0, 3, 0, 3) // (r_i, ?; r_i, ?)
.ifnes "\d1", "nil"
- psrldq \d1, 4 // (s'_1, s''_0, s''_1, 0)
+ psrldq \d1, 4 // (s'_1, s''_0; s''_1, 0)
.endif
.ifnes "\d2", "nil"
.ifnes "\d3", "nil"
- movdqa \d2, \d3 // another copy of (s'_2, s'_3, ...)
+ movdqa \d2, \d3 // another copy of (s'_2, s'_3; ...)
.else
- movdqa \d2, \d0 // another copy of (r_i, ?, r_i, ?)
+ movdqa \d2, \d0 // another copy of (r_i, ?; r_i, ?)
.endif
.endif
.ifnes "\d3", "nil"
- psrldq \d3, 4 // (s'_3, s''_2, s''_3, 0)
+ psrldq \d3, 4 // (s'_3, s''_2; s''_3, 0)
.endif
.ifnes "\d1", "nil"
- pmuludq \d1, \d0 // (r_i s'_1, r_i s''_1)
+ pmuludq \d1, \d0 // (r_i s'_1; r_i s''_1)
.endif
.ifnes "\d3", "nil"
- pmuludq \d3, \d0 // (r_i s'_3, r_i s''_3)
+ pmuludq \d3, \d0 // (r_i s'_3; r_i s''_3)
.endif
.ifnes "\d2", "nil"
.ifnes "\d3", "nil"
- pmuludq \d2, \d0 // (r_i s'_2, r_i s''_2)
+ pmuludq \d2, \d0 // (r_i s'_2; r_i s''_2)
.else
pmuludq \d2, [\s + 16]
.endif
.endif
- pmuludq \d0, [\s] // (r_i s'_0, r_i s''_0)
+ pmuludq \d0, [\s] // (r_i s'_0; r_i s''_0)
.endm
.macro accum c0, c1=nil, c2=nil, c3=nil
// carry registers. On completion, XMM3 is clobbered. If CC is
// `nil', then the contribution which would have been added to it is
// left in C.
- pshufd xmm3, \c, SHUF(2, 3, 3, 3) // (?, ?, ?, t = c'' mod B)
- psrldq xmm3, 12 // (t, 0, 0, 0) = (t, 0)
- pslldq xmm3, 2 // (t b, 0)
- paddq \c, xmm3 // (c' + t b, c'')
+ pshufd xmm3, \c, SHUF(3, 3, 3, 2) // (?, ?; ?, t = c'' mod B)
+	psrldq	xmm3, 12		// (t, 0; 0, 0) = (t; 0)
+ pslldq xmm3, 2 // (t b; 0)
+ paddq \c, xmm3 // (c' + t b; c'')
movd \d, \c
psrlq \c, 32 // floor(c/B)
.ifnes "\cc", "nil"
// of the value represented in C are written to D, and the remaining
// bits are left at the bottom of T.
movdqa \t, \c
- psllq \t, 16 // (?, c'' b)
- pslldq \c, 8 // (0, c')
- paddq \t, \c // (?, c' + c'' b)
- psrldq \t, 8 // c' + c'' b
+ psllq \t, 16 // (?; c'' b)
+ pslldq \c, 8 // (0; c')
+ paddq \t, \c // (?; c' + c'' b)
+ psrldq \t, 8 // (c' + c'' b; 0) = (c; 0)
movd \d, \t
- psrldq \t, 4 // floor((c' + c'' b)/B)
+ psrldq \t, 4 // (floor(c/B); 0)
.endm
.macro expand z, a, b, c=nil, d=nil
// On entry, A and C hold packed 128-bit values, and Z is zero. On
// exit, A:B and C:D together hold the same values in expanded
// form. If C is `nil', then only expand A to A:B.
- movdqa \b, \a // (a_0, a_1, a_2, a_3)
+ movdqa \b, \a // (a_0, a_1; a_2, a_3)
.ifnes "\c", "nil"
- movdqa \d, \c // (c_0, c_1, c_2, c_3)
+ movdqa \d, \c // (c_0, c_1; c_2, c_3)
.endif
- punpcklwd \a, \z // (a'_0, a''_0, a'_1, a''_1)
- punpckhwd \b, \z // (a'_2, a''_2, a'_3, a''_3)
+ punpcklwd \a, \z // (a'_0, a''_0; a'_1, a''_1)
+ punpckhwd \b, \z // (a'_2, a''_2; a'_3, a''_3)
.ifnes "\c", "nil"
- punpcklwd \c, \z // (c'_0, c''_0, c'_1, c''_1)
- punpckhwd \d, \z // (c'_2, c''_2, c'_3, c''_3)
+ punpcklwd \c, \z // (c'_0, c''_0; c'_1, c''_1)
+ punpckhwd \d, \z // (c'_2, c''_2; c'_3, c''_3)
.endif
- pshufd \a, \a, SHUF(3, 1, 2, 0) // (a'_0, a'_1, a''_0, a''_1)
- pshufd \b, \b, SHUF(3, 1, 2, 0) // (a'_2, a'_3, a''_2, a''_3)
+ pshufd \a, \a, SHUF(0, 2, 1, 3) // (a'_0, a'_1; a''_0, a''_1)
+ pshufd \b, \b, SHUF(0, 2, 1, 3) // (a'_2, a'_3; a''_2, a''_3)
.ifnes "\c", "nil"
- pshufd \c, \c, SHUF(3, 1, 2, 0) // (c'_0, c'_1, c''_0, c''_1)
- pshufd \d, \d, SHUF(3, 1, 2, 0) // (c'_2, c'_3, c''_2, c''_3)
+ pshufd \c, \c, SHUF(0, 2, 1, 3) // (c'_0, c'_1; c''_0, c''_1)
+ pshufd \d, \d, SHUF(0, 2, 1, 3) // (c'_2, c'_3; c''_2, c''_3)
.endif
.endm
// we can do that, we must gather them together.
movdqa \t, \c0
movdqa \u, \c1
- punpcklqdq \t, \c2 // (y'_0, y'_2)
- punpckhqdq \c0, \c2 // (y''_0, y''_2)
- punpcklqdq \u, \c3 // (y'_1, y'_3)
- punpckhqdq \c1, \c3 // (y''_1, y''_3)
+ punpcklqdq \t, \c2 // (y'_0; y'_2)
+ punpckhqdq \c0, \c2 // (y''_0; y''_2)
+ punpcklqdq \u, \c3 // (y'_1; y'_3)
+ punpckhqdq \c1, \c3 // (y''_1; y''_3)
// Now split the double-prime pieces. The high (up to) 48 bits will
// go up; the low 16 bits go down.
movdqa \c3, \c1
psllq \c2, 48
psllq \c3, 48
- psrlq \c0, 16 // high parts of (y''_0, y''_2)
- psrlq \c1, 16 // high parts of (y''_1, y''_3)
- psrlq \c2, 32 // low parts of (y''_0, y''_2)
- psrlq \c3, 32 // low parts of (y''_1, y''_3)
+ psrlq \c0, 16 // high parts of (y''_0; y''_2)
+ psrlq \c1, 16 // high parts of (y''_1; y''_3)
+ psrlq \c2, 32 // low parts of (y''_0; y''_2)
+ psrlq \c3, 32 // low parts of (y''_1; y''_3)
.ifnes "\hi", "nil"
movdqa \hi, \c1
.endif
- pslldq \c1, 8 // high part of (0, y''_1)
+ pslldq \c1, 8 // high part of (0; y''_1)
paddq \t, \c2 // propagate down
paddq \u, \c3
- paddq \t, \c1 // and up: (y_0, y_2)
- paddq \u, \c0 // (y_1, y_3)
+ paddq \t, \c1 // and up: (y_0; y_2)
+ paddq \u, \c0 // (y_1; y_3)
.ifnes "\hi", "nil"
- psrldq \hi, 8 // high part of (y''_3, 0)
+ psrldq \hi, 8 // high part of (y''_3; 0)
.endif
// Finally extract the answer. This complicated dance is better than
// storing to memory and loading, because the piecemeal stores
// inhibit store forwarding.
- movdqa \c3, \t // (y_0, y_1)
- movdqa \lo, \t // (y^*_0, ?, ?, ?)
- psrldq \t, 8 // (y_2, 0)
- psrlq \c3, 32 // (floor(y_0/B), ?)
- paddq \c3, \u // (y_1 + floor(y_0/B), ?)
- movdqa \c1, \c3 // (y^*_1, ?, ?, ?)
- psrldq \u, 8 // (y_3, 0)
- psrlq \c3, 32 // (floor((y_1 B + y_0)/B^2, ?)
- paddq \c3, \t // (y_2 + floor((y_1 B + y_0)/B^2, ?)
- punpckldq \lo, \c3 // (y^*_0, y^*_2, ?, ?)
- psrlq \c3, 32 // (floor((y_2 B^2 + y_1 B + y_0)/B^3, ?)
- paddq \c3, \u // (y_3 + floor((y_2 B^2 + y_1 B + y_0)/B^3, ?)
+ movdqa \c3, \t // (y_0; ?)
+ movdqa \lo, \t // (y^*_0, ?; ?, ?)
+ psrldq \t, 8 // (y_2; 0)
+ psrlq \c3, 32 // (floor(y_0/B); ?)
+ paddq \c3, \u // (y_1 + floor(y_0/B); ?)
+ movdqa \c1, \c3 // (y^*_1, ?; ?, ?)
+ psrldq \u, 8 // (y_3; 0)
+	psrlq	\c3, 32			// (floor((y_1 B + y_0)/B^2); ?)
+	paddq	\c3, \t			// (y_2 + floor((y_1 B + y_0)/B^2); ?)
+ punpckldq \lo, \c3 // (y^*_0, y^*_2; ?, ?)
+	psrlq	\c3, 32			// (floor((y_2 B^2 + y_1 B + y_0)/B^3); ?)
+	paddq	\c3, \u			// (y_3 + floor((y_2 B^2 + y_1 B + y_0)/B^3); ?)
.ifnes "\hi", "nil"
movdqa \t, \c3
pxor \u, \u
.endif
- punpckldq \c1, \c3 // (y^*_1, y^*_3, ?, ?)
+ punpckldq \c1, \c3 // (y^*_1, y^*_3; ?, ?)
.ifnes "\hi", "nil"
psrlq \t, 32 // very high bits of y
paddq \hi, \t
// On exit, the carry registers, including XMM7, are updated to hold
// C + A; XMM0, XMM1, XMM2, and XMM3 are clobbered. The other
// registers are preserved.
- movd xmm0, [edi + 0] // (a_0, 0)
- movd xmm1, [edi + 4] // (a_1, 0)
- movd xmm2, [edi + 8] // (a_2, 0)
- movd xmm7, [edi + 12] // (a_3, 0)
-
- paddq xmm4, xmm0 // (c'_0 + a_0, c''_0)
- paddq xmm5, xmm1 // (c'_1 + a_1, c''_1)
- paddq xmm6, xmm2 // (c'_2 + a_2, c''_2 + a_3 b)
+ movd xmm0, [edi + 0] // (a_0; 0)
+ movd xmm1, [edi + 4] // (a_1; 0)
+ movd xmm2, [edi + 8] // (a_2; 0)
+ movd xmm7, [edi + 12] // (a_3; 0)
+
+ paddq xmm4, xmm0 // (c'_0 + a_0; c''_0)
+ paddq xmm5, xmm1 // (c'_1 + a_1; c''_1)
+ paddq xmm6, xmm2 // (c'_2 + a_2; c''_2 + a_3 b)
.endm
///--------------------------------------------------------------------------
///--------------------------------------------------------------------------
/// Bulk multipliers.
+FUNC(mpx_umul4_x86_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ // and drop through...
+ .arch pentium4
+ENDFUNC
+
FUNC(mpx_umul4_x86_sse2)
// void mpx_umul4_x86_sse2(mpw *dv, const mpw *av, const mpw *avl,
// const mpw *bv, const mpw *bvl);
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
+ setfp
and esp, ~15
sub esp, 32
endprologue
ENDFUNC
+FUNC(mpxmont_mul4_x86_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ // and drop through...
+ .arch pentium4
+ENDFUNC
+
FUNC(mpxmont_mul4_x86_sse2)
// void mpxmont_mul4_x86_sse2(mpw *dv, const mpw *av, const mpw *bv,
// const mpw *nv, size_t n, const mpw *mi);
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
+ setfp
and esp, ~15
sub esp, 112
endprologue
ENDFUNC
+FUNC(mpxmont_redc4_x86_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ // and drop through...
+ .arch pentium4
+ENDFUNC
+
FUNC(mpxmont_redc4_x86_sse2)
// void mpxmont_redc4_x86_sse2(mpw *dv, mpw *dvl, const mpw *nv,
// size_t n, const mpw *mi);
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
+ setfp
and esp, ~15
sub esp, 76
endprologue
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
+ setfp
and esp, ~15
sub esp, 3*32 + 4*4
endprologue
.macro testldcarry c
mov ecx, \c // -> c
- movdqu xmm4, [ecx + 0] // (c'_0, c''_0)
- movdqu xmm5, [ecx + 16] // (c'_1, c''_1)
- movdqu xmm6, [ecx + 32] // (c'_2, c''_2)
+ movdqu xmm4, [ecx + 0] // (c'_0; c''_0)
+ movdqu xmm5, [ecx + 16] // (c'_1; c''_1)
+ movdqu xmm6, [ecx + 32] // (c'_2; c''_2)
.endm
.macro testexpand v=nil, y=nil
testepilogue
ENDFUNC
+ FUNC(test_mul4zc)
+ testprologue [ebp + 36]
+ testldcarry [ebp + 24]
+ testexpand nil, [ebp + 32]
+ mov edi, [ebp + 20]
+ testtop nil, [ebp + 28]
+ call mul4zc
+ testtail [ebp + 40]
+ testcarryout [ebp + 24]
+ testepilogue
+ ENDFUNC
+
FUNC(test_mla4)
testprologue [ebp + 36]
testldcarry [ebp + 24]
testepilogue
ENDFUNC
+ FUNC(test_mla4zc)
+ testprologue [ebp + 36]
+ testldcarry [ebp + 24]
+ testexpand nil, [ebp + 32]
+ mov edi, [ebp + 20]
+ testtop nil, [ebp + 28]
+ call mla4zc
+ testtail [ebp + 40]
+ testcarryout [ebp + 24]
+ testepilogue
+ ENDFUNC
+
FUNC(test_mmul4)
testprologue [ebp + 48]
testexpand [ebp + 40], [ebp + 44]
TEST_LIBS = libsymm.la
+noinst_LTLIBRARIES += libsymmtest.la
+libsymmtest_la_SOURCES =
+libsymmtest_la_CFLAGS = $(AM_CFLAGS) -DSRCDIR=\"$(srcdir)\"
+TEST_LIBS += libsymmtest.la
+
VPATH += $(srcdir)/modes
###--------------------------------------------------------------------------
blkc="$(BLKCS)" \
blkcmode="$(BLKCMODES)" \
blkcciphermode="$(BLKCCIPHERMODES)" \
+ blkcaeadmode="$(BLKCAEADMODES)" \
+ blkcmacmode="$(BLKCMACMODES)" \
hash="$(HASHES)" \
hashmode="$(HASHMODES)" \
hashciphermode="$(HASHCIPHERMODES)" \
## Initialize lists of known classes.
ALL_CIPHERS = $(CIPHER_MODES)
+ALL_AEADS = $(AEAD_MODES)
ALL_HASHES = $(HASHES)
ALL_MACS = $(MAC_MODES)
BLKCCIPHERMODES =
BLKCMODES += $(BLKCCIPHERMODES)
+BLKCAEADMODES =
+BLKCMODES += $(BLKCAEADMODES)
+
+BLKCMACMODES =
+BLKCMODES += $(BLKCMACMODES)
+
## A tool for translating the AES-contest test vectors into a form our test
## rigs understand.
EXTRA_DIST += aes-trans
BLKCS += cast128 cast256
libsymm_la_SOURCES += cast-s.c cast-sk.c cast-base.h
cast256.log: t/cast256
-EXTRA_DIST += t/cast256.aes
+EXTRA_DIST += t/cast256.aes t/cast256.local
MAINTAINERCLEANFILES += $(srcdir)/t/cast256
-t/cast256: t/cast256.aes
- $(AM_V_GEN)$(srcdir)/aes-trans CAST256 \
- <$(srcdir)/t/cast256.aes \
- >$(srcdir)/t/cast256.new && \
- mv $(srcdir)/t/cast256.new $(srcdir)/t/cast256
+t/cast256: t/cast256.aes t/cast256.local
+ $(AM_V_GEN)cd $(srcdir) && \
+ { ./aes-trans CAST256 <t/cast256.aes && \
+ cat t/cast256.local; \
+ } >t/cast256.new && \
+ mv t/cast256.new t/cast256
## IBM's `DES' block cipher, by Feistel, Coppersmith, and others.
BLKCS += des des3
mv $(precomp)/symm/mars-tab.c.new $(precomp)/symm/mars-tab.c
endif
mars.log: t/mars
-EXTRA_DIST += t/mars.aes
+EXTRA_DIST += t/mars.aes t/mars.local
MAINTAINERCLEANFILES += $(srcdir)/t/mars
-t/mars: t/mars.aes
- $(AM_V_GEN)$(srcdir)/aes-trans Mars \
- <$(srcdir)/t/mars.aes \
- >$(srcdir)/t/mars.new && \
- mv $(srcdir)/t/mars.new $(srcdir)/t/mars
+t/mars: t/mars.aes t/mars.local
+ $(AM_V_GEN)cd $(srcdir) && \
+ { ./aes-trans Mars <t/mars.aes && \
+ cat t/mars.local; \
+ } >t/mars.new && \
+ mv t/mars.new t/mars
## Daemen, Peeters, Van Assche and Rijmen's `Noekeon'.
BLKCS += noekeon
$(precomp)/symm/rijndael-tab.c
endif
rijndael.log: t/rijndael
-EXTRA_DIST += t/rijndael.aes
+EXTRA_DIST += t/rijndael.aes t/rijndael.local
MAINTAINERCLEANFILES += $(srcdir)/t/rijndael
-t/rijndael: t/rijndael.aes
- $(AM_V_GEN)$(srcdir)/aes-trans Rijndael \
- <$(srcdir)/t/rijndael.aes \
- >$(srcdir)/t/rijndael.new && \
- mv $(srcdir)/t/rijndael.new $(srcdir)/t/rijndael
+t/rijndael: t/rijndael.aes t/rijndael.local
+ $(AM_V_GEN)cd $(srcdir) && \
+ { ./aes-trans Rijndael <t/rijndael.aes && \
+ cat t/rijndael.local; \
+ } >t/rijndael.new && \
+ mv t/rijndael.new t/rijndael
## Massey's `SAFER' block ciphers.
BLKCS += safer safersk
check_PROGRAMS += serpent-check
TESTS += serpent-check
serpent.log: t/serpent
-EXTRA_DIST += t/serpent.aes
+EXTRA_DIST += t/serpent.aes t/serpent.local
MAINTAINERCLEANFILES += $(srcdir)/t/serpent
-t/serpent: t/serpent.aes
- $(AM_V_GEN)$(srcdir)/aes-trans Serpent -v rev=1 \
- <$(srcdir)/t/serpent.aes \
- >$(srcdir)/t/serpent.new && \
- mv $(srcdir)/t/serpent.new $(srcdir)/t/serpent
+t/serpent: t/serpent.aes t/serpent.local
+ $(AM_V_GEN)cd $(srcdir) && \
+ { ./aes-trans Serpent -v rev=1 <t/serpent.aes && \
+ cat t/serpent.local; \
+ } >t/serpent.new && \
+ mv t/serpent.new t/serpent
## The National Security Agency's `Skipjack' block cipher. You don't want to
## use this.
$(precomp)/symm/twofish-tab.c
endif
twofish.log: t/twofish
-EXTRA_DIST += t/twofish.aes
+EXTRA_DIST += t/twofish.aes t/twofish.local
MAINTAINERCLEANFILES += $(srcdir)/t/twofish
-t/twofish: t/twofish.aes
- $(AM_V_GEN)$(srcdir)/aes-trans Twofish \
- <$(srcdir)/t/twofish.aes \
- >$(srcdir)/t/twofish.new && \
- mv $(srcdir)/t/twofish.new $(srcdir)/t/twofish
+t/twofish: t/twofish.aes t/twofish.local
+ $(AM_V_GEN)cd $(srcdir) && \
+ { ./aes-trans Twofish <t/twofish.aes && \
+ cat t/twofish.local; \
+ } >t/twofish.new && \
+ mv t/twofish.new t/twofish
## The old NIST modes for DES.
BLKCCIPHERMODES += cbc cfb ecb ofb
## Counter mode.
BLKCCIPHERMODES += counter
+## CMAC and PMAC1 message-authentication modes.
+BLKCMACMODES += cmac pmac1
+
+## Various AEAD modes.
+pkginclude_HEADERS += ocb.h
+BLKCAEADMODES += ccm eax gcm ocb1 ocb3
+libsymm_la_SOURCES += ccm.c gcm.c ocb.c
+if CPUFAM_X86
+libsymm_la_SOURCES += gcm-x86ish-pclmul.S
+endif
+if CPUFAM_AMD64
+libsymm_la_SOURCES += gcm-x86ish-pclmul.S
+endif
+if CPUFAM_ARMEL
+libsymm_la_SOURCES += gcm-arm-crypto.S
+endif
+if CPUFAM_ARM64
+libsymm_la_SOURCES += gcm-arm64-pmull.S
+endif
+
+TESTS += gcm.t$(EXEEXT)
+EXTRA_DIST += t/gcm
+
###--------------------------------------------------------------------------
### Hash functions.
poly1305_p11_t_SOURCES = poly1305.c
poly1305_p11_t_CPPFLAGS = $(AM_CPPFLAGS) -DTEST_RIG -DSRCDIR="\"$(srcdir)\""
poly1305_p11_t_CPPFLAGS += -DPOLY1305_IMPL=11
+ poly1305_p11_t_LDFLAGS = $(TEST_LDFLAGS)
poly1305_p11_t_LDADD = $(TEST_LIBS) $(top_builddir)/libcatacomb.la
poly1305_p11_t_LDADD += $(mLib_LIBS) $(CATACOMB_LIBS) $(LIBS)
+## Combining Salsa20/ChaCha with Poly1305.
+pkginclude_HEADERS += latinpoly.h latinpoly-def.h
+libsymm_la_SOURCES += latinpoly.c chacha-poly1305.c salsa20-poly1305.c
+libsymmtest_la_SOURCES += latinpoly-test.c latinpoly-test.h
+
+ALL_AEADS += chacha20-poly1305 salsa20-poly1305
+ALL_AEADS += chacha12-poly1305 salsa2012-poly1305
+ALL_AEADS += chacha8-poly1305 salsa208-poly1305
+ALL_AEADS += chacha20-naclbox salsa20-naclbox
+ALL_AEADS += chacha12-naclbox salsa2012-naclbox
+ALL_AEADS += chacha8-naclbox salsa208-naclbox
+STUBS_HDR += ChaCha20-Poly1305,chacha20-poly1305,latinpoly
+STUBS_HDR += ChaCha12-Poly1305,chacha12-poly1305,latinpoly
+STUBS_HDR += ChaCha8-Poly1305,chacha8-poly1305,latinpoly
+STUBS_HDR += Salsa20-Poly1305,salsa20-poly1305,latinpoly
+STUBS_HDR += Salsa20/12-Poly1305,salsa2012-poly1305,latinpoly
+STUBS_HDR += Salsa20/8-Poly1305,salsa208-poly1305,latinpoly
+STUBS_HDR += ChaCha20-NaClBox,chacha20-naclbox,latinpoly
+STUBS_HDR += ChaCha12-NaClBox,chacha12-naclbox,latinpoly
+STUBS_HDR += ChaCha8-NaClBox,chacha8-naclbox,latinpoly
+STUBS_HDR += Salsa20-NaClBox,salsa20-naclbox,latinpoly
+STUBS_HDR += Salsa20/12-NaClBox,salsa2012-naclbox,latinpoly
+STUBS_HDR += Salsa20/8-NaClBox,salsa208-naclbox,latinpoly
+TESTS += chacha-poly1305.t$(EXEEXT)
+TESTS += salsa20-poly1305.t$(EXEEXT)
+
###--------------------------------------------------------------------------
### Autogenerated mode implementations.
pkginclude_HEADERS += $(GENMODES_H)
$(GENMODES_H): modes/gen-stamp
+## Additional test machinery.
+libsymmtest_la_SOURCES += modes-test.c modes-test.h
+
###--------------------------------------------------------------------------
### Autogenerated stub headers.
$(AM_V_GEN)$(multigen) -g $(srcdir)/gthingtab.c.in gciphertab.c \
what=gcipher cls=gccipher thing="$(ALL_CIPHERS)"
+## Table of AEAD classes.
+pkginclude_HEADERS += gaead.h
+CLEANFILES += gaeadtab.c
+libsymm_la_SOURCES += gaead.c
+nodist_libsymm_la_SOURCES += gaeadtab.c
+gaeadtab.c: gthingtab.c.in Makefile.am
+ $(AM_V_GEN)$(multigen) -g $(srcdir)/gthingtab.c.in gaeadtab.c \
+ what=gaead cls=gcaead thing="$(ALL_AEADS)"
+
## Table of hash classes.
pkginclude_HEADERS += ghash.h ghash-def.h
CLEANFILES += ghashtab.c
## Run the test programs.
TESTS += $(SYMM_TESTS)
EXTRA_DIST += $(SYMM_TEST_FILES)
+EXTRA_DIST += $(REGRESSION_TEST_FILES)
-## A piece of sample text for round-trip testing encryption modes.
-EXTRA_DIST += daftstory.h
+t/modes/%.regress:
+ $(MAKE) modes/$*.t && \
+ mkdir -p $(srcdir)/t/modes/ && \
+ modes/$*.t -o$(srcdir)/$@.new && \
+ mv $(srcdir)/$@.new $(srcdir)/$@
## Clean the debris from the `modes' subdirectory.
CLEANFILES += modes/*.to modes/*.t$(EXEEXT)
#include "grand.h"
#include "keysz.h"
#include "paranoia.h"
+#include "rsvr.h"
/*----- Global variables --------------------------------------------------*/
#if CPUFAM_X86 || CPUFAM_AMD64
extern core__functype chacha_core_x86ish_sse2;
+extern core__functype chacha_core_x86ish_avx;
#endif
#if CPUFAM_ARMEL
static core__functype *pick_core(void)
{
#if CPUFAM_X86 || CPUFAM_AMD64
+ DISPATCH_PICK_COND(chacha_core, chacha_core_x86ish_avx,
+ cpu_feature_p(CPUFEAT_X86_AVX));
DISPATCH_PICK_COND(chacha_core, chacha_core_x86ish_sse2,
cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
cpu_feature_p(CPUFEAT_ARM_NEON));
#endif
#if CPUFAM_ARM64
- DISPATCH_PICK_COND(chacha_core, chacha_core_arm64, 1);
+ DISPATCH_PICK_COND(chacha_core, chacha_core_arm64,
+ cpu_feature_p(CPUFEAT_ARM_NEON));
#endif
DISPATCH_PICK_FALLBACK(chacha_core, simple_core);
}
/*----- ChaCha implementation ---------------------------------------------*/
+static const octet zerononce[XCHACHA_NONCESZ];
+
/* --- @chacha_init@ --- *
*
* Arguments: @chacha_ctx *ctx@ = context to fill in
void chacha_init(chacha_ctx *ctx, const void *key, size_t ksz,
const void *nonce)
{
- static const octet zerononce[CHACHA_NONCESZ];
-
populate(ctx->a, key, ksz);
chacha_setnonce(ctx, nonce ? nonce : zerononce);
}
void chacha_seeku64(chacha_ctx *ctx, kludge64 i)
{
ctx->a[12] = LO64(i); ctx->a[13] = HI64(i);
- ctx->bufi = CHACHA_OUTSZ;
+ ctx->off = CHACHA_OUTSZ;
}
void chacha_seek_ietf(chacha_ctx *ctx, uint32 i)
* to @dest@.
*/
+static const rsvr_policy policy = { 0, CHACHA_OUTSZ, CHACHA_OUTSZ };
+
#define CHACHA_ENCRYPT(r, ctx, src, dest, sz) \
chacha##r##_encrypt(ctx, src, dest, sz)
#define DEFENCRYPT(r) \
chacha_matrix b; \
const octet *s = src; \
octet *d = dest; \
- size_t n; \
+ rsvr_plan plan; \
kludge64 pos, delta; \
\
- SALSA20_OUTBUF(ctx, d, s, sz); \
- if (!sz) return; \
+ rsvr_mkplan(&plan, &policy, ctx->off, sz); \
\
- if (!dest) { \
- n = sz/CHACHA_OUTSZ; \
- pos = chacha_tellu64(ctx); \
- ASSIGN64(delta, n); \
- ADD64(pos, pos, delta); \
- chacha_seeku64(ctx, pos); \
- sz = sz%CHACHA_OUTSZ; \
- } else if (!src) { \
- while (sz >= CHACHA_OUTSZ) { \
- core(r, ctx->a, b); \
- CHACHA_STEP(ctx->a); \
- SALSA20_GENFULL(b, d); \
- sz -= CHACHA_OUTSZ; \
+ if (plan.head) { \
+ if (!ctx->off) { \
+ core(r, ctx->a, b); CHACHA_STEP(ctx->a); \
+ SALSA20_PREPBUF(ctx, b); \
} \
- } else { \
- while (sz >= CHACHA_OUTSZ) { \
- core(r, ctx->a, b); \
- CHACHA_STEP(ctx->a); \
- SALSA20_MIXFULL(b, d, s); \
- sz -= CHACHA_OUTSZ; \
+ SALSA20_OUTBUF(ctx, d, s, plan.head); \
+ } \
+ \
+ ctx->off -= plan.from_rsvr; \
+ \
+ if (!d) { \
+ if (plan.from_input) { \
+ pos = chacha_tellu64(ctx); \
+ ASSIGN64(delta, plan.from_input/SALSA20_OUTSZ); \
+ ADD64(pos, pos, delta); \
+ chacha_seeku64(ctx, pos); \
} \
+ } else if (!s) while (plan.from_input) { \
+ core(r, ctx->a, b); CHACHA_STEP(ctx->a); \
+ SALSA20_GENFULL(b, d); plan.from_input -= CHACHA_OUTSZ; \
+ } else while (plan.from_input) { \
+ core(r, ctx->a, b); CHACHA_STEP(ctx->a); \
+ SALSA20_MIXFULL(b, d, s); plan.from_input -= CHACHA_OUTSZ; \
} \
\
- if (sz) { \
- core(r, ctx->a, b); \
- CHACHA_STEP(ctx->a); \
+ if (plan.tail) { \
+ core(r, ctx->a, b); CHACHA_STEP(ctx->a); \
SALSA20_PREPBUF(ctx, b); \
- SALSA20_OUTBUF(ctx, d, s, sz); \
- assert(!sz); \
+ SALSA20_OUTBUF(ctx, d, s, plan.tail); \
} \
}
CHACHA_VARS(DEFENCRYPT)
void XCHACHA_INIT(r, XCHACHA_CTX(r) *ctx, \
const void *key, size_t ksz, const void *nonce) \
{ \
- static const octet zerononce[XCHACHA_NONCESZ]; \
- \
populate(ctx->k, key, ksz); \
ctx->s.a[ 0] = CHACHA_A256; \
ctx->s.a[ 1] = CHACHA_B256; \
#include "grand.h"
#include "keysz.h"
#include "paranoia.h"
+#include "rsvr.h"
#include "salsa20.h"
#include "salsa20-core.h"
#if CPUFAM_X86 || CPUFAM_AMD64
extern core__functype salsa20_core_x86ish_sse2;
+extern core__functype salsa20_core_x86ish_avx;
#endif
#if CPUFAM_ARMEL
static core__functype *pick_core(void)
{
#if CPUFAM_X86 || CPUFAM_AMD64
+ DISPATCH_PICK_COND(salsa20_core, salsa20_core_x86ish_avx,
+ cpu_feature_p(CPUFEAT_X86_AVX));
DISPATCH_PICK_COND(salsa20_core, salsa20_core_x86ish_sse2,
cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
cpu_feature_p(CPUFEAT_ARM_NEON));
#endif
#if CPUFAM_ARM64
- DISPATCH_PICK_COND(salsa20_core, salsa20_core_arm64, 1);
+ DISPATCH_PICK_COND(salsa20_core, salsa20_core_arm64,
+ cpu_feature_p(CPUFEAT_ARM_NEON));
#endif
DISPATCH_PICK_FALLBACK(salsa20_core, simple_core);
}
/*----- Salsa20 implementation --------------------------------------------*/
+static const octet zerononce[XSALSA20_NONCESZ];
+
/* --- @salsa20_init@ --- *
*
* Arguments: @salsa20_ctx *ctx@ = context to fill in
void salsa20_init(salsa20_ctx *ctx, const void *key, size_t ksz,
const void *nonce)
{
- static const octet zerononce[SALSA20_NONCESZ];
-
populate(ctx->a, key, ksz);
salsa20_setnonce(ctx, nonce ? nonce : zerononce);
}
void salsa20_seeku64(salsa20_ctx *ctx, kludge64 i)
{
ctx->a[8] = LO64(i); ctx->a[5] = HI64(i);
- ctx->bufi = SALSA20_OUTSZ;
+ ctx->off = 0;
}
void salsa20_seek_ietf(salsa20_ctx *ctx, uint32 i)
* to @dest@.
*/
+static const rsvr_policy policy = { 0, SALSA20_OUTSZ, SALSA20_OUTSZ };
+
#define SALSA20_ENCRYPT(r, ctx, src, dest, sz) \
SALSA20_DECOR(salsa20, r, _encrypt)(ctx, src, dest, sz)
#define DEFENCRYPT(r) \
salsa20_matrix b; \
const octet *s = src; \
octet *d = dest; \
- size_t n; \
+ rsvr_plan plan; \
kludge64 pos, delta; \
\
- SALSA20_OUTBUF(ctx, d, s, sz); \
- if (!sz) return; \
+ rsvr_mkplan(&plan, &policy, ctx->off, sz); \
\
- if (!dest) { \
- n = sz/SALSA20_OUTSZ; \
- pos = salsa20_tellu64(ctx); \
- ASSIGN64(delta, n); \
- ADD64(pos, pos, delta); \
- salsa20_seeku64(ctx, pos); \
- sz = sz%SALSA20_OUTSZ; \
- } else if (!src) { \
- while (sz >= SALSA20_OUTSZ) { \
- core(r, ctx->a, b); \
- SALSA20_STEP(ctx->a); \
- SALSA20_GENFULL(b, d); \
- sz -= SALSA20_OUTSZ; \
+ if (plan.head) { \
+ if (!ctx->off) { \
+ core(r, ctx->a, b); SALSA20_STEP(ctx->a); \
+ SALSA20_PREPBUF(ctx, b); \
} \
- } else { \
- while (sz >= SALSA20_OUTSZ) { \
- core(r, ctx->a, b); \
- SALSA20_STEP(ctx->a); \
- SALSA20_MIXFULL(b, d, s); \
- sz -= SALSA20_OUTSZ; \
+ SALSA20_OUTBUF(ctx, d, s, plan.head); \
+ } \
+ \
+ ctx->off -= plan.from_rsvr; \
+ \
+ if (!d) { \
+ if (plan.from_input) { \
+ pos = salsa20_tellu64(ctx); \
+ ASSIGN64(delta, plan.from_input/SALSA20_OUTSZ); \
+ ADD64(pos, pos, delta); \
+ salsa20_seeku64(ctx, pos); \
} \
+ } else if (!s) while (plan.from_input) { \
+ core(r, ctx->a, b); SALSA20_STEP(ctx->a); \
+ SALSA20_GENFULL(b, d); plan.from_input -= SALSA20_OUTSZ; \
+ } else while (plan.from_input) { \
+ core(r, ctx->a, b); SALSA20_STEP(ctx->a); \
+ SALSA20_MIXFULL(b, d, s); plan.from_input -= SALSA20_OUTSZ; \
} \
\
- if (sz) { \
- core(r, ctx->a, b); \
- SALSA20_STEP(ctx->a); \
+ if (plan.tail) { \
+ core(r, ctx->a, b); SALSA20_STEP(ctx->a); \
SALSA20_PREPBUF(ctx, b); \
- SALSA20_OUTBUF(ctx, d, s, sz); \
- assert(!sz); \
+ SALSA20_OUTBUF(ctx, d, s, plan.tail); \
} \
}
SALSA20_VARS(DEFENCRYPT)
void XSALSA20_INIT(r, XSALSA20_CTX(r) *ctx, \
const void *key, size_t ksz, const void *nonce) \
{ \
- static const octet zerononce[XSALSA20_NONCESZ]; \
- \
populate(ctx->k, key, ksz); \
ctx->s.a[ 0] = SALSA20_A256; \
ctx->s.a[ 1] = SALSA20_B256; \
xchacha20 {
## Unfortunately, XChaCha isn't actually defined anywhere, even though it's
- ## obvious how to do it. These test vectors are from
- ## https://github.com/DaGenix/rust-crypto/blob/master/src/chacha20.rs
+ ## obvious how to do it.
+ ## These test vectors are from
+ ## https://github.com/DaGenix/rust-crypto/blob/master/src/chacha20.rs
1b27556473e985d462cd51197a9a46c76009549eac6474f206c4ee0844f68389
69696ee955b62b73cd62bda875fc73d68219e0036b7a0b37 "" 0 ""
4febf2fe4b359c508dc5e8b5980c88e38946d8f18f313465c862a08782648248018dacdcb904178853a46dca3a0eaaee747cba97434eaffad58fea8222047e0de6c3a6775106e0331ad714d2f27a55641340a1f1dd9f94532e68cb241cbdd150970d14e05c5b173193fb14f51c41f393835bf7f416a7e0bba81ffb8b13af0e21691d7ecec93b75e6e4183a;
+
+ ## This one's from draft-irtf-cfrg-xchacha-03.
+ 808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f
+ 404142434445464748494a4b4c4d4e4f5051525354555658 "" 64
+ 5468652064686f6c65202870726f6e6f756e6365642022646f6c65222920697320616c736f206b6e6f776e2061732074686520417369617469632077696c6420646f672c2072656420646f672c20616e642077686973746c696e6720646f672e2049742069732061626f7574207468652073697a65206f662061204765726d616e20736865706865726420627574206c6f6f6b73206d6f7265206c696b652061206c6f6e672d6c656767656420666f782e205468697320686967686c7920656c757369766520616e6420736b696c6c6564206a756d70657220697320636c6173736966696564207769746820776f6c7665732c20636f796f7465732c206a61636b616c732c20616e6420666f78657320696e20746865207461786f6e6f6d69632066616d696c792043616e696461652e
+ 7d0a2e6b7f7c65a236542630294e063b7ab9b555a5d5149aa21e4ae1e4fbce87ecc8e08a8b5e350abe622b2ffa617b202cfad72032a3037e76ffdcdc4376ee053a190d7e46ca1de04144850381b9cb29f051915386b8a710b8ac4d027b8b050f7cba5854e028d564e453b8a968824173fc16488b8970cac828f11ae53cabd20112f87107df24ee6183d2274fe4c8b1485534ef2c5fbc1ec24bfc3663efaa08bc047d29d25043532db8391a8a3d776bf4372a6955827ccb0cdd4af403a7ce4c63d595c75a43e045f0cce1f29c8b93bd65afc5974922f214a40b7c402cdb91ae73c0b63615cdad0480680f16515a7ace9d39236464328a37743ffc28f4ddb324f4d0f5bbdc270c65b1749a6efff1fbaa09536175ccd29fb9e6057b307320d316838a9c71f70b5b5907a66f7ea49aadc409;
}
chacha8 {
c46ec1b18ce8a878725a37e780dfb735
1ada31d5cf688221 "" 0 ""
826abdd84460e2e9349f0ef4af5b179b426e4b2d109a9c5bb44000ae51bea90a496beeef62a76850ff3f0402c4ddc99f6db07f151c1c0dfac2e56565d62896255b23132e7b469c7bfb88fa95d44ca5ae3e45e848a4108e98bad7a9eb15512784a6a9e6e591dce674120acaf9040ff50ff3ac30ccfb5e14204f5e4268b90a8804;
+ c46ec1b18ce8a878725a37e780dfb7351f68ed2e194c79fbc6aebee1a667975d
+ 1ada31d5cf688221 "" 0 ""
+ f63a89b75c2271f9368816542ba52f06ed49241792302b00b5e8f80ae9a473afc25b218f519af0fdd406362e8d69de7f54c604a6e00f353f110f771bdca8ab92e5fbc34e60a1d9a9db17345b0a402736853bf910b060bdf1f897b6290f01d138ae2c4c90225ba9ea14d518f55929dea098ca7a6ccfe61227053c84e49a4a3332;
## Tests from RFC7539.
000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
000000000000000000000002 "" 0 ""
965e3bc6f9ec7ed9560808f4d229f94b137ff275ca9b3fcbdd59deaad23310ae;
}
+
+chacha20-poly1305 {
+ ## Test from RFC7539.
+ 808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f
+ 070000004041424344454647
+ 50515253c0c1c2c3c4c5c6c7
+ 4c616469657320616e642047656e746c656d656e206f662074686520636c617373206f66202739393a204966204920636f756c64206f6666657220796f75206f6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73637265656e20776f756c642062652069742e
+ d31a8d34648e60db7b86afbc53ef7ec2a4aded51296e08fea9e2b5a736ee62d63dbea45e8ca9671282fafb69da92728b1a71de0a9e060b2905d6a5b67ecd3b3692ddbd7f2d778b8c9803aee328091b58fab324e4fad675945585808b4831d7bc3ff4def08e4b7a9de576d26586cec64b6116
+ 1ae10b594f09e26a7e902ecbd0600691;
+
+ ## Test from draft-irtf-cfrg-xchacha-03.
+ 808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f
+ 404142434445464748494a4b4c4d4e4f5051525354555657
+ 50515253c0c1c2c3c4c5c6c7
+ 4c616469657320616e642047656e746c656d656e206f662074686520636c617373206f66202739393a204966204920636f756c64206f6666657220796f75206f6e6c79206f6e652074697020666f7220746865206675747572652c2073756e73637265656e20776f756c642062652069742e
+ bd6d179d3e83d43b9576579493c0e939572a1700252bfaccbed2902c21396cbb731c7f1b0b4aa6440bf3a82f4eda7e39ae64c6708c54c216cb96b72e1213b4522f8c9ba40db5d945b11b69b982c1bb9e3f3fac2bc369488f76b2383565d3fff921f9664c97637da9768812f615c68b13b52e
+ c0875924c1c7987947deafd8780acf49;
+}