/// MA 02111-1307, USA.
///--------------------------------------------------------------------------
-/// External definitions.
+/// Preliminaries.
#include "config.h"
#include "asm-common.h"
-///--------------------------------------------------------------------------
-/// Main.code.
-
.arch armv7-a
.fpu neon
+
.text
+///--------------------------------------------------------------------------
+/// Main code.
+
FUNC(salsa20_core_arm_neon)
// Arguments are in registers.
// [ 8 9 10 11] [ 8 13 2 7] (c, q10)
// [12 13 14 15] [12 1 6 11] (d, q11)
//
- // [ 0 1 2 3] (a, q8)
- // [ 4 5 6 7] (b, q9)
- // [ 8 9 10 11] (c, q10)
- // [12 13 14 15] (d, q11)
- //
// We need a copy for later. Rather than waste time copying them by
// hand, we'll use the three-address nature of the instruction set.
// But this means that the main loop is offset by a bit.
- vldmia r1, {d24-d31}
+ vldmia r1, {QQ(q12, q15)}
// Apply a column quarterround to each of the columns simultaneously,
// moving the results to their working registers. Alas, there
b 0b
- // Almost there. Firstly the feedfoward addition, and then we have
- // to write out the result. Here we have to undo the permutation
- // which was already applied to the input.
-9: vadd.u32 q8, q8, q12
- vadd.u32 q9, q9, q13
- vadd.u32 q10, q10, q14
- vadd.u32 q11, q11, q15
-
- vst1.32 {d16[0]}, [r2 :32]!
- vst1.32 {d22[1]}, [r2 :32]!
- vst1.32 {d21[0]}, [r2 :32]!
- vst1.32 {d19[1]}, [r2 :32]!
-
- vst1.32 {d18[0]}, [r2 :32]!
- vst1.32 {d16[1]}, [r2 :32]!
- vst1.32 {d23[0]}, [r2 :32]!
- vst1.32 {d21[1]}, [r2 :32]!
-
- vst1.32 {d20[0]}, [r2 :32]!
- vst1.32 {d18[1]}, [r2 :32]!
- vst1.32 {d17[0]}, [r2 :32]!
- vst1.32 {d23[1]}, [r2 :32]!
-
- vst1.32 {d22[0]}, [r2 :32]!
- vst1.32 {d20[1]}, [r2 :32]!
- vst1.32 {d19[0]}, [r2 :32]!
- vst1.32 {d17[1]}, [r2 :32]!
+	// Almost there.  Firstly the feedforward addition.  Also, establish a
+ // constant which will be useful later.
+9: vadd.u32 q0, q8, q12 // 0, 5, 10, 15
+ vmov.i64 q12, #0xffffffff // = (-1, 0, -1, 0)
+ vadd.u32 q1, q9, q13 // 4, 9, 14, 3
+ vadd.u32 q2, q10, q14 // 8, 13, 2, 7
+ vadd.u32 q3, q11, q15 // 12, 1, 6, 11
+
+ // Next we must undo the permutation which was already applied to the
+ // input. The core trick is from Dan Bernstein's `armneon3'
+ // implementation, but with a lot of liposuction.
+ vmov q15, q0
+
+ // Sort out the columns by pairs.
+ vbif q0, q3, q12 // 0, 1, 10, 11
+ vbif q3, q2, q12 // 12, 13, 6, 7
+ vbif q2, q1, q12 // 8, 9, 2, 3
+ vbif q1, q15, q12 // 4, 5, 14, 15
+
+ // Now fix up the remaining discrepancies.
+ vswp D1(q0), D1(q2)
+ vswp D1(q1), D1(q3)
// And with that, we're done.
+ vstmia r2, {QQ(q0, q3)}
bx r14
ENDFUNC