+/// -*- mode: asm; asm-comment-char: ?/ -*-
+///
+/// Fancy SIMD implementation of Salsa20 for AArch64
+///
+/// (c) 2018 Straylight/Edgeware
+///
+
+///----- Licensing notice ---------------------------------------------------
+///
+/// This file is part of Catacomb.
+///
+/// Catacomb is free software; you can redistribute it and/or modify
+/// it under the terms of the GNU Library General Public License as
+/// published by the Free Software Foundation; either version 2 of the
+/// License, or (at your option) any later version.
+///
+/// Catacomb is distributed in the hope that it will be useful,
+/// but WITHOUT ANY WARRANTY; without even the implied warranty of
+/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+/// GNU Library General Public License for more details.
+///
+/// You should have received a copy of the GNU Library General Public
+/// License along with Catacomb; if not, write to the Free
+/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+/// MA 02111-1307, USA.
+
+///--------------------------------------------------------------------------
+/// External definitions.
+
+#include "config.h"
+#include "asm-common.h"
+
+///--------------------------------------------------------------------------
+/// Main code.
+
+ .arch armv8-a
+ .text
+
+FUNC(salsa20_core_arm64)
+
+ // Arguments are in registers.
+ // w0 is the number of rounds to perform
+ // x1 points to the input matrix
+ // x2 points to the output matrix
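+	//
+	// The function computes the Salsa20 core: the matrix at x1 is
+	// mixed for w0 rounds, and the original input is added back on
+	// before the result is stored at x2. (The loop below peels two
+	// rounds per iteration, so w0 presumably wants to be positive
+	// and even.)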
+
+ // First job is to slurp the matrix into the SIMD registers. The
+ // words have already been permuted conveniently to make them line up
+ // better for SIMD processing.
+ //
+ // The textbook arrangement of the matrix is this.
+ //
+ // [C K K K]
+ // [K C N N]
+ // [T T C K]
+ // [K K K C]
+ //
+ // But we've rotated the columns up so that the main diagonal with
+	// the constants on it ends up in the first row, giving something more
+ // like
+ //
+ // [C C C C]
+ // [K T K K]
+ // [T K K N]
+ // [K K N K]
+ //
+ // so the transformation looks like this:
+ //
+ // [ 0 1 2 3] [ 0 5 10 15] (a, v4)
+ // [ 4 5 6 7] --> [ 4 9 14 3] (b, v5)
+ // [ 8 9 10 11] [ 8 13 2 7] (c, v6)
+ // [12 13 14 15] [12 1 6 11] (d, v7)
+ //
+ // We need a copy for later. Rather than waste time copying them by
+ // hand, we'll use the three-address nature of the instruction set.
+ // But this means that the main loop is offset by a bit.
+ ld1 {v0.4s-v3.4s}, [x1]
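+
+	// v0-v3 now hold the rows of the permuted matrix, as shown on the
+	// right above. The first quarterround below reads them but leaves
+	// its results in v4-v7, so v0-v3 survive untouched for the
+	// feedforward addition at the end.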
+
+ // Apply a column quarterround to each of the columns simultaneously,
+ // moving the results to their working registers. Alas, there
+ // doesn't seem to be a packed word rotate, so we have to synthesize
+ // it.
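+	//
+	// For example, `x <<< 7' is computed as `(x << 7) | (x >> 25)':
+	// SHL supplies the low bits, USHR the high bits, and ORR merges
+	// the two halves, at the cost of a scratch register.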
+
+ // b ^= (a + d) <<< 7
+ add v16.4s, v0.4s, v3.4s
+ shl v17.4s, v16.4s, #7
+ ushr v16.4s, v16.4s, #25
+ orr v16.16b, v16.16b, v17.16b
+ eor v5.16b, v1.16b, v16.16b
+
+ // c ^= (b + a) <<< 9
+ add v16.4s, v5.4s, v0.4s
+ shl v17.4s, v16.4s, #9
+ ushr v16.4s, v16.4s, #23
+ orr v16.16b, v16.16b, v17.16b
+ eor v6.16b, v2.16b, v16.16b
+
+ // d ^= (c + b) <<< 13
+ add v16.4s, v6.4s, v5.4s
+ ext v5.16b, v5.16b, v5.16b, #12
+ shl v17.4s, v16.4s, #13
+ ushr v16.4s, v16.4s, #19
+ orr v16.16b, v16.16b, v17.16b
+ eor v7.16b, v3.16b, v16.16b
+
+ // a ^= (d + c) <<< 18
+ add v16.4s, v7.4s, v6.4s
+ ext v6.16b, v6.16b, v6.16b, #8
+ ext v7.16b, v7.16b, v7.16b, #4
+ shl v17.4s, v16.4s, #18
+ ushr v16.4s, v16.4s, #14
+ orr v16.16b, v16.16b, v17.16b
+ eor v4.16b, v0.16b, v16.16b
+
+0:
+ // The transpose conveniently only involves reordering elements of
+ // individual rows, which can be done quite easily, and reordering
+ // the rows themselves, which is a trivial renaming. It doesn't
+ // involve any movement of elements between rows.
+ //
+ // [ 0 5 10 15] [ 0 5 10 15] (a, v4)
+ // [ 4 9 14 3] --> [ 1 6 11 12] (b, v7)
+ // [ 8 13 2 7] [ 2 7 8 13] (c, v6)
+ // [12 1 6 11] [ 3 4 9 14] (d, v5)
+ //
+ // The reorderings have been pushed upwards to reduce delays.
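+	//
+	// The lane rotations are done with EXT: extracting from a register
+	// paired with itself, `ext vN.16b, vN.16b, vN.16b, #4k' moves lane
+	// (i + k) mod 4 of the source into lane i, which is all the
+	// reordering we need. The EXTs for this first transpose were
+	// interleaved into the column round above, so v4-v7 arrive here
+	// already in the right-hand form. The round counter is also
+	// decremented early, presumably so that the branch at the bottom
+	// of the loop isn't left waiting on it.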
+ sub w0, w0, #2
+
+ // Apply the row quarterround to each of the columns (yes!)
+ // simultaneously.
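+	//
+	// (After the transpose, each lane of the four registers holds one
+	// complete row of the matrix, with the diagonal element in a, so
+	// the same lane-parallel quarterround machinery now performs the
+	// row round.)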
+
+ // b ^= (a + d) <<< 7
+ add v16.4s, v4.4s, v5.4s
+ shl v17.4s, v16.4s, #7
+ ushr v16.4s, v16.4s, #25
+ orr v16.16b, v16.16b, v17.16b
+ eor v7.16b, v7.16b, v16.16b
+
+ // c ^= (b + a) <<< 9
+ add v16.4s, v7.4s, v4.4s
+ shl v17.4s, v16.4s, #9
+ ushr v16.4s, v16.4s, #23
+ orr v16.16b, v16.16b, v17.16b
+ eor v6.16b, v6.16b, v16.16b
+
+ // d ^= (c + b) <<< 13
+ add v16.4s, v6.4s, v7.4s
+ ext v7.16b, v7.16b, v7.16b, #12
+ shl v17.4s, v16.4s, #13
+ ushr v16.4s, v16.4s, #19
+ orr v16.16b, v16.16b, v17.16b
+ eor v5.16b, v5.16b, v16.16b
+
+ // a ^= (d + c) <<< 18
+ add v16.4s, v5.4s, v6.4s
+ ext v6.16b, v6.16b, v6.16b, #8
+ ext v5.16b, v5.16b, v5.16b, #4
+ shl v17.4s, v16.4s, #18
+ ushr v16.4s, v16.4s, #14
+ orr v16.16b, v16.16b, v17.16b
+ eor v4.16b, v4.16b, v16.16b
+
+	// The transpose has already been undone, ready for the next round:
+	// again, the reorderings were pushed back up the instruction
+	// stream to reduce latency. The loop counter was decremented at
+	// the top of the loop; if it's reached zero, then we're done.
+ cbz w0, 9f
+
+ // Do the first half of the next round because this loop is offset.
+
+ // b ^= (a + d) <<< 7
+ add v16.4s, v4.4s, v7.4s
+ shl v17.4s, v16.4s, #7
+ ushr v16.4s, v16.4s, #25
+ orr v16.16b, v16.16b, v17.16b
+ eor v5.16b, v5.16b, v16.16b
+
+ // c ^= (b + a) <<< 9
+ add v16.4s, v5.4s, v4.4s
+ shl v17.4s, v16.4s, #9
+ ushr v16.4s, v16.4s, #23
+ orr v16.16b, v16.16b, v17.16b
+ eor v6.16b, v6.16b, v16.16b
+
+ // d ^= (c + b) <<< 13
+ add v16.4s, v6.4s, v5.4s
+ ext v5.16b, v5.16b, v5.16b, #12
+ shl v17.4s, v16.4s, #13
+ ushr v16.4s, v16.4s, #19
+ orr v16.16b, v16.16b, v17.16b
+ eor v7.16b, v7.16b, v16.16b
+
+ // a ^= (d + c) <<< 18
+ add v16.4s, v7.4s, v6.4s
+ ext v6.16b, v6.16b, v6.16b, #8
+ ext v7.16b, v7.16b, v7.16b, #4
+ shl v17.4s, v16.4s, #18
+ ushr v16.4s, v16.4s, #14
+ orr v16.16b, v16.16b, v17.16b
+ eor v4.16b, v4.16b, v16.16b
+
+ b 0b
+
+	// Almost there. First, the feedforward addition. We also
+	// establish some mask constants which will be useful later.
+9: add v0.4s, v0.4s, v4.4s // 0, 5, 10, 15
+ movi v16.2d, #0xffffffff // = (-1, 0, -1, 0)
+ movi d17, #-1 // = (-1, -1, 0, 0)
+ add v1.4s, v1.4s, v5.4s // 4, 9, 14, 3
+ add v2.4s, v2.4s, v6.4s // 8, 13, 2, 7
+ add v3.4s, v3.4s, v7.4s // 12, 1, 6, 11
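+
+	// (About those masks: MOVI with a 64-bit immediate replicates it
+	// into both halves of the vector, so #0xffffffff sets just the low
+	// word of each doubleword, giving (-1, 0, -1, 0); the scalar form
+	// `movi d17, #-1' fills only the low doubleword and zeroes the
+	// high one, giving (-1, -1, 0, 0).)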
+
+ // Next we must undo the permutation which was already applied to the
+ // input. The core trick is from Dan Bernstein's `armneon3'
+ // implementation, but with a lot of liposuction.
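+	//
+	// Recall that BIF Vd, Vn, Vm leaves each bit of Vd alone where the
+	// mask Vm is set, and inserts the corresponding bit of Vn where it
+	// is clear. With the masks above, then, each BIF replaces either
+	// the odd-numbered words (v16) or the top pair of words (v17) of
+	// its destination with words from another row.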
+ mov v4.16b, v0.16b
+
+ // Sort out the columns by pairs.
+ bif v0.16b, v3.16b, v16.16b // 0, 1, 10, 11
+ bif v3.16b, v2.16b, v16.16b // 12, 13, 6, 7
+ bif v2.16b, v1.16b, v16.16b // 8, 9, 2, 3
+ bif v1.16b, v4.16b, v16.16b // 4, 5, 14, 15
+ mov v4.16b, v0.16b
+ mov v5.16b, v3.16b
+
+ // Now fix up the remaining discrepancies.
+ bif v0.16b, v2.16b, v17.16b // 0, 1, 2, 3
+ bif v3.16b, v1.16b, v17.16b // 12, 13, 14, 15
+ bif v2.16b, v4.16b, v17.16b // 8, 9, 10, 11
+ bif v1.16b, v5.16b, v17.16b // 4, 5, 6, 7
+
+ // And with that, we're done.
+ st1 {v0.4s-v3.4s}, [x2]
+ ret
+
+ENDFUNC
+
+///----- That's all, folks --------------------------------------------------