catacomb / symm / salsa20-arm64.S
/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of Salsa20 for AArch64
///
/// (c) 2018 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Main code.

	.arch	armv8-a
	.text

FUNC(salsa20_core_arm64)

	// Arguments are in registers.
	// w0 is the number of rounds to perform
	// x1 points to the input matrix
	// x2 points to the output matrix
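
	// In C terms, this routine behaves like the following prototype
	// (a sketch; the precise types used elsewhere in Catacomb may
	// differ):
	//
	//	void salsa20_core_arm64(unsigned nr,
	//				const uint32_t in[16],
	//				uint32_t out[16]);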

	// First job is to slurp the matrix into the SIMD registers.  The
	// words have already been permuted conveniently to make them line up
	// better for SIMD processing.
	//
	// The textbook arrangement of the matrix is this.
	//
	//	[C K K K]
	//	[K C N N]
	//	[T T C K]
	//	[K K K C]
	//
	// But we've rotated the columns up so that the main diagonal with
	// the constants on it ends up in the first row, giving something more
	// like
	//
	//	[C C C C]
	//	[K T K K]
	//	[T K K N]
	//	[K K N K]
	//
	// so the transformation looks like this:
	//
	//	[ 0  1  2  3]		[ 0  5 10 15] (a, v4)
	//	[ 4  5  6  7]   -->	[ 4  9 14  3] (b, v5)
	//	[ 8  9 10 11]		[ 8 13  2  7] (c, v6)
	//	[12 13 14 15]		[12  1  6 11] (d, v7)
	//
	// We need a copy for later.  Rather than waste time copying them by
	// hand, we'll use the three-address nature of the instruction set.
	// But this means that the main loop is offset by a bit.
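	//
	// (Concretely, word i of the permuted input holds word 5i mod 16
	// of the textbook matrix.)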
	ld1	{v0.4s-v3.4s}, [x1]

	// Apply a column quarterround to each of the columns simultaneously,
	// moving the results to their working registers.  Alas, there
	// doesn't seem to be a packed word rotate, so we have to synthesize
	// it.
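	//
	// In C terms, each `shl'/`ushr'/`orr' triple below computes a
	// lane-wise rotate of each 32-bit word (a sketch, with ROL32 a
	// hypothetical rotate-left helper):
	//
	//	b ^= ROL32(a + d, 7), where
	//	ROL32(t, n) = (t << n) | (t >> (32 - n))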

	// b ^= (a + d) <<< 7
	add	v16.4s, v0.4s, v3.4s
	shl	v17.4s, v16.4s, #7
	ushr	v16.4s, v16.4s, #25
	orr	v16.16b, v16.16b, v17.16b
	eor	v5.16b, v1.16b, v16.16b

	// c ^= (b + a) <<< 9
	add	v16.4s, v5.4s, v0.4s
	shl	v17.4s, v16.4s, #9
	ushr	v16.4s, v16.4s, #23
	orr	v16.16b, v16.16b, v17.16b
	eor	v6.16b, v2.16b, v16.16b

	// d ^= (c + b) <<< 13
	add	v16.4s, v6.4s, v5.4s
	ext	v5.16b, v5.16b, v5.16b, #12
	shl	v17.4s, v16.4s, #13
	ushr	v16.4s, v16.4s, #19
	orr	v16.16b, v16.16b, v17.16b
	eor	v7.16b, v3.16b, v16.16b

	// a ^= (d + c) <<< 18
	add	v16.4s, v7.4s, v6.4s
	ext	v6.16b, v6.16b, v6.16b, #8
	ext	v7.16b, v7.16b, v7.16b, #4
	shl	v17.4s, v16.4s, #18
	ushr	v16.4s, v16.4s, #14
	orr	v16.16b, v16.16b, v17.16b
	eor	v4.16b, v0.16b, v16.16b

0:
	// The transpose conveniently only involves reordering elements of
	// individual rows, which can be done quite easily, and reordering
	// the rows themselves, which is a trivial renaming.  It doesn't
	// involve any movement of elements between rows.
	//
	//	[ 0  5 10 15]		[ 0  5 10 15] (a, v4)
	//	[ 4  9 14  3]   -->	[ 1  6 11 12] (b, v7)
	//	[ 8 13  2  7]		[ 2  7  8 13] (c, v6)
	//	[12  1  6 11]		[ 3  4  9 14] (d, v5)
	//
	// The reorderings have been pushed upwards to reduce delays.
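	//
	// (An `ext' of a register with itself by #4n bytes rotates its
	// four words round by n lane positions, so the `#12', `#8', and
	// `#4' forms interleaved into the quarterrounds realize exactly
	// the per-row reorderings shown in the diagram.)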
	sub	w0, w0, #2

	// Apply the row quarterround to each of the columns (yes!)
	// simultaneously.

	// b ^= (a + d) <<< 7
	add	v16.4s, v4.4s, v5.4s
	shl	v17.4s, v16.4s, #7
	ushr	v16.4s, v16.4s, #25
	orr	v16.16b, v16.16b, v17.16b
	eor	v7.16b, v7.16b, v16.16b

	// c ^= (b + a) <<< 9
	add	v16.4s, v7.4s, v4.4s
	shl	v17.4s, v16.4s, #9
	ushr	v16.4s, v16.4s, #23
	orr	v16.16b, v16.16b, v17.16b
	eor	v6.16b, v6.16b, v16.16b

	// d ^= (c + b) <<< 13
	add	v16.4s, v6.4s, v7.4s
	ext	v7.16b, v7.16b, v7.16b, #12
	shl	v17.4s, v16.4s, #13
	ushr	v16.4s, v16.4s, #19
	orr	v16.16b, v16.16b, v17.16b
	eor	v5.16b, v5.16b, v16.16b

	// a ^= (d + c) <<< 18
	add	v16.4s, v5.4s, v6.4s
	ext	v6.16b, v6.16b, v6.16b, #8
	ext	v5.16b, v5.16b, v5.16b, #4
	shl	v17.4s, v16.4s, #18
	ushr	v16.4s, v16.4s, #14
	orr	v16.16b, v16.16b, v17.16b
	eor	v4.16b, v4.16b, v16.16b

	// We had to undo the transpose ready for the next loop.  Again, push
	// back the reorderings to reduce latency.  Decrement the loop
	// counter and see if we should go round again.
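	//
	// (Each trip round the loop accounts for two rounds, hence the
	// `sub w0, w0, #2' above; the round count is therefore assumed
	// to be even.)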
	cbz	w0, 9f

	// Do the first half of the next round because this loop is offset.

	// b ^= (a + d) <<< 7
	add	v16.4s, v4.4s, v7.4s
	shl	v17.4s, v16.4s, #7
	ushr	v16.4s, v16.4s, #25
	orr	v16.16b, v16.16b, v17.16b
	eor	v5.16b, v5.16b, v16.16b

	// c ^= (b + a) <<< 9
	add	v16.4s, v5.4s, v4.4s
	shl	v17.4s, v16.4s, #9
	ushr	v16.4s, v16.4s, #23
	orr	v16.16b, v16.16b, v17.16b
	eor	v6.16b, v6.16b, v16.16b

	// d ^= (c + b) <<< 13
	add	v16.4s, v6.4s, v5.4s
	ext	v5.16b, v5.16b, v5.16b, #12
	shl	v17.4s, v16.4s, #13
	ushr	v16.4s, v16.4s, #19
	orr	v16.16b, v16.16b, v17.16b
	eor	v7.16b, v7.16b, v16.16b

	// a ^= (d + c) <<< 18
	add	v16.4s, v7.4s, v6.4s
	ext	v6.16b, v6.16b, v6.16b, #8
	ext	v7.16b, v7.16b, v7.16b, #4
	shl	v17.4s, v16.4s, #18
	ushr	v16.4s, v16.4s, #14
	orr	v16.16b, v16.16b, v17.16b
	eor	v4.16b, v4.16b, v16.16b

	b	0b

	// Almost there.  Firstly the feedforward addition.  Also, establish
	// constants which will be useful later.
9:	add	v0.4s, v0.4s, v4.4s		// 0, 5, 10, 15
	movi	v16.2d, #0xffffffff		// = (-1, 0, -1, 0)
	movi	d17, #-1			// = (-1, -1, 0, 0)
	add	v1.4s, v1.4s, v5.4s		// 4, 9, 14, 3
	add	v2.4s, v2.4s, v6.4s		// 8, 13, 2, 7
	add	v3.4s, v3.4s, v7.4s		// 12, 1, 6, 11
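
	// (The `movi' forms here are 64-bit byte-mask encodings:
	// `movi v16.2d, #0xffffffff' sets the low four bytes of each
	// 64-bit lane, while `movi d17, #-1' sets the whole low 64 bits
	// of v17 and clears the high half, giving the word masks noted
	// above.)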

	// Next we must undo the permutation which was already applied to the
	// input.  The core trick is from Dan Bernstein's `armneon3'
	// implementation, but with a lot of liposuction.
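	//
	// (For reference: `bif v, w, m' computes v = (v AND m) OR
	// (w AND NOT m), i.e., it replaces the bits of v where the mask
	// m is zero with the corresponding bits of w.  With the word
	// masks established above, each `bif' merges whole 32-bit lanes
	// from two registers.)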
	mov	v4.16b, v0.16b

	// Sort out the columns by pairs.
	bif	v0.16b, v3.16b, v16.16b		// 0, 1, 10, 11
	bif	v3.16b, v2.16b, v16.16b		// 12, 13, 6, 7
	bif	v2.16b, v1.16b, v16.16b		// 8, 9, 2, 3
	bif	v1.16b, v4.16b, v16.16b		// 4, 5, 14, 15
	mov	v4.16b, v0.16b
	mov	v5.16b, v3.16b

	// Now fix up the remaining discrepancies.
	bif	v0.16b, v2.16b, v17.16b		// 0, 1, 2, 3
	bif	v3.16b, v1.16b, v17.16b		// 12, 13, 14, 15
	bif	v2.16b, v4.16b, v17.16b		// 8, 9, 10, 11
	bif	v1.16b, v5.16b, v17.16b		// 4, 5, 6, 7

	// And with that, we're done.
	st1	{v0.4s-v3.4s}, [x2]
	ret

ENDFUNC

///----- That's all, folks --------------------------------------------------