symm/salsa20-*.S: Fix vector diagrams to be little-endian.
symm/salsa20-arm64.S
/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of Salsa20 for AArch64
///
/// (c) 2018 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

        .arch   armv8-a

        .text

///--------------------------------------------------------------------------
/// Main code.

FUNC(salsa20_core_arm64)

        // Arguments are in registers.
        // w0 is the number of rounds to perform
        // x1 points to the input matrix
        // x2 points to the output matrix
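        //
        // In C terms the call behaves roughly like the following (the type
        // and parameter names here are illustrative only, not the library's
        // actual declarations):
        //
        //      void salsa20_core_arm64(unsigned nrounds,
        //                              const uint32_t in[16],
        //                              uint32_t out[16]);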

        // The first job is to slurp the matrix into the SIMD registers.
        // The words have already been permuted conveniently to make them
        // line up better for SIMD processing.
        //
        // The textbook arrangement of the matrix is this.
        //
        //      [C K K K]
        //      [K C N N]
        //      [T T C K]
        //      [K K K C]
        //
        // But we've rotated the columns up so that the main diagonal with
        // the constants on it ends up in the first row, giving something
        // more like
        //
        //      [C C C C]
        //      [K T K K]
        //      [T K K N]
        //      [K K N K]
        //
        // so the transformation looks like this:
        //
        //      [ 0  1  2  3]           [ 0  5 10 15] (a, v4)
        //      [ 4  5  6  7]    -->    [ 4  9 14  3] (b, v5)
        //      [ 8  9 10 11]           [ 8 13  2  7] (c, v6)
        //      [12 13 14 15]           [12  1  6 11] (d, v7)
        //
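        // In terms of memory layout, then, the sixteen input words at x1
        // are expected in the order given by this table (illustrative C,
        // following the diagram above; not part of the library's interface):
        //
        //      static const unsigned char perm[16] =
        //          { 0, 5, 10, 15,  4, 9, 14, 3,  8, 13, 2, 7,  12, 1, 6, 11 };
        //
        // i.e., word i of the buffer holds textbook word perm[i], so the
        // plain contiguous load below picks up the rotated rows directly.
        //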
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        ld1     {v0.4s-v3.4s}, [x1]

        // Apply a column quarterround to each of the columns simultaneously,
        // moving the results to their working registers.  Alas, there
        // doesn't seem to be a packed word rotate, so we have to synthesize
        // it.
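        //
        // Each rotation is therefore built from a pair of shifts: in C-like
        // terms (for reference only),
        //
        //      ROTL32(x, n) = (x << n) | (x >> (32 - n))
        //
        // which is what each `shl'/`ushr'/`orr' triple below computes.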

        // b ^= (a + d) <<< 7
        add     v16.4s, v0.4s, v3.4s
        shl     v17.4s, v16.4s, #7
        ushr    v16.4s, v16.4s, #25
        orr     v16.16b, v16.16b, v17.16b
        eor     v5.16b, v1.16b, v16.16b

        // c ^= (b + a) <<< 9
        add     v16.4s, v5.4s, v0.4s
        shl     v17.4s, v16.4s, #9
        ushr    v16.4s, v16.4s, #23
        orr     v16.16b, v16.16b, v17.16b
        eor     v6.16b, v2.16b, v16.16b

        // d ^= (c + b) <<< 13
        add     v16.4s, v6.4s, v5.4s
        ext     v5.16b, v5.16b, v5.16b, #12
        shl     v17.4s, v16.4s, #13
        ushr    v16.4s, v16.4s, #19
        orr     v16.16b, v16.16b, v17.16b
        eor     v7.16b, v3.16b, v16.16b

        // a ^= (d + c) <<< 18
        add     v16.4s, v7.4s, v6.4s
        ext     v6.16b, v6.16b, v6.16b, #8
        ext     v7.16b, v7.16b, v7.16b, #4
        shl     v17.4s, v16.4s, #18
        ushr    v16.4s, v16.4s, #14
        orr     v16.16b, v16.16b, v17.16b
        eor     v4.16b, v0.16b, v16.16b
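
        // (When both source operands are the same register, `ext vN.16b,
        // vN.16b, vN.16b, #4*k' rotates the four 32-bit lanes of vN so that
        // lane i picks up the old lane (i + k) mod 4.  The three `ext'
        // instructions above are interleaved with the arithmetic to hide
        // their latency; they set up the realignment described at the top
        // of the loop below.)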

0:
        // The transpose conveniently only involves reordering elements of
        // individual rows, which can be done quite easily, and reordering
        // the rows themselves, which is a trivial renaming.  It doesn't
        // involve any movement of elements between rows.
        //
        //      [ 0  5 10 15]           [ 0  5 10 15] (a, v4)
        //      [ 4  9 14  3]    -->    [ 1  6 11 12] (b, v7)
        //      [ 8 13  2  7]           [ 2  7  8 13] (c, v6)
        //      [12  1  6 11]           [ 3  4  9 14] (d, v5)
        //
        // The reorderings have been pushed upwards to reduce delays.
        sub     w0, w0, #2

        // Apply the row quarterround to each of the columns (yes!)
        // simultaneously.
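        //
        // This works because, after the realignment, lane j of the working
        // registers holds exactly the words which the row round's
        // quarterround wants for row j:
        //
        //      lane 0: ( 0,  1,  2,  3)        lane 2: (10, 11,  8,  9)
        //      lane 1: ( 5,  6,  7,  4)        lane 3: (15, 12, 13, 14)
        //
        // so the very same quarterround code works unchanged.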

        // b ^= (a + d) <<< 7
        add     v16.4s, v4.4s, v5.4s
        shl     v17.4s, v16.4s, #7
        ushr    v16.4s, v16.4s, #25
        orr     v16.16b, v16.16b, v17.16b
        eor     v7.16b, v7.16b, v16.16b

        // c ^= (b + a) <<< 9
        add     v16.4s, v7.4s, v4.4s
        shl     v17.4s, v16.4s, #9
        ushr    v16.4s, v16.4s, #23
        orr     v16.16b, v16.16b, v17.16b
        eor     v6.16b, v6.16b, v16.16b

        // d ^= (c + b) <<< 13
        add     v16.4s, v6.4s, v7.4s
        ext     v7.16b, v7.16b, v7.16b, #12
        shl     v17.4s, v16.4s, #13
        ushr    v16.4s, v16.4s, #19
        orr     v16.16b, v16.16b, v17.16b
        eor     v5.16b, v5.16b, v16.16b

        // a ^= (d + c) <<< 18
        add     v16.4s, v5.4s, v6.4s
        ext     v6.16b, v6.16b, v6.16b, #8
        ext     v5.16b, v5.16b, v5.16b, #4
        shl     v17.4s, v16.4s, #18
        ushr    v16.4s, v16.4s, #14
        orr     v16.16b, v16.16b, v17.16b
        eor     v4.16b, v4.16b, v16.16b

        // We had to undo the transpose ready for the next loop; again, the
        // reorderings have been pushed upwards to reduce latency.  The loop
        // counter was decremented above, so now see whether we should go
        // round again.
        cbz     w0, 9f

        // Do the first half of the next round because this loop is offset.

        // b ^= (a + d) <<< 7
        add     v16.4s, v4.4s, v7.4s
        shl     v17.4s, v16.4s, #7
        ushr    v16.4s, v16.4s, #25
        orr     v16.16b, v16.16b, v17.16b
        eor     v5.16b, v5.16b, v16.16b

        // c ^= (b + a) <<< 9
        add     v16.4s, v5.4s, v4.4s
        shl     v17.4s, v16.4s, #9
        ushr    v16.4s, v16.4s, #23
        orr     v16.16b, v16.16b, v17.16b
        eor     v6.16b, v6.16b, v16.16b

        // d ^= (c + b) <<< 13
        add     v16.4s, v6.4s, v5.4s
        ext     v5.16b, v5.16b, v5.16b, #12
        shl     v17.4s, v16.4s, #13
        ushr    v16.4s, v16.4s, #19
        orr     v16.16b, v16.16b, v17.16b
        eor     v7.16b, v7.16b, v16.16b

        // a ^= (d + c) <<< 18
        add     v16.4s, v7.4s, v6.4s
        ext     v6.16b, v6.16b, v6.16b, #8
        ext     v7.16b, v7.16b, v7.16b, #4
        shl     v17.4s, v16.4s, #18
        ushr    v16.4s, v16.4s, #14
        orr     v16.16b, v16.16b, v17.16b
        eor     v4.16b, v4.16b, v16.16b

        b       0b

        // Almost there.  Firstly the feedforward addition.  Also, establish
        // constants which will be useful later.
9:      add     v0.4s, v0.4s, v4.4s             // 0, 5, 10, 15
        movi    v16.2d, #0xffffffff             // = (0, -1; 0, -1)
        movi    d17, #-1                        // = (0, 0; -1, -1)
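        // (That is, v16 has all bits set in the low 32-bit word of each
        // 64-bit lane, and v17 has all bits set in its low 64 bits only.
        // They'll be the selection masks for the `bif' merges below.)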
        add     v1.4s, v1.4s, v5.4s             // 4, 9, 14, 3
        add     v2.4s, v2.4s, v6.4s             // 8, 13, 2, 7
        add     v3.4s, v3.4s, v7.4s             // 12, 1, 6, 11

        // Next we must undo the permutation which was already applied to
        // the input.  The core trick is from Dan Bernstein's `armneon3'
        // implementation, but with a lot of liposuction.
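        //
        // `bif vD, vN, vM' copies into vD those bits of vN for which the
        // corresponding bit of the mask vM is zero, and leaves the other
        // bits of vD unchanged.  So the first group of `bif's below pulls
        // each row's odd-numbered words across from a neighbouring row, and
        // the second group pulls across each row's upper pair of words;
        // between them they restore the standard 0, 1, ..., 15 ordering
        // shown in the line comments.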
        mov     v4.16b, v0.16b

        // Sort out the columns by pairs.
        bif     v0.16b, v3.16b, v16.16b         // 0, 1, 10, 11
        bif     v3.16b, v2.16b, v16.16b         // 12, 13, 6, 7
        bif     v2.16b, v1.16b, v16.16b         // 8, 9, 2, 3
        bif     v1.16b, v4.16b, v16.16b         // 4, 5, 14, 15
        mov     v4.16b, v0.16b
        mov     v5.16b, v3.16b

        // Now fix up the remaining discrepancies.
        bif     v0.16b, v2.16b, v17.16b         // 0, 1, 2, 3
        bif     v3.16b, v1.16b, v17.16b         // 12, 13, 14, 15
        bif     v2.16b, v4.16b, v17.16b         // 8, 9, 10, 11
        bif     v1.16b, v5.16b, v17.16b         // 4, 5, 6, 7

        // And with that, we're done.
        st1     {v0.4s-v3.4s}, [x2]
        ret

ENDFUNC

///----- That's all, folks --------------------------------------------------