/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of Salsa20 for AArch64
///
/// (c) 2018 Straylight/Edgeware
///
///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.
///
///--------------------------------------------------------------------------

#include "asm-common.h"

///--------------------------------------------------------------------------
40 FUNC(salsa20_core_arm64)
42 // Arguments are in registers.
43 // w0 is the number of rounds to perform
44 // x1 points to the input matrix
45 // x2 points to the output matrix
47 // First job is to slurp the matrix into the SIMD registers. The
48 // words have already been permuted conveniently to make them line up
49 // better for SIMD processing.
51 // The textbook arrangement of the matrix is this.
58 // But we've rotated the columns up so that the main diagonal with
59 // the constants on it end up in the first row, giving something more
67 // so the transformation looks like this:
69 // [ 0 1 2 3] [ 0 5 10 15] (a, v4)
70 // [ 4 5 6 7] --> [ 4 9 14 3] (b, v5)
71 // [ 8 9 10 11] [ 8 13 2 7] (c, v6)
72 // [12 13 14 15] [12 1 6 11] (d, v7)
74 // We need a copy for later. Rather than waste time copying them by
75 // hand, we'll use the three-address nature of the instruction set.
76 // But this means that the main loop is offset by a bit.
77 ld1 {v0.4s-v3.4s}, [x1]
79 // Apply a column quarterround to each of the columns simultaneously,
80 // moving the results to their working registers. Alas, there
81 // doesn't seem to be a packed word rotate, so we have to synthesize
85 add v16.4s, v0.4s, v3.4s
86 shl v17.4s, v16.4s, #7
87 sri v17.4s, v16.4s, #25
88 eor v5.16b, v1.16b, v17.16b
91 add v16.4s, v5.4s, v0.4s
92 shl v17.4s, v16.4s, #9
93 sri v17.4s, v16.4s, #23
94 eor v6.16b, v2.16b, v17.16b
96 // d ^= (c + b) <<< 13
97 add v16.4s, v6.4s, v5.4s
98 ext v5.16b, v5.16b, v5.16b, #12
99 shl v17.4s, v16.4s, #13
100 sri v17.4s, v16.4s, #19
101 eor v7.16b, v3.16b, v17.16b
103 // a ^= (d + c) <<< 18
104 add v16.4s, v7.4s, v6.4s
105 ext v6.16b, v6.16b, v6.16b, #8
106 ext v7.16b, v7.16b, v7.16b, #4
107 shl v17.4s, v16.4s, #18
108 sri v17.4s, v16.4s, #14
109 eor v4.16b, v0.16b, v17.16b
112 // The transpose conveniently only involves reordering elements of
113 // individual rows, which can be done quite easily, and reordering
114 // the rows themselves, which is a trivial renaming. It doesn't
115 // involve any movement of elements between rows.
117 // [ 0 5 10 15] [ 0 5 10 15] (a, v4)
118 // [ 4 9 14 3] --> [ 1 6 11 12] (b, v7)
119 // [ 8 13 2 7] [ 2 7 8 13] (c, v6)
120 // [12 1 6 11] [ 3 4 9 14] (d, v5)
122 // The reorderings have been pushed upwards to reduce delays.
125 // Apply the row quarterround to each of the columns (yes!)
128 // b ^= (a + d) <<< 7
129 add v16.4s, v4.4s, v5.4s
130 shl v17.4s, v16.4s, #7
131 sri v17.4s, v16.4s, #25
132 eor v7.16b, v7.16b, v17.16b
134 // c ^= (b + a) <<< 9
135 add v16.4s, v7.4s, v4.4s
136 shl v17.4s, v16.4s, #9
137 sri v17.4s, v16.4s, #23
138 eor v6.16b, v6.16b, v17.16b
140 // d ^= (c + b) <<< 13
141 add v16.4s, v6.4s, v7.4s
142 ext v7.16b, v7.16b, v7.16b, #12
143 shl v17.4s, v16.4s, #13
144 sri v17.4s, v16.4s, #19
145 eor v5.16b, v5.16b, v17.16b
147 // a ^= (d + c) <<< 18
148 add v16.4s, v5.4s, v6.4s
149 ext v6.16b, v6.16b, v6.16b, #8
150 ext v5.16b, v5.16b, v5.16b, #4
151 shl v17.4s, v16.4s, #18
152 sri v17.4s, v16.4s, #14
153 eor v4.16b, v4.16b, v17.16b
155 // We had to undo the transpose ready for the next loop. Again, push
156 // back the reorderings to reduce latency. Decrement the loop
157 // counter and see if we should go round again.
160 // Do the first half of the next round because this loop is offset.
162 // b ^= (a + d) <<< 7
163 add v16.4s, v4.4s, v7.4s
164 shl v17.4s, v16.4s, #7
165 sri v17.4s, v16.4s, #25
166 eor v5.16b, v5.16b, v17.16b
168 // c ^= (b + a) <<< 9
169 add v16.4s, v5.4s, v4.4s
170 shl v17.4s, v16.4s, #9
171 sri v17.4s, v16.4s, #23
172 eor v6.16b, v6.16b, v17.16b
174 // d ^= (c + b) <<< 13
175 add v16.4s, v6.4s, v5.4s
176 ext v5.16b, v5.16b, v5.16b, #12
177 shl v17.4s, v16.4s, #13
178 sri v17.4s, v16.4s, #19
179 eor v7.16b, v7.16b, v17.16b
181 // a ^= (d + c) <<< 18
182 add v16.4s, v7.4s, v6.4s
183 ext v6.16b, v6.16b, v6.16b, #8
184 ext v7.16b, v7.16b, v7.16b, #4
185 shl v17.4s, v16.4s, #18
186 sri v17.4s, v16.4s, #14
187 eor v4.16b, v4.16b, v17.16b
191 // Almost there. Firstly the feedfoward addition. Also, establish
192 // constants which will be useful later.
193 9: add v0.4s, v0.4s, v4.4s // 0, 5, 10, 15
194 movi v16.2d, #0xffffffff // = (0, -1; 0, -1)
195 movi d17, #-1 // = (0, 0; -1, -1)
196 add v1.4s, v1.4s, v5.4s // 4, 9, 14, 3
197 add v2.4s, v2.4s, v6.4s // 8, 13, 2, 7
198 add v3.4s, v3.4s, v7.4s // 12, 1, 6, 11
200 // Next we must undo the permutation which was already applied to the
201 // input. The core trick is from Dan Bernstein's `armneon3'
202 // implementation, but with a lot of liposuction.
205 // Sort out the columns by pairs.
206 bif v0.16b, v3.16b, v16.16b // 0, 1, 10, 11
207 bif v3.16b, v2.16b, v16.16b // 12, 13, 6, 7
208 bif v2.16b, v1.16b, v16.16b // 8, 9, 2, 3
209 bif v1.16b, v4.16b, v16.16b // 4, 5, 14, 15
213 // Now fix up the remaining discrepancies.
214 bif v0.16b, v2.16b, v17.16b // 0, 1, 2, 3
215 bif v3.16b, v1.16b, v17.16b // 12, 13, 14, 15
216 bif v2.16b, v4.16b, v17.16b // 8, 9, 10, 11
217 bif v1.16b, v5.16b, v17.16b // 4, 5, 6, 7
219 // And with that, we're done.
220 st1 {v0.4s-v3.4s}, [x2]
///----- That's all, folks --------------------------------------------------