/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of Salsa20 for ARM
///
/// (c) 2016 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Main code.

        .arch   armv7-a
        .fpu    neon
        .text

FUNC(salsa20_core_arm_neon)

        // Arguments are in registers.
        // r0 is the number of rounds to perform
        // r1 points to the input matrix
        // r2 points to the output matrix

        // First job is to slurp the matrix into the SIMD registers.  The
        // words have already been permuted conveniently to make them line up
        // better for SIMD processing.
        //
        // The textbook arrangement of the matrix is this.
        //
        //      [C K K K]
        //      [K C N N]
        //      [T T C K]
        //      [K K K C]
        //
        // But we've rotated the columns up so that the main diagonal with
        // the constants on it ends up in the first row, giving something
        // more like
        //
        //      [C C C C]
        //      [K T K K]
        //      [T K K N]
        //      [K K N K]
        //
        // so the transformation looks like this:
        //
        //      [ 0  1  2  3]           [ 0  5 10 15] (a, q8)
        //      [ 4  5  6  7]    -->    [ 4  9 14  3] (b, q9)
        //      [ 8  9 10 11]           [ 8 13  2  7] (c, q10)
        //      [12 13 14 15]           [12  1  6 11] (d, q11)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        vldmia  r1, {d24-d31}
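        // (d24-d31 alias q12-q15, so the permuted input rows land in
        // q12-q15; those registers are left untouched until the
        // feedforward addition at the end.)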

        // Apply a column quarterround to each of the columns simultaneously,
        // moving the results to their working registers.  Alas, there
        // doesn't seem to be a packed word rotate, so we have to synthesize
        // it.
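        //
        // For reference, the quarterround computed on each column below,
        // written as illustrative pseudocode, is
        //
        //      b ^= (a + d) <<<  7;    c ^= (b + a) <<<  9;
        //      d ^= (c + b) <<< 13;    a ^= (d + c) <<< 18;
        //
        // where x <<< n is a 32-bit left rotation, synthesized here as
        // (x << n) | (x >> (32 - n)) using two shifts and an OR.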

        // b ^= (a + d) <<< 7
        vadd.u32 q0, q12, q15
        vshl.u32 q1, q0, #7
        vshr.u32 q0, q0, #25
        vorr    q0, q0, q1
        veor    q9, q13, q0

        // c ^= (b + a) <<< 9
        vadd.u32 q0, q9, q12
        vshl.u32 q1, q0, #9
        vshr.u32 q0, q0, #23
        vorr    q0, q0, q1
        veor    q10, q14, q0

        // d ^= (c + b) <<< 13
        vadd.u32 q0, q10, q9
        vext.32 q9, q9, q9, #3
        vshl.u32 q1, q0, #13
        vshr.u32 q0, q0, #19
        vorr    q0, q0, q1
        veor    q11, q15, q0

        // a ^= (d + c) <<< 18
        vadd.u32 q0, q11, q10
        vext.32 q10, q10, q10, #2
        vext.32 q11, q11, q11, #1
        vshl.u32 q1, q0, #18
        vshr.u32 q0, q0, #14
        vorr    q0, q0, q1
        veor    q8, q12, q0
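        // The column round's results now live in q8-q11, with the original
        // input still intact in q12-q15, and the vext.32 reorders above
        // have already set up the transposed arrangement described at the
        // head of the loop below.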

0:
        // The transpose conveniently only involves reordering elements of
        // individual rows, which can be done quite easily, and reordering
        // the rows themselves, which is a trivial renaming.  It doesn't
        // involve any movement of elements between rows.
        //
        //      [ 0  5 10 15]           [ 0  5 10 15] (a, q8)
        //      [ 4  9 14  3]    -->    [ 1  6 11 12] (b, q11)
        //      [ 8 13  2  7]           [ 2  7  8 13] (c, q10)
        //      [12  1  6 11]           [ 3  4  9 14] (d, q9)
        //
        // The reorderings have been pushed upwards to reduce delays.
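        // (The element rotations themselves are carried out by the vext.32
        // instructions folded into the surrounding quarterrounds, so the
        // transpose costs no extra instructions of its own.)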

        // Apply the row quarterround to each of the columns (yes!)
        // simultaneously.

        // b ^= (a + d) <<< 7
        vadd.u32 q0, q8, q9
        vshl.u32 q1, q0, #7
        vshr.u32 q0, q0, #25
        vorr    q0, q0, q1
        veor    q11, q11, q0

        // c ^= (b + a) <<< 9
        vadd.u32 q0, q11, q8
        vshl.u32 q1, q0, #9
        vshr.u32 q0, q0, #23
        vorr    q0, q0, q1
        veor    q10, q10, q0

        // d ^= (c + b) <<< 13
        vadd.u32 q0, q10, q11
        vext.32 q11, q11, q11, #3
        vshl.u32 q1, q0, #13
        vshr.u32 q0, q0, #19
        vorr    q0, q0, q1
        veor    q9, q9, q0

        // a ^= (d + c) <<< 18
        vadd.u32 q0, q9, q10
        vext.32 q10, q10, q10, #2
        vext.32 q9, q9, q9, #1
        vshl.u32 q1, q0, #18
        vshr.u32 q0, q0, #14
        vorr    q0, q0, q1
        veor    q8, q8, q0

        // We had to undo the transpose ready for the next loop.  Again,
        // push back the reorderings to reduce latency.  Decrement the loop
        // counter and see if we should go round again.
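        // (Each pass round the loop accounts for a full double round: the
        // column round begun before entry or at the foot of the loop, and
        // the row round just finished; hence the subtraction of two.)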
        subs    r0, r0, #2
        bls     9f

        // Do the first half of the next round because this loop is offset.

        // b ^= (a + d) <<< 7
        vadd.u32 q0, q8, q11
        vshl.u32 q1, q0, #7
        vshr.u32 q0, q0, #25
        vorr    q0, q0, q1
        veor    q9, q9, q0

        // c ^= (b + a) <<< 9
        vadd.u32 q0, q9, q8
        vshl.u32 q1, q0, #9
        vshr.u32 q0, q0, #23
        vorr    q0, q0, q1
        veor    q10, q10, q0

        // d ^= (c + b) <<< 13
        vadd.u32 q0, q10, q9
        vext.32 q9, q9, q9, #3
        vshl.u32 q1, q0, #13
        vshr.u32 q0, q0, #19
        vorr    q0, q0, q1
        veor    q11, q11, q0

        // a ^= (d + c) <<< 18
        vadd.u32 q0, q11, q10
        vext.32 q10, q10, q10, #2
        vext.32 q11, q11, q11, #1
        vshl.u32 q1, q0, #18
        vshr.u32 q0, q0, #14
        vorr    q0, q0, q1
        veor    q8, q8, q0

        b       0b

        // Almost there.  Firstly the feedforward addition, and then we have
        // to write out the result.  Here we have to undo the permutation
        // which was already applied to the input.
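        //
        // Each vst1.32 below writes a single 32-bit lane; the lanes are
        // picked so that the sixteen words come out in the textbook order
        // 0, 1, ..., 15.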
9:      vadd.u32 q8, q8, q12
        vadd.u32 q9, q9, q13
        vadd.u32 q10, q10, q14
        vadd.u32 q11, q11, q15

        vst1.32 {d16[0]}, [r2 :32]!
        vst1.32 {d22[1]}, [r2 :32]!
        vst1.32 {d21[0]}, [r2 :32]!
        vst1.32 {d19[1]}, [r2 :32]!

        vst1.32 {d18[0]}, [r2 :32]!
        vst1.32 {d16[1]}, [r2 :32]!
        vst1.32 {d23[0]}, [r2 :32]!
        vst1.32 {d21[1]}, [r2 :32]!

        vst1.32 {d20[0]}, [r2 :32]!
        vst1.32 {d18[1]}, [r2 :32]!
        vst1.32 {d17[0]}, [r2 :32]!
        vst1.32 {d23[1]}, [r2 :32]!

        vst1.32 {d22[0]}, [r2 :32]!
        vst1.32 {d20[1]}, [r2 :32]!
        vst1.32 {d19[0]}, [r2 :32]!
        vst1.32 {d17[1]}, [r2 :32]!

        // And with that, we're done.
        bx      r14

ENDFUNC

///----- That's all, folks --------------------------------------------------