/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of Salsa20 for ARM
///
/// (c) 2016 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

        .arch   armv7-a
        .fpu    neon

        .text

///--------------------------------------------------------------------------
/// Main code.

FUNC(salsa20_core_arm_neon)

        // Arguments are in registers.
        // r0 is the number of rounds to perform
        // r1 points to the input matrix
        // r2 points to the output matrix
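
        // From C, this function is reached through a declaration along
        // these lines (a sketch only, inferred from the register comments
        // above; the exact types in Catacomb's headers may differ):
        //
        //      extern void salsa20_core_arm_neon(unsigned nrounds,
        //                                        const uint32_t in[16],
        //                                        uint32_t out[16]);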

        // First job is to slurp the matrix into the SIMD registers.  The
        // words have already been permuted conveniently to make them line
        // up better for SIMD processing.
        //
        // The textbook arrangement of the matrix is this.
        //
        //      [C K K K]
        //      [K C N N]
        //      [T T C K]
        //      [K K K C]
        //
        // But we've rotated the columns up so that the main diagonal with
        // the constants on it ends up in the first row, giving something
        // more like
        //
        //      [C C C C]
        //      [K T K K]
        //      [T K K N]
        //      [K K N K]
        //
        // so the transformation looks like this:
        //
        //      [ 0  1  2  3]           [ 0  5 10 15] (a, q8)
        //      [ 4  5  6  7]    -->    [ 4  9 14  3] (b, q9)
        //      [ 8  9 10 11]           [ 8 13  2  7] (c, q10)
        //      [12 13 14 15]           [12  1  6 11] (d, q11)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        vldmia   r1, {QQ(q12, q15)}
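
        // As a scalar C sketch, the permutation described above takes the
        // textbook matrix m[16] to the layout we load here (hypothetical
        // helper code, not part of this file; the caller is responsible
        // for maintaining this order):
        //
        //      static const unsigned char perm[16] =
        //              {  0,  5, 10, 15,   4,  9, 14,  3,
        //                 8, 13,  2,  7,  12,  1,  6, 11 };
        //      for (i = 0; i < 16; i++) p[i] = m[perm[i]];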

        // Apply a column quarterround to each of the columns simultaneously,
        // moving the results to their working registers.  Alas, there
        // doesn't seem to be a packed word rotate, so we have to synthesize
        // it.

        // b ^= (a + d) <<< 7
        vadd.u32 q0, q12, q15
        vshl.u32 q1, q0, #7
        vsri.u32 q1, q0, #25
        veor     q9, q13, q1
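
        // The vshl/vsri pair above synthesizes the 32-bit rotate: in
        // scalar C, rotation left by n bits is (a sketch; ROTL is not
        // defined anywhere in this file)
        //
        //      #define ROTL(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
        //
        // vshl provides the left shift, and vsri inserts the right-shifted
        // bits beneath it; hence #7 pairs with #25, #9 with #23, and so on.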

        // c ^= (b + a) <<< 9
        vadd.u32 q0, q9, q12
        vshl.u32 q1, q0, #9
        vsri.u32 q1, q0, #23
        veor     q10, q14, q1

        // d ^= (c + b) <<< 13
        vadd.u32 q0, q10, q9
        vext.32  q9, q9, q9, #3
        vshl.u32 q1, q0, #13
        vsri.u32 q1, q0, #19
        veor     q11, q15, q1

        // a ^= (d + c) <<< 18
        vadd.u32 q0, q11, q10
        vext.32  q10, q10, q10, #2
        vext.32  q11, q11, q11, #1
        vshl.u32 q1, q0, #18
        vsri.u32 q1, q0, #14
        veor     q8, q12, q1
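
        // Each 32-bit lane has now been through one scalar quarterround,
        // which in C reads (a sketch, reusing the hypothetical ROTL above):
        //
        //      b ^= ROTL(a + d,  7);
        //      c ^= ROTL(b + a,  9);
        //      d ^= ROTL(c + b, 13);
        //      a ^= ROTL(d + c, 18);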

0:
        // The transpose conveniently only involves reordering elements of
        // individual rows, which can be done quite easily, and reordering
        // the rows themselves, which is a trivial renaming.  It doesn't
        // involve any movement of elements between rows.
        //
        //      [ 0  5 10 15]           [ 0  5 10 15] (a, q8)
        //      [ 4  9 14  3]    -->    [ 1  6 11 12] (b, q11)
        //      [ 8 13  2  7]           [ 2  7  8 13] (c, q10)
        //      [12  1  6 11]           [ 3  4  9 14] (d, q9)
        //
        // The reorderings have been pushed upwards to reduce delays.
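
        // In C terms, each row reordering is just a lane rotation
        // (a sketch):
        //
        //      for (i = 0; i < 4; i++) t[i] = row[(i + k) % 4];
        //
        // which is what the vext.32 instructions interleaved above
        // compute, with k = 3, 2 and 1 respectively.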

        // Apply the row quarterround to each of the columns (yes!)
        // simultaneously.

        // b ^= (a + d) <<< 7
        vadd.u32 q0, q8, q9
        vshl.u32 q1, q0, #7
        vsri.u32 q1, q0, #25
        veor     q11, q11, q1

        // c ^= (b + a) <<< 9
        vadd.u32 q0, q11, q8
        vshl.u32 q1, q0, #9
        vsri.u32 q1, q0, #23
        veor     q10, q10, q1

        // d ^= (c + b) <<< 13
        vadd.u32 q0, q10, q11
        vext.32  q11, q11, q11, #3
        vshl.u32 q1, q0, #13
        vsri.u32 q1, q0, #19
        veor     q9, q9, q1

        // a ^= (d + c) <<< 18
        vadd.u32 q0, q9, q10
        vext.32  q10, q10, q10, #2
        vext.32  q9, q9, q9, #1
        vshl.u32 q1, q0, #18
        vsri.u32 q1, q0, #14
        veor     q8, q8, q1

        // We had to undo the transpose ready for the next loop.  Again,
        // push back the reorderings to reduce latency.  Decrement the loop
        // counter and see if we should go round again.
        subs     r0, r0, #2
        bls      9f
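
        // Control-wise, this is the offset loop mentioned at the top: in
        // C it would read (a sketch, with hypothetical column_round and
        // row_round helpers, and assuming the even round count Salsa20
        // requires)
        //
        //      column_round();                 /* before the loop */
        //      for (;;) {
        //              row_round();
        //              if ((nrounds -= 2) == 0) break;
        //              column_round();         /* first half of next pair */
        //      }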

        // Do the first half of the next round because this loop is offset.

        // b ^= (a + d) <<< 7
        vadd.u32 q0, q8, q11
        vshl.u32 q1, q0, #7
        vsri.u32 q1, q0, #25
        veor     q9, q9, q1

        // c ^= (b + a) <<< 9
        vadd.u32 q0, q9, q8
        vshl.u32 q1, q0, #9
        vsri.u32 q1, q0, #23
        veor     q10, q10, q1

        // d ^= (c + b) <<< 13
        vadd.u32 q0, q10, q9
        vext.32  q9, q9, q9, #3
        vshl.u32 q1, q0, #13
        vsri.u32 q1, q0, #19
        veor     q11, q11, q1

        // a ^= (d + c) <<< 18
        vadd.u32 q0, q11, q10
        vext.32  q10, q10, q10, #2
        vext.32  q11, q11, q11, #1
        vshl.u32 q1, q0, #18
        vsri.u32 q1, q0, #14
        veor     q8, q8, q1

        b        0b

        // Almost there.  Firstly the feedforward addition.  Also, establish
        // a constant which will be useful later.
9:      vadd.u32 q0, q8, q12                    // 0, 5, 10, 15
        vmov.i64 q12, #0xffffffff               // = (0, -1; 0, -1)
        vadd.u32 q1, q9, q13                    // 4, 9, 14, 3
        vadd.u32 q2, q10, q14                   // 8, 13, 2, 7
        vadd.u32 q3, q11, q15                   // 12, 1, 6, 11
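
        // The feedforward above is the usual Salsa20 finishing step, which
        // in scalar C is simply (a sketch)
        //
        //      for (i = 0; i < 16; i++) out[i] = x[i] + in[i];
        //
        // still in the permuted word order at this point, hence the
        // shuffled indices in the comments.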

        // Next we must undo the permutation which was already applied to
        // the input.  The core trick is from Dan Bernstein's `armneon3'
        // implementation, but with a lot of liposuction.
        vmov     q15, q0

        // Sort out the columns by pairs.
        vbif     q0, q3, q12                    // 0, 1, 10, 11
        vbif     q3, q2, q12                    // 12, 13, 6, 7
        vbif     q2, q1, q12                    // 8, 9, 2, 3
        vbif     q1, q15, q12                   // 4, 5, 14, 15

        // Now fix up the remaining discrepancies.
        vswp     D1(q0), D1(q2)
        vswp     D1(q1), D1(q3)
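
        // In C terms, each vbif above is a lane select against the q12
        // mask established earlier (a sketch):
        //
        //      x = (x & mask) | (y & ~mask);   /* keep lanes 0 and 2, */
        //                                      /* take lanes 1 and 3  */
        //
        // and the vswp pair then exchanges the top 64-bit halves, leaving
        // q0 holding words 0-3, q1 words 4-7, q2 words 8-11 and q3 words
        // 12-15.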

        // And with that, we're done.
        vstmia   r2, {QQ(q0, q3)}
        bx       r14

ENDFUNC

///----- That's all, folks --------------------------------------------------