/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha for ARM
///
/// (c) 2016 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

        .arch   armv7-a
        .fpu    neon

        .text

///--------------------------------------------------------------------------
/// Main code.

FUNC(chacha_core_arm_neon)

        // Arguments are in registers.
        // r0 is the number of rounds to perform
        // r1 points to the input matrix
        // r2 points to the output matrix
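        //
        // In C terms, the contract is roughly the following (this
        // prototype is an illustration; the real declaration lives in
        // Catacomb's headers and may differ in detail):
        //
        //      void chacha_core_arm_neon(unsigned nrounds,
        //                                const uint32_t in[16],
        //                                uint32_t out[16]);
        //
        // with nrounds the total round count, e.g., 20 for ChaCha20.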

        // First job is to slurp the matrix into the SIMD registers.  vldm
        // and vstm work on word-aligned data, so this is fine.
        //
        //      [ 0  1  2  3] (a, q8)
        //      [ 4  5  6  7] (b, q9)
        //      [ 8  9 10 11] (c, q10)
        //      [12 13 14 15] (d, q11)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        vldmia   r1, {QQ(q12, q15)}

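        // (QQ is a helper macro from asm-common.h which, going by how this
        // file uses it, names the register range q12--q15 in the transfer
        // list; that reading is an assumption from context.)
        //
        // For reference, one ChaCha quarterround on a column (a, b, c, d),
        // in the pseudocode style used by the comments below:
        //
        //      a += b; d ^= a; d <<<= 16;
        //      c += d; b ^= c; b <<<= 12;
        //      a += b; d ^= a; d <<<= 8;
        //      c += d; b ^= c; b <<<= 7;
        //
        // The first two steps are done here, before the loop, so that their
        // results land in q8--q11 while q12--q15 keep the saved copy.
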
        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q12, q13
        veor     q11, q15, q8
        vshl.u32 q0, q11, #16
        vshr.u32 q11, q11, #16
        vorr     q11, q11, q0

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q14, q11
        veor     q9, q13, q10
        vshl.u32 q0, q9, #12
        vshr.u32 q9, q9, #20
        vorr     q9, q9, q0

0:
        // Apply (the rest of) a column quarterround to each of the columns
        // simultaneously.  Alas, there doesn't seem to be a packed word
        // rotate, so we have to synthesize it.
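        //
        // The synthesis is the usual shift-shift-or construction: for a
        // 32-bit word x and 0 < n < 32,
        //
        //      x <<< n  ==  (x << n) | (x >> (32 - n))
        //
        // so each rotate below costs a vshl, a vshr, and a vorr, via the
        // scratch register q0.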

        // a += b; d ^= a; d <<<= 8
        vadd.u32 q8, q8, q9
        veor     q11, q11, q8
        vshl.u32 q0, q11, #8
        vshr.u32 q11, q11, #24
        vorr     q11, q11, q0

        // c += d; b ^= c; b <<<= 7
        vadd.u32 q10, q10, q11
        vext.32  q11, q11, q11, #3
        veor     q9, q9, q10
        vext.32  q10, q10, q10, #2
        vshl.u32 q0, q9, #7
        vshr.u32 q9, q9, #25
        vorr     q9, q9, q0

        // The not-quite-transpose conveniently only involves reordering
        // elements of individual rows, which can be done quite easily.  It
        // doesn't involve any movement of elements between rows, or even
        // renaming of the rows.
        //
        //      [ 0  1  2  3]     [ 0  1  2  3] (a, q8)
        //      [ 4  5  6  7] --> [ 5  6  7  4] (b, q9)
        //      [ 8  9 10 11]     [10 11  8  9] (c, q10)
        //      [12 13 14 15]     [15 12 13 14] (d, q11)
        //
        // The reorderings have for the most part been pushed upwards to
        // reduce delays.
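        //
        // Each row rotation is a single vext: with both source operands
        // the same register, vext.32 qN, qN, qN, #k sets lane i of the
        // result to lane (i + k) mod 4 of the source, i.e., it rotates the
        // four lanes down by k.  The #3 on d and the #2 on c were issued
        // in the block above; the #1 on b happens here.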
        vext.32  q9, q9, q9, #1

        // Apply the diagonal quarterround to each of the diagonals
        // simultaneously; thanks to the reordering above, the diagonals
        // now occupy the columns.

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q8, q9
        veor     q11, q11, q8
        vshl.u32 q0, q11, #16
        vshr.u32 q11, q11, #16
        vorr     q11, q11, q0

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q10, q11
        veor     q9, q9, q10
        vshl.u32 q0, q9, #12
        vshr.u32 q9, q9, #20
        vorr     q9, q9, q0

        // a += b; d ^= a; d <<<= 8
        vadd.u32 q8, q8, q9
        veor     q11, q11, q8
        vshl.u32 q0, q11, #8
        vshr.u32 q11, q11, #24
        vorr     q11, q11, q0

        // c += d; b ^= c; b <<<= 7
        vadd.u32 q10, q10, q11
        vext.32  q11, q11, q11, #1
        veor     q9, q9, q10
        vext.32  q10, q10, q10, #2
        vshl.u32 q0, q9, #7
        vshr.u32 q9, q9, #25
        vorr     q9, q9, q0

        // Finally finish off undoing the transpose, and we're done for this
        // doubleround.  Again, most of this was done above so we don't have
        // to wait for the reorderings.
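        //
        // The undoing rotations are the inverses of the earlier ones: #1
        // on d and #2 on c (both already issued in the block above), and
        // #3 on b here:
        //
        //      [ 0  1  2  3]     [ 0  1  2  3] (a, q8)
        //      [ 5  6  7  4] --> [ 4  5  6  7] (b, q9)
        //      [10 11  8  9]     [ 8  9 10 11] (c, q10)
        //      [15 12 13 14]     [12 13 14 15] (d, q11)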
        vext.32  q9, q9, q9, #3

        // Decrement the loop counter and see if we should go round again.
        subs     r0, r0, #2
        bls      9f
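        // (r0 counts individual rounds, but each trip through the loop does
        // a doubleround, i.e., a column round followed by a diagonal round,
        // hence the step of 2.)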

        // Do the first part of the next round because this loop is offset.

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q8, q9
        veor     q11, q11, q8
        vshl.u32 q0, q11, #16
        vshr.u32 q11, q11, #16
        vorr     q11, q11, q0

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q10, q11
        veor     q9, q9, q10
        vshl.u32 q0, q9, #12
        vshr.u32 q9, q9, #20
        vorr     q9, q9, q0

        b        0b

        // Almost there.  Firstly the feedforward addition.
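        // That is, out[i] = x[i] + in[i] for each of the sixteen words,
        // with the original input matrix still sitting in q12--q15 from
        // the load at the top.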
9:      vadd.u32 q8, q8, q12
        vadd.u32 q9, q9, q13
        vadd.u32 q10, q10, q14
        vadd.u32 q11, q11, q15

        // And now we write out the result.
        vstmia   r2, {QQ(q8, q11)}

        // And with that, we're done.
        bx       r14

ENDFUNC

///----- That's all, folks --------------------------------------------------