/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha for ARM
///
/// (c) 2016 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Main code.

        .arch   armv7-a
        .fpu    neon
        .text

FUNC(chacha_core_arm_neon)
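        // (FUNC and ENDFUNC are macros from asm-common.h, included above;
        // presumably they emit the boilerplate for opening and closing a
        // global function symbol.)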

        // Arguments are in registers.
        // r0 is the number of rounds to perform
        // r1 points to the input matrix
        // r2 points to the output matrix

        // First job is to slurp the matrix into the SIMD registers.  vldm
        // and vstm work on word-aligned data, so this is fine.
        //
        //      [ 0  1  2  3] (a, q8)
        //      [ 4  5  6  7] (b, q9)
        //      [ 8  9 10 11] (c, q10)
        //      [12 13 14 15] (d, q11)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        vldmia r1, {QQ(q12, q15)}
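        // (QQ is a macro from asm-common.h; presumably it expands to the
        // d-register list covering q12-q15, since vldm and vstm take lists
        // of d registers.)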
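        // For reference, the standard ChaCha quarterround on a column
        // (a, b, c, d) is
        //
        //      a += b; d ^= a; d <<<= 16
        //      c += d; b ^= c; b <<<= 12
        //      a += b; d ^= a; d <<<=  8
        //      c += d; b ^= c; b <<<=  7
        //
        // spelled out here because the loop below is offset: the first two
        // steps of the initial column round are done here, outside the
        // loop, so that the copies land in q8-q11 for free.
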
        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q12, q13
        veor q11, q15, q8
        vshl.u32 q0, q11, #16
        vshr.u32 q11, q11, #16
        vorr q11, q11, q0

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q14, q11
        veor q9, q13, q10
        vshl.u32 q0, q9, #12
        vshr.u32 q9, q9, #20
        vorr q9, q9, q0

0:
        // Apply (the rest of) a column quarterround to each of the columns
        // simultaneously.  Alas, there doesn't seem to be a packed word
        // rotate, so we have to synthesize it.
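        //
        // The synthesis uses the usual identity
        // x <<< n = (x << n) | (x >> (32 - n)): each vshl below puts the
        // left-shifted half into the spare register q0, the vshr shifts
        // the other half in place, and the vorr merges the two.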

        // a += b; d ^= a; d <<<= 8
        vadd.u32 q8, q8, q9
        veor q11, q11, q8
        vshl.u32 q0, q11, #8
        vshr.u32 q11, q11, #24
        vorr q11, q11, q0

        // c += d; b ^= c; b <<<= 7
        vadd.u32 q10, q10, q11
        vext.32 q11, q11, q11, #3
        veor q9, q9, q10
        vext.32 q10, q10, q10, #2
        vshl.u32 q0, q9, #7
        vshr.u32 q9, q9, #25
        vorr q9, q9, q0

        // The not-quite-transpose conveniently only involves reordering
        // elements of individual rows, which can be done quite easily.  It
        // doesn't involve any movement of elements between rows, or even
        // renaming of the rows.
        //
        //      [ 0  1  2  3]           [ 0  1  2  3] (a, q8)
        //      [ 4  5  6  7]    -->    [ 5  6  7  4] (b, q9)
        //      [ 8  9 10 11]           [10 11  8  9] (c, q10)
        //      [12 13 14 15]           [15 12 13 14] (d, q11)
        //
        // The reorderings have for the most part been pushed upwards to
        // reduce delays.
        vext.32 q9, q9, q9, #1
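        // (For reference: vext.32 qN, qN, qN, #k, with the same register
        // given twice, rotates the four 32-bit lanes of qN so that lane k
        // lands in lane 0.  All of the row reorderings here are done this
        // way: b by one place, c by two, and d by three, with the
        // complementary amounts in the second half-round to undo them.)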

        // Apply the diagonal quarterround to each of the columns
        // simultaneously.

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q8, q9
        veor q11, q11, q8
        vshl.u32 q0, q11, #16
        vshr.u32 q11, q11, #16
        vorr q11, q11, q0

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q10, q11
        veor q9, q9, q10
        vshl.u32 q0, q9, #12
        vshr.u32 q9, q9, #20
        vorr q9, q9, q0

        // a += b; d ^= a; d <<<= 8
        vadd.u32 q8, q8, q9
        veor q11, q11, q8
        vshl.u32 q0, q11, #8
        vshr.u32 q11, q11, #24
        vorr q11, q11, q0

        // c += d; b ^= c; b <<<= 7
        vadd.u32 q10, q10, q11
        vext.32 q11, q11, q11, #1
        veor q9, q9, q10
        vext.32 q10, q10, q10, #2
        vshl.u32 q0, q9, #7
        vshr.u32 q9, q9, #25
        vorr q9, q9, q0

        // Finally finish off undoing the transpose, and we're done for this
        // doubleround.  Again, most of this was done above so we don't have
        // to wait for the reorderings.
        vext.32 q9, q9, q9, #3

        // Decrement the loop counter and see if we should go round again.
        subs r0, r0, #2
        bls 9f
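        // (Each trip round the loop is a doubleround, i.e., two of the
        // caller's rounds, hence the subtraction of 2; bls catches both
        // the count reaching zero and the borrow from an odd count.)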

        // Do the first part of the next round because this loop is offset.

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q8, q9
        veor q11, q11, q8
        vshl.u32 q0, q11, #16
        vshr.u32 q11, q11, #16
        vorr q11, q11, q0

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q10, q11
        veor q9, q9, q10
        vshl.u32 q0, q9, #12
        vshr.u32 q9, q9, #20
        vorr q9, q9, q0

        b 0b

        // Almost there.  Firstly the feedforward addition.
9:      vadd.u32 q8, q8, q12
        vadd.u32 q9, q9, q13
        vadd.u32 q10, q10, q14
        vadd.u32 q11, q11, q15
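        // (The original input is still intact in q12-q15: the first two
        // quarterround steps, done before the loop, wrote their results
        // into q8-q11 rather than operating in place.)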

        // And now we write out the result.
        vstmia r2, {QQ(q8, q11)}

        // And with that, we're done.
        bx r14

ENDFUNC

///----- That's all, folks --------------------------------------------------