/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha for AArch64
///
/// (c) 2018 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Main code.

        .arch   armv8-a
        .text

FUNC(chacha_core_arm64)

        // Arguments are in registers.
        // w0 is the number of rounds to perform
        // x1 points to the input matrix
        // x2 points to the output matrix

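        // For orientation: the function computes the ChaCha core -- some
        // number of doublerounds over a 4x4 matrix of 32-bit words,
        // followed by a feedforward addition of the original input.  As a
        // scalar C sketch, the quarterround being vectorized below is
        // (ROTL32 is an illustrative helper, not something in this file):
        //
        //      a += b; d ^= a; d = ROTL32(d, 16);
        //      c += d; b ^= c; b = ROTL32(b, 12);
        //      a += b; d ^= a; d = ROTL32(d,  8);
        //      c += d; b ^= c; b = ROTL32(b,  7);
        //
        // Each doubleround applies this to the four columns and then to
        // the four diagonals of the matrix.
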
        // First job is to slurp the matrix into the SIMD registers.
        //
        //      [ 0  1  2  3] (a, v4)
        //      [ 4  5  6  7] (b, v5)
        //      [ 8  9 10 11] (c, v6)
        //      [12 13 14 15] (d, v7)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        ld1     {v0.4s-v3.4s}, [x1]

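        // Because of that offset, the loop is software-pipelined.
        // Schematically (a sketch, not literal code):
        //
        //      first half of a column round    (reads v0-v3, writes v4-v7)
        //  0:  rest of column round; shuffle rows
        //      diagonal round; unshuffle rows; if no rounds left, goto 9
        //      first half of a column round; goto 0
        //  9:  feedforward; store the result
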
        // a += b; d ^= a; d <<<= 16
        add     v4.4s, v0.4s, v1.4s
        eor     v7.16b, v3.16b, v4.16b
        shl     v16.4s, v7.4s, #16
        ushr    v7.4s, v7.4s, #16
        orr     v7.16b, v7.16b, v16.16b

        // c += d; b ^= c; b <<<= 12
        add     v6.4s, v2.4s, v7.4s
        eor     v5.16b, v1.16b, v6.16b
        shl     v16.4s, v5.4s, #12
        ushr    v5.4s, v5.4s, #20
        orr     v5.16b, v5.16b, v16.16b

0:
        // Apply (the rest of) a column quarterround to each of the columns
        // simultaneously.  Alas, there doesn't seem to be a packed word
        // rotate, so we have to synthesize it.
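        //
        // Each rotate below therefore uses the usual three-instruction
        // idiom -- in C terms (a sketch): x = (x << n) | (x >> (32 - n))
        // -- with v16 as scratch for the left-shifted half.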

        // a += b; d ^= a; d <<<= 8
        add     v4.4s, v4.4s, v5.4s
        eor     v7.16b, v7.16b, v4.16b
        shl     v16.4s, v7.4s, #8
        ushr    v7.4s, v7.4s, #24
        orr     v7.16b, v7.16b, v16.16b

        // c += d; b ^= c; b <<<= 7
        add     v6.4s, v6.4s, v7.4s
        ext     v7.16b, v7.16b, v7.16b, #12
        eor     v5.16b, v5.16b, v6.16b
        ext     v6.16b, v6.16b, v6.16b, #8
        shl     v16.4s, v5.4s, #7
        ushr    v5.4s, v5.4s, #25
        orr     v5.16b, v5.16b, v16.16b

        // The not-quite-transpose conveniently only involves reordering
        // elements of individual rows, which can be done quite easily.  It
        // doesn't involve any movement of elements between rows, or even
        // renaming of the rows.
        //
        //      [ 0  1  2  3]           [ 0  1  2  3] (a, v4)
        //      [ 4  5  6  7]    -->    [ 5  6  7  4] (b, v5)
        //      [ 8  9 10 11]           [10 11  8  9] (c, v6)
        //      [12 13 14 15]           [15 12 13 14] (d, v7)
        //
        // The reorderings have for the most part been pushed upwards to
        // reduce delays: the `ext' instructions interleaved above rotate
        // the c and d rows, so only b remains to be done here.
        ext     v5.16b, v5.16b, v5.16b, #4
        sub     w0, w0, #2              // count this doubleround's rounds

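        // After the shuffle, the columns of the working matrix hold the
        // original matrix's diagonals -- the first column, for instance,
        // is now elements 0, 5, 10, 15 -- so exactly the same column-wise
        // code performs the diagonal quarterrounds.
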
        // Apply the diagonal quarterround to each of the diagonals
        // simultaneously.

        // a += b; d ^= a; d <<<= 16
        add     v4.4s, v4.4s, v5.4s
        eor     v7.16b, v7.16b, v4.16b
        shl     v16.4s, v7.4s, #16
        ushr    v7.4s, v7.4s, #16
        orr     v7.16b, v7.16b, v16.16b

        // c += d; b ^= c; b <<<= 12
        add     v6.4s, v6.4s, v7.4s
        eor     v5.16b, v5.16b, v6.16b
        shl     v16.4s, v5.4s, #12
        ushr    v5.4s, v5.4s, #20
        orr     v5.16b, v5.16b, v16.16b

        // a += b; d ^= a; d <<<= 8
        add     v4.4s, v4.4s, v5.4s
        eor     v7.16b, v7.16b, v4.16b
        shl     v16.4s, v7.4s, #8
        ushr    v7.4s, v7.4s, #24
        orr     v7.16b, v7.16b, v16.16b

        // c += d; b ^= c; b <<<= 7
        add     v6.4s, v6.4s, v7.4s
        ext     v7.16b, v7.16b, v7.16b, #4
        eor     v5.16b, v5.16b, v6.16b
        ext     v6.16b, v6.16b, v6.16b, #8
        shl     v16.4s, v5.4s, #7
        ushr    v5.4s, v5.4s, #25
        orr     v5.16b, v5.16b, v16.16b

        // Finally finish off undoing the transpose, and we're done for this
        // doubleround.  Again, most of this was done above so we don't have
        // to wait for the reorderings.
        ext     v5.16b, v5.16b, v5.16b, #12

        // We decremented the loop counter above; see if we should go round
        // again.
        cbz     w0, 9f

        // Do the first part of the next round because this loop is offset.

        // a += b; d ^= a; d <<<= 16
        add     v4.4s, v4.4s, v5.4s
        eor     v7.16b, v7.16b, v4.16b
        shl     v16.4s, v7.4s, #16
        ushr    v7.4s, v7.4s, #16
        orr     v7.16b, v7.16b, v16.16b

        // c += d; b ^= c; b <<<= 12
        add     v6.4s, v6.4s, v7.4s
        eor     v5.16b, v5.16b, v6.16b
        shl     v16.4s, v5.4s, #12
        ushr    v5.4s, v5.4s, #20
        orr     v5.16b, v5.16b, v16.16b

        b       0b

        // Almost there.  Firstly the feedforward addition: add the
        // original input, still intact in v0-v3, to the permuted state --
        // this is why we kept the untouched copy at the start.
9:      add     v0.4s, v0.4s, v4.4s
        add     v1.4s, v1.4s, v5.4s
        add     v2.4s, v2.4s, v6.4s
        add     v3.4s, v3.4s, v7.4s

        // And now we write out the result.
        st1     {v0.4s-v3.4s}, [x2]

        // And with that, we're done.
        ret

ENDFUNC

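// For reference, a sketch of the C-level contract implied by the argument
// comments above (the prototype here is an assumption for illustration,
// not declared in this file):
//
//      extern void chacha_core_arm64(unsigned nrounds,
//                                    const uint32_t in[16],
//                                    uint32_t out[16]);
//
//      uint32_t in[16], out[16];
//      /* ... fill in[] with the ChaCha input matrix ... */
//      chacha_core_arm64(20, in, out);
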
///----- That's all, folks --------------------------------------------------