/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha for AArch64
///
/// (c) 2018 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

	.arch	armv8-a

	.text

///--------------------------------------------------------------------------
/// Main code.

FUNC(chacha_core_arm64)

	// Arguments are in registers.
	// w0 is the number of rounds to perform
	// x1 points to the input matrix
	// x2 points to the output matrix
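	//
	// In C terms this corresponds to a (presumed) prototype along the
	// lines of
	//	void chacha_core_arm64(unsigned nrounds,
	//			       const uint32_t src[16], uint32_t dst[16]);
	// under the AAPCS64 calling convention.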

	// First job is to slurp the matrix into the SIMD registers.
	//
	//	[ 0  1  2  3] (a, v4)
	//	[ 4  5  6  7] (b, v5)
	//	[ 8  9 10 11] (c, v6)
	//	[12 13 14 15] (d, v7)
	//
	// We need a copy for later.  Rather than waste time copying them by
	// hand, we'll use the three-address nature of the instruction set.
	// But this means that the main loop is offset by a bit.
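	// (The single ld1 below pulls all 64 bytes of the matrix into the
	// four consecutive registers v0-v3 in one go; the first half-round
	// then writes its results into v4-v7, leaving the originals intact
	// for the final feedforward.)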
	ld1	{v0.4s-v3.4s}, [x1]

	// a += b; d ^= a; d <<<= 16
	add	v4.4s, v0.4s, v1.4s
	eor	v7.16b, v3.16b, v4.16b
	shl	v16.4s, v7.4s, #16
	ushr	v7.4s, v7.4s, #16
	orr	v7.16b, v7.16b, v16.16b

	// c += d; b ^= c; b <<<= 12
	add	v6.4s, v2.4s, v7.4s
	eor	v5.16b, v1.16b, v6.16b
	shl	v16.4s, v5.4s, #12
	ushr	v5.4s, v5.4s, #20
	orr	v5.16b, v5.16b, v16.16b

0:
	// Apply (the rest of) a column quarterround to each of the columns
	// simultaneously.  Alas, there doesn't seem to be a packed word
	// rotate, so we have to synthesize it.
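	// (Each rotate is built as in the C idiom
	//	x = (x << n) | (x >> (32 - n));
	// i.e., a shl of the low bits into the scratch register v16, a ushr
	// of the high bits in place, and an orr to combine them.)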

	// a += b; d ^= a; d <<<= 8
	add	v4.4s, v4.4s, v5.4s
	eor	v7.16b, v7.16b, v4.16b
	shl	v16.4s, v7.4s, #8
	ushr	v7.4s, v7.4s, #24
	orr	v7.16b, v7.16b, v16.16b

	// c += d; b ^= c; b <<<= 7
	add	v6.4s, v6.4s, v7.4s
	ext	v7.16b, v7.16b, v7.16b, #12
	eor	v5.16b, v5.16b, v6.16b
	ext	v6.16b, v6.16b, v6.16b, #8
	shl	v16.4s, v5.4s, #7
	ushr	v5.4s, v5.4s, #25
	orr	v5.16b, v5.16b, v16.16b

	// The not-quite-transpose conveniently only involves reordering
	// elements of individual rows, which can be done quite easily.  It
	// doesn't involve any movement of elements between rows, or even
	// renaming of the rows.
	//
	//	[ 0  1  2  3]		[ 0  1  2  3] (a, v4)
	//	[ 4  5  6  7]	-->	[ 5  6  7  4] (b, v5)
	//	[ 8  9 10 11]		[10 11  8  9] (c, v6)
	//	[12 13 14 15]		[15 12 13 14] (d, v7)
	//
	// The reorderings have for the most part been pushed upwards to
	// reduce delays.
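	// (An ext with the same source register twice rotates the vector by
	// whole bytes: #4 brings lane 1 down to lane 0, #8 swaps the two
	// halves, and #12 brings lane 3 down to lane 0, which is exactly the
	// set of row rotations pictured above.)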
	ext	v5.16b, v5.16b, v5.16b, #4
	sub	w0, w0, #2

	// Apply the diagonal quarterround to each of the diagonals (now
	// lined up in the columns) simultaneously.

	// a += b; d ^= a; d <<<= 16
	add	v4.4s, v4.4s, v5.4s
	eor	v7.16b, v7.16b, v4.16b
	shl	v16.4s, v7.4s, #16
	ushr	v7.4s, v7.4s, #16
	orr	v7.16b, v7.16b, v16.16b

	// c += d; b ^= c; b <<<= 12
	add	v6.4s, v6.4s, v7.4s
	eor	v5.16b, v5.16b, v6.16b
	shl	v16.4s, v5.4s, #12
	ushr	v5.4s, v5.4s, #20
	orr	v5.16b, v5.16b, v16.16b

	// a += b; d ^= a; d <<<= 8
	add	v4.4s, v4.4s, v5.4s
	eor	v7.16b, v7.16b, v4.16b
	shl	v16.4s, v7.4s, #8
	ushr	v7.4s, v7.4s, #24
	orr	v7.16b, v7.16b, v16.16b

	// c += d; b ^= c; b <<<= 7
	add	v6.4s, v6.4s, v7.4s
	ext	v7.16b, v7.16b, v7.16b, #4
	eor	v5.16b, v5.16b, v6.16b
	ext	v6.16b, v6.16b, v6.16b, #8
	shl	v16.4s, v5.4s, #7
	ushr	v5.4s, v5.4s, #25
	orr	v5.16b, v5.16b, v16.16b

	// Finally finish off undoing the transpose, and we're done for this
	// doubleround.  Again, most of this was done above so we don't have
	// to wait for the reorderings.
	ext	v5.16b, v5.16b, v5.16b, #12

	// The loop counter was decremented above (pushed up, like the
	// reorderings, to reduce delays); see if we should go round again.
	cbz	w0, 9f
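	// (9f and 0b are assembler local labels: a branch forward to label
	// 9 and backward to label 0 respectively.)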

	// Do the first part of the next round because this loop is offset.

	// a += b; d ^= a; d <<<= 16
	add	v4.4s, v4.4s, v5.4s
	eor	v7.16b, v7.16b, v4.16b
	shl	v16.4s, v7.4s, #16
	ushr	v7.4s, v7.4s, #16
	orr	v7.16b, v7.16b, v16.16b

	// c += d; b ^= c; b <<<= 12
	add	v6.4s, v6.4s, v7.4s
	eor	v5.16b, v5.16b, v6.16b
	shl	v16.4s, v5.4s, #12
	ushr	v5.4s, v5.4s, #20
	orr	v5.16b, v5.16b, v16.16b

	b	0b

	// Almost there.  Firstly the feedforward addition.
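	// (out = in + R(in), with lanewise addition mod 2^32: this
	// feedforward is what stops the output being unwound back through
	// the invertible round function to recover the input.)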
9:	add	v0.4s, v0.4s, v4.4s
	add	v1.4s, v1.4s, v5.4s
	add	v2.4s, v2.4s, v6.4s
	add	v3.4s, v3.4s, v7.4s

	// And now we write out the result.
	st1	{v0.4s-v3.4s}, [x2]

	// And with that, we're done.
	ret

ENDFUNC

///----- That's all, folks --------------------------------------------------