symm/{chacha,salsa20}-{arm64,arm-neon}.S: Improve rotation code.
symm/chacha-arm64.S

/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha for AArch64
///
/// (c) 2018 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

        .arch   armv8-a

        .text

///--------------------------------------------------------------------------
/// Main code.

FUNC(chacha_core_arm64)

        // Arguments are in registers.
        // w0 is the number of rounds to perform
        // x1 points to the input matrix
        // x2 points to the output matrix

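        // For reference, here is an illustrative C-like sketch of the whole
        // job (not part of the build; the names QUARTERROUND and ROTL32 are
        // ad hoc):
        //
        //      void chacha_core(unsigned nrounds,
        //                       const uint32_t in[16], uint32_t out[16])
        //      {
        //              uint32_t x[16];
        //              unsigned i;
        //
        //              memcpy(x, in, sizeof(x));
        //              for (i = 0; i < nrounds; i += 2) {
        //                      /* columns */
        //                      QUARTERROUND(x[0], x[4], x[ 8], x[12]);
        //                      QUARTERROUND(x[1], x[5], x[ 9], x[13]);
        //                      QUARTERROUND(x[2], x[6], x[10], x[14]);
        //                      QUARTERROUND(x[3], x[7], x[11], x[15]);
        //                      /* diagonals */
        //                      QUARTERROUND(x[0], x[5], x[10], x[15]);
        //                      QUARTERROUND(x[1], x[6], x[11], x[12]);
        //                      QUARTERROUND(x[2], x[7], x[ 8], x[13]);
        //                      QUARTERROUND(x[3], x[4], x[ 9], x[14]);
        //              }
        //              for (i = 0; i < 16; i++) out[i] = x[i] + in[i];
        //      }
        //
        // where QUARTERROUND(a, b, c, d) is
        //
        //      a += b; d ^= a; d = ROTL32(d, 16);
        //      c += d; b ^= c; b = ROTL32(b, 12);
        //      a += b; d ^= a; d = ROTL32(d,  8);
        //      c += d; b ^= c; b = ROTL32(b,  7);
        //
        // The code below computes the same thing, but does all four
        // quarterrounds of each kind at once, one row per vector register.
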
        // First job is to slurp the matrix into the SIMD registers.
        //
        //      [ 0  1  2  3] (a, v4)
        //      [ 4  5  6  7] (b, v5)
        //      [ 8  9 10 11] (c, v6)
        //      [12 13 14 15] (d, v7)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        ld1     {v0.4s-v3.4s}, [x1]

        // a += b; d ^= a; d <<<= 16
        add     v4.4s, v0.4s, v1.4s
        eor     v7.16b, v3.16b, v4.16b
        rev32   v7.8h, v7.8h
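        // (rev32 on halfword lanes swaps the two 16-bit halves of each
        // 32-bit element, which is exactly the rotation by 16 we want.)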

        // c += d; b ^= c; b <<<= 12
        add     v6.4s, v2.4s, v7.4s
        eor     v16.16b, v1.16b, v6.16b
        shl     v5.4s, v16.4s, #12
        sri     v5.4s, v16.4s, #20

0:
        // Apply (the rest of) a column quarterround to each of the columns
        // simultaneously.  Alas, there doesn't seem to be a packed word
        // rotate, so we have to synthesize it.
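        //
        // (To spell out the idiom used here and below: for a left rotation
        // of x by n bits, SHL computes x << n, and SRI then deposits
        // x >> (32 - n) into the n low bits which SHL left as zero, so the
        // pair computes (x << n) | (x >> (32 - n)).  Rotations by 16 are
        // done more cheaply with rev32, as above.)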

        // a += b; d ^= a; d <<<= 8
        add     v4.4s, v4.4s, v5.4s
        eor     v16.16b, v7.16b, v4.16b
        shl     v7.4s, v16.4s, #8
        sri     v7.4s, v16.4s, #24

        // c += d; b ^= c; b <<<= 7
        add     v6.4s, v6.4s, v7.4s
        ext     v7.16b, v7.16b, v7.16b, #12
        eor     v16.16b, v5.16b, v6.16b
        ext     v6.16b, v6.16b, v6.16b, #8
        shl     v5.4s, v16.4s, #7
        sri     v5.4s, v16.4s, #25

        // The not-quite-transpose conveniently only involves reordering
        // elements of individual rows, which can be done quite easily.  It
        // doesn't involve any movement of elements between rows, or even
        // renaming of the rows.
        //
        //      [ 0  1  2  3]       [ 0  1  2  3] (a, v4)
        //      [ 4  5  6  7]  -->  [ 5  6  7  4] (b, v5)
        //      [ 8  9 10 11]       [10 11  8  9] (c, v6)
        //      [12 13 14 15]       [15 12 13 14] (d, v7)
        //
        // The reorderings have for the most part been pushed upwards to
        // reduce delays.
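        //
        // (The reorderings themselves are done with EXT: taking the sixteen
        // bytes starting at offset 4, 8, or 12 of a register concatenated
        // with itself rotates its four 32-bit lanes along by one, two, or
        // three places respectively.)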
        ext     v5.16b, v5.16b, v5.16b, #4
        sub     w0, w0, #2
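        // (The counter drops by two because each trip round the loop
        // performs a whole doubleround; the subtraction is placed up here,
        // well before the branch that tests it.)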

        // Apply the diagonal quarterround to each of the columns
        // simultaneously.

        // a += b; d ^= a; d <<<= 16
        add     v4.4s, v4.4s, v5.4s
        eor     v7.16b, v7.16b, v4.16b
        rev32   v7.8h, v7.8h

        // c += d; b ^= c; b <<<= 12
        add     v6.4s, v6.4s, v7.4s
        eor     v16.16b, v5.16b, v6.16b
        shl     v5.4s, v16.4s, #12
        sri     v5.4s, v16.4s, #20

        // a += b; d ^= a; d <<<= 8
        add     v4.4s, v4.4s, v5.4s
        eor     v16.16b, v7.16b, v4.16b
        shl     v7.4s, v16.4s, #8
        sri     v7.4s, v16.4s, #24

        // c += d; b ^= c; b <<<= 7
        add     v6.4s, v6.4s, v7.4s
        ext     v7.16b, v7.16b, v7.16b, #4
        eor     v16.16b, v5.16b, v6.16b
        ext     v6.16b, v6.16b, v6.16b, #8
        shl     v5.4s, v16.4s, #7
        sri     v5.4s, v16.4s, #25

        // Finally finish off undoing the transpose, and we're done for this
        // doubleround.  Again, most of this was done above so we don't have
        // to wait for the reorderings.
        ext     v5.16b, v5.16b, v5.16b, #12

        // The loop counter was decremented above; see if we should go
        // round again.
        cbz     w0, 9f

        // Do the first part of the next round because this loop is offset.

        // a += b; d ^= a; d <<<= 16
        add     v4.4s, v4.4s, v5.4s
        eor     v7.16b, v7.16b, v4.16b
        rev32   v7.8h, v7.8h

        // c += d; b ^= c; b <<<= 12
        add     v6.4s, v6.4s, v7.4s
        eor     v16.16b, v5.16b, v6.16b
        shl     v5.4s, v16.4s, #12
        sri     v5.4s, v16.4s, #20

        b       0b

        // Almost there.  Firstly the feedforward addition.
9:      add     v0.4s, v0.4s, v4.4s
        add     v1.4s, v1.4s, v5.4s
        add     v2.4s, v2.4s, v6.4s
        add     v3.4s, v3.4s, v7.4s

        // And now we write out the result.
        st1     {v0.4s-v3.4s}, [x2]

        // And with that, we're done.
        ret

ENDFUNC

///----- That's all, folks --------------------------------------------------