/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha for ARM
///
/// (c) 2016 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

        .arch   armv7-a
        .fpu    neon

        .text

///--------------------------------------------------------------------------
/// Main code.

FUNC(chacha_core_arm_neon)

        // Arguments are in registers.
        // r0 is the number of rounds to perform
        // r1 points to the input matrix
        // r2 points to the output matrix
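        //
        // As seen from C, this entry point is something like the
        // following (a sketch only: the exact matrix types are
        // whatever the library's headers declare):
        //
        //      void chacha_core_arm_neon(unsigned nrounds,
        //                                const uint32 src[16],
        //                                uint32 dest[16]);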

        // First job is to slurp the matrix into the SIMD registers.  vldm
        // and vstm work on word-aligned data, so this is fine.
        //
        // [ 0  1  2  3] (a, q8)
        // [ 4  5  6  7] (b, q9)
        // [ 8  9 10 11] (c, q10)
        // [12 13 14 15] (d, q11)
        //
        // We need a copy for later.  Rather than waste time copying them by
        // hand, we'll use the three-address nature of the instruction set.
        // But this means that the main loop is offset by a bit.
        vldmia  r1, {QQ(q12, q15)}
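        // (QQ comes from asm-common.h; here it's intended to expand to
        // the register range q12-q15, so this fetches the whole
        // sixteen-word matrix in one instruction.)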

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q12, q13
        veor    q11, q15, q8
        vrev32.16 q11, q11
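        // (Rotating a 32-bit lane left by 16 is the same as swapping its
        // 16-bit halves, so vrev32.16 does the whole rotation in one go.)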

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q14, q11
        veor    q0, q13, q10
        vshl.u32 q9, q0, #12
        vsri.u32 q9, q0, #20
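        // (The rotation is synthesized from two shifts, as in C:
        // b = (t << 12) | (t >> 20), with t = b ^ c.  vshl provides the
        // left piece; vsri shifts right and inserts the result into the
        // bits that vshl left clear.)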

0:
        // Apply (the rest of) a column quarterround to each of the columns
        // simultaneously.  Alas, there doesn't seem to be a packed word
        // rotate, so we have to synthesize it.

        // a += b; d ^= a; d <<<= 8
        vadd.u32 q8, q8, q9
        veor    q0, q11, q8
        vshl.u32 q11, q0, #8
        vsri.u32 q11, q0, #24

        // c += d; b ^= c; b <<<= 7
        vadd.u32 q10, q10, q11
        vext.32 q11, q11, q11, #3
        veor    q0, q9, q10
        vext.32 q10, q10, q10, #2
        vshl.u32 q9, q0, #7
        vsri.u32 q9, q0, #25

        // The not-quite-transpose conveniently only involves reordering
        // elements of individual rows, which can be done quite easily.  It
        // doesn't involve any movement of elements between rows, or even
        // renaming of the rows.
        //
        // [ 0  1  2  3]     [ 0  1  2  3] (a, q8)
        // [ 4  5  6  7] --> [ 5  6  7  4] (b, q9)
        // [ 8  9 10 11]     [10 11  8  9] (c, q10)
        // [12 13 14 15]     [15 12 13 14] (d, q11)
        //
        // The reorderings have for the most part been pushed upwards to
        // reduce delays.
        vext.32 q9, q9, q9, #1
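        // (vext.32 with both source operands equal rotates the lanes of
        // a vector: here it turns q9 = [4 5 6 7] into [5 6 7 4], the
        // b-row reordering in the diagram above.  The c- and d-row
        // reorderings were the vext.32 instructions already issued in
        // the b <<<= 7 step.)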

        // Apply the diagonal quarterround to each of the columns
        // simultaneously.

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q8, q9
        veor    q11, q11, q8
        vrev32.16 q11, q11

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q10, q11
        veor    q0, q9, q10
        vshl.u32 q9, q0, #12
        vsri.u32 q9, q0, #20

        // a += b; d ^= a; d <<<= 8
        vadd.u32 q8, q8, q9
        veor    q0, q11, q8
        vshl.u32 q11, q0, #8
        vsri.u32 q11, q0, #24

        // c += d; b ^= c; b <<<= 7
        vadd.u32 q10, q10, q11
        vext.32 q11, q11, q11, #1
        veor    q0, q9, q10
        vext.32 q10, q10, q10, #2
        vshl.u32 q9, q0, #7
        vsri.u32 q9, q0, #25

        // Finally finish off undoing the transpose, and we're done for this
        // doubleround.  Again, most of this was done above so we don't have
        // to wait for the reorderings.
        vext.32 q9, q9, q9, #3

        // Decrement the loop counter and see if we should go round again.
        subs    r0, r0, #2
        bls     9f
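        // (r0 counts single rounds, but each trip round the loop does a
        // whole doubleround, hence the step of two; bls drops out to
        // label 9 once the counter has reached, or for odd round counts
        // wrapped past, zero.)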

        // Do the first part of the next round because this loop is offset.

        // a += b; d ^= a; d <<<= 16
        vadd.u32 q8, q8, q9
        veor    q11, q11, q8
        vrev32.16 q11, q11

        // c += d; b ^= c; b <<<= 12
        vadd.u32 q10, q10, q11
        veor    q0, q9, q10
        vshl.u32 q9, q0, #12
        vsri.u32 q9, q0, #20

        b       0b

        // Almost there.  Firstly the feedforward addition.
9:      vadd.u32 q8, q8, q12
        vadd.u32 q9, q9, q13
        vadd.u32 q10, q10, q14
        vadd.u32 q11, q11, q15
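        // (This is the feedforward from the ChaCha definition: each
        // output word is the permuted word plus the corresponding input
        // word, out[i] = x[i] + in[i], which is what makes the core
        // function hard to invert.)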

        // And now we write out the result.
        vstmia  r2, {QQ(q8, q11)}

        // And with that, we're done.
        bx      r14

ENDFUNC

///----- That's all, folks --------------------------------------------------