/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha
///
/// (c) 2015 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Main code.

        .arch   pentium4
        .section .text

FUNC(chacha_core_x86_sse2)

        // Initial state.  We have three arguments:
        //   [ebp +  8] is the number of rounds to do
        //   [ebp + 12] points to the input matrix
        //   [ebp + 16] points to the output matrix
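        //
        // Seen from C, then, the function behaves roughly like this
        // prototype (a sketch only: the caller's actual type names may
        // differ):
        //
        //      void chacha_core_x86_sse2(unsigned nrounds,
        //                                const uint32_t src[16],
        //                                uint32_t dest[16]);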
        push    ebp
        mov     ebp, esp
        sub     esp, 16
        mov     edx, [ebp + 12]
        and     esp, ~15

        // First job is to slurp the matrix into XMM registers.  Be careful:
        // the input matrix isn't likely to be properly aligned.
        //
        //      [ 0  1  2  3] (a, xmm0)
        //      [ 4  5  6  7] (b, xmm1)
        //      [ 8  9 10 11] (c, xmm2)
        //      [12 13 14 15] (d, xmm3)
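        //
        // Hence the `movdqu' loads below: unlike `movdqa', they don't
        // fault on a misaligned address.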
        movdqu  xmm0, [edx +  0]
        movdqu  xmm1, [edx + 16]
        movdqu  xmm2, [edx + 32]
        movdqu  xmm3, [edx + 48]

        // Prepare for the main loop.
        mov     ecx, [ebp + 8]

        // Take a copy for later.  This one is aligned properly, by
        // construction.
        movdqa  [esp], xmm0
        movdqa  xmm5, xmm1
        movdqa  xmm6, xmm2
        movdqa  xmm7, xmm3

loop:
        // Apply a column quarterround to each of the columns simultaneously.
        // Alas, there doesn't seem to be a packed doubleword rotate, so we
        // have to synthesize it.
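        //
        // In scalar terms, one quarterround on a column (a, b, c, d) is:
        //
        //      a += b; d ^= a; d <<<= 16;
        //      c += d; b ^= c; b <<<= 12;
        //      a += b; d ^= a; d <<<=  8;
        //      c += d; b ^= c; b <<<=  7;
        //
        // and each rotate `x <<<= n' is synthesized from shifts, as if by
        //
        //      x = (x << n) | (x >> (32 - n));
        //
        // using xmm4 as scratch to hold the right-shifted copy.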
77
78 // a += b; d ^= a; d <<<= 16
79 paddd xmm0, xmm1
80 pxor xmm3, xmm0
81 movdqa xmm4, xmm3
82 pslld xmm3, 16
83 psrld xmm4, 16
84 por xmm3, xmm4
85
86 // c += d; b ^= c; b <<<= 12
87 paddd xmm2, xmm3
88 pxor xmm1, xmm2
89 movdqa xmm4, xmm1
90 pslld xmm1, 12
91 psrld xmm4, 20
92 por xmm1, xmm4
93
94 // a += b; d ^= a; d <<<= 8
95 paddd xmm0, xmm1
96 pxor xmm3, xmm0
97 movdqa xmm4, xmm3
98 pslld xmm3, 8
99 psrld xmm4, 24
100 por xmm3, xmm4
101
102 // c += d; b ^= c; b <<<= 7
103 paddd xmm2, xmm3
104 pshufd xmm3, xmm3, 0x93
105 pxor xmm1, xmm2
106 pshufd xmm2, xmm2, 0x4e
107 movdqa xmm4, xmm1
108 pslld xmm1, 7
109 psrld xmm4, 25
110 por xmm1, xmm4
111
112 // The not-quite-transpose conveniently only involves reordering
113 // elements of individual rows, which can be done quite easily. It
114 // doesn't involve any movement of elements between rows, or even
115 // renaming of the rows.
116 //
117 // [ 0 1 2 3] [ 0 1 2 3] (a, xmm0)
118 // [ 4 5 6 7] --> [ 5 6 7 4] (b, xmm1)
119 // [ 8 9 10 11] [10 11 8 9] (c, xmm2)
120 // [12 13 14 15] [15 12 13 14] (d, xmm3)
121 //
122 // The shuffles have quite high latency, so they've mostly been
123 // pushed upwards. The remaining one can't be moved, though.
124 pshufd xmm1, xmm1, 0x39
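        // (The pshufd immediate packs four 2-bit source indices, least
        // significant pair first: 0x39 = 0b00111001 picks elements 1, 2,
        // 3, 0, rotating b one place left; 0x4e picks 2, 3, 0, 1 for c;
        // and 0x93 picks 3, 0, 1, 2 for d.)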

        // Apply the diagonal quarterround to each of the diagonals
        // simultaneously.  (After the reordering above, the diagonals now
        // live in the columns of the register view, so the same column-wise
        // code does the right thing.)

        // a += b; d ^= a; d <<<= 16
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        movdqa  xmm4, xmm3
        pslld   xmm3, 16
        psrld   xmm4, 16
        por     xmm3, xmm4

        // c += d; b ^= c; b <<<= 12
        paddd   xmm2, xmm3
        pxor    xmm1, xmm2
        movdqa  xmm4, xmm1
        pslld   xmm1, 12
        psrld   xmm4, 20
        por     xmm1, xmm4

        // a += b; d ^= a; d <<<= 8
        paddd   xmm0, xmm1
        pxor    xmm3, xmm0
        movdqa  xmm4, xmm3
        pslld   xmm3, 8
        psrld   xmm4, 24
        por     xmm3, xmm4

        // c += d; b ^= c; b <<<= 7
        paddd   xmm2, xmm3
        pshufd  xmm3, xmm3, 0x39
        pxor    xmm1, xmm2
        pshufd  xmm2, xmm2, 0x4e
        movdqa  xmm4, xmm1
        pslld   xmm1, 7
        psrld   xmm4, 25
        por     xmm1, xmm4

        // Finally, finish off undoing the transpose, and we're done for
        // this doubleround.  Again, most of this was done above so we don't
        // have to wait for the shuffles.
        pshufd  xmm1, xmm1, 0x93
        // Decrement the loop counter and see if we should go round again.
        // Each trip round the loop is one doubleround, i.e., two rounds,
        // hence the step of two.
        sub     ecx, 2
        ja      loop
171
172 // Almost there. Firstly, the feedforward addition.
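        // That is, out[i] = in[i] + x[i] for each of the sixteen words,
        // where x is the state we've just computed; the copies saved at the
        // start (one on the stack, three in xmm5-7) supply the in[i].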
        mov     edx, [ebp + 16]
        paddd   xmm0, [esp]
        paddd   xmm1, xmm5
        paddd   xmm2, xmm6
        paddd   xmm3, xmm7

        // And now we write out the result.  This one won't be aligned
        // either.
        movdqu  [edx +  0], xmm0
        movdqu  [edx + 16], xmm1
        movdqu  [edx + 32], xmm2
        movdqu  [edx + 48], xmm3

        // Tidy things up.
        mov     esp, ebp
        pop     ebp

        // And with that, we're done.
        ret

ENDFUNC

///----- That's all, folks --------------------------------------------------