/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha
///
/// (c) 2015 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Local utilities.

// Magic constants for shuffling.
#define ROTL 0x93
#define ROT2 0x4e
#define ROTR 0x39
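
// pshufd's immediate byte packs four 2-bit source-lane selectors, one per
// destination lane, low lane first.  Decoding the constants above by hand
// (an illustrative note, with lanes written low to high):
//
//	ROTL = 0x93: [a b c d] -> [d a b c], lanes rotate up one place
//	ROT2 = 0x4e: [a b c d] -> [c d a b], lanes swap halves
//	ROTR = 0x39: [a b c d] -> [b c d a], lanes rotate down one place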

///--------------------------------------------------------------------------
/// Main code.

	.arch	pentium4
	.section .text

FUNC(chacha_core_x86_sse2)

	// Initial state.  We have three arguments:
	//	[ebp +  8] is the number of rounds to do
	//	[ebp + 12] points to the input matrix
	//	[ebp + 16] points to the output matrix
	push	ebp
	mov	ebp, esp
	sub	esp, 16
	mov	edx, [ebp + 12]
	and	esp, ~15
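
	// The stack dance above carves out a 16-byte-aligned save slot.
	// For orientation, a plausible C-level prototype for this function
	// (an assumption for illustration; the real declaration lives in
	// Catacomb's headers) would be:
	//
	//	extern void chacha_core_x86_sse2(unsigned nrounds,
	//		const uint32_t src[16], uint32_t dest[16]);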

	// First job is to slurp the matrix into XMM registers.  Be careful:
	// the input matrix isn't likely to be properly aligned.
	//
	//	[ 0  1  2  3]		(a, xmm0)
	//	[ 4  5  6  7]		(b, xmm1)
	//	[ 8  9 10 11]		(c, xmm2)
	//	[12 13 14 15]		(d, xmm3)
	movdqu	xmm0, [edx +  0]
	movdqu	xmm1, [edx + 16]
	movdqu	xmm2, [edx + 32]
	movdqu	xmm3, [edx + 48]
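
	// (For orientation: in the standard ChaCha state layout, row a
	// holds the `expand 32-byte k' constants, rows b and c the key,
	// and row d the counter and nonce.  This routine doesn't care; it
	// just crunches whatever matrix it's given.)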

	// Prepare for the main loop.
	mov	ecx, [ebp + 8]

	// Take a copy for later.  This one is aligned properly, by
	// construction.
	movdqa	[esp], xmm0
	movdqa	xmm5, xmm1
	movdqa	xmm6, xmm2
	movdqa	xmm7, xmm3
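
	// (Register pressure is why row a's copy lives in memory: 32-bit
	// x86 has only eight XMM registers, and xmm4 is needed below as
	// scratch for the rotates, so only b, c and d get spare registers.)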

loop:
	// Apply a column quarterround to each of the columns simultaneously.
	// Alas, there doesn't seem to be a packed doubleword rotate, so we
	// have to synthesize it.
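
	// For reference, one quarterround in portable C, following the
	// ChaCha definition (an illustrative sketch, not part of this
	// file):
	//
	//	#define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
	//	a += b; d ^= a; d = ROTL32(d, 16);
	//	c += d; b ^= c; b = ROTL32(b, 12);
	//	a += b; d ^= a; d = ROTL32(d,  8);
	//	c += d; b ^= c; b = ROTL32(b,  7);
	//
	// Each movdqa/pslld/psrld/por group below performs one such
	// rotation on all four lanes at once.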

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, ROTL
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, ROT2
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// The not-quite-transpose conveniently only involves reordering
	// elements of individual rows, which can be done quite easily.  It
	// doesn't involve any movement of elements between rows, or even
	// renaming of the rows.
	//
	//	[ 0  1  2  3]		[ 0  1  2  3]	(a, xmm0)
	//	[ 4  5  6  7]	-->	[ 5  6  7  4]	(b, xmm1)
	//	[ 8  9 10 11]		[10 11  8  9]	(c, xmm2)
	//	[12 13 14 15]		[15 12 13 14]	(d, xmm3)
	//
	// The shuffles have quite high latency, so they've mostly been
	// pushed upwards.  The remaining one can't be moved, though.
	pshufd	xmm1, xmm1, ROTR
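
	// (After this reordering each column holds one diagonal of the
	// original matrix: column 0 is now (0, 5, 10, 15), and so on.  So
	// the very same column-wise code below performs the diagonal
	// quarterrounds.)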

	// Apply the diagonal quarterround to each of the columns
	// simultaneously.

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, ROTR
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, ROT2
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// Finally, finish off undoing the transpose, and we're done for this
	// doubleround.  Again, most of this was done above so we don't have
	// to wait for the shuffles.
	pshufd	xmm1, xmm1, ROTL

	// Decrement the loop counter and see if we should go round again.
	sub	ecx, 2
	ja	loop
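
	// (The argument counts rounds, not doublerounds: each trip through
	// the loop above does one column round and one diagonal round, so
	// e.g. ChaCha20 passes 20 and the loop runs ten times.)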

	// Almost there.  Firstly, the feedforward addition.
	mov	edx, [ebp + 16]
	paddd	xmm0, [esp]
	paddd	xmm1, xmm5
	paddd	xmm2, xmm6
	paddd	xmm3, xmm7
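
	// In C terms the feedforward is simply (a sketch of the ChaCha
	// definition, for orientation):
	//
	//	for (i = 0; i < 16; i++) x[i] += src[i];
	//
	// where x is the state we've just permuted and src is the input
	// matrix captured at the start.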

	// And now we write out the result.  This one won't be aligned
	// either.
	movdqu	[edx +  0], xmm0
	movdqu	[edx + 16], xmm1
	movdqu	[edx + 32], xmm2
	movdqu	[edx + 48], xmm3

	// Tidy things up.
	mov	esp, ebp
	pop	ebp

	// And with that, we're done.
	ret

ENDFUNC

///----- That's all, folks --------------------------------------------------