1 /// -*- mode: asm; asm-comment-char: ?/ -*-
3 /// AESNI-based implementation of Rijndael
5 /// (c) 2015 Straylight/Edgeware
8 ///----- Licensing notice ---------------------------------------------------
10 /// This file is part of Catacomb.
12 /// Catacomb is free software; you can redistribute it and/or modify
13 /// it under the terms of the GNU Library General Public License as
14 /// published by the Free Software Foundation; either version 2 of the
15 /// License, or (at your option) any later version.
17 /// Catacomb is distributed in the hope that it will be useful,
18 /// but WITHOUT ANY WARRANTY; without even the implied warranty of
19 /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 /// GNU Library General Public License for more details.
22 /// You should have received a copy of the GNU Library General Public
23 /// License along with Catacomb; if not, write to the Free
24 /// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 /// MA 02111-1307, USA.
27 ///--------------------------------------------------------------------------
31 #include "asm-common.h"
36 .extern F(rijndael_rcon)
40 ///--------------------------------------------------------------------------
43 /// The AESNI instructions implement a little-endian version of AES, but
44 /// Catacomb's internal interface presents as big-endian so as to work better
45 /// with things like GCM. We therefore maintain the round keys in
46 /// little-endian form, and have to end-swap blocks in and out.
48 /// For added amusement, the AESNI instructions don't implement the
49 /// larger-block versions of Rijndael, so we have to end-swap the keys if
50 /// we're preparing for one of those.
	// Sizing constants for the key-schedule buffers.
	.equ	maxrounds, 16		// maximum number of rounds
	.equ	maxblksz, 32		// maximum block size, in bytes
	.equ	kbufsz, maxblksz*(maxrounds + 1) // size of a key-schedule buffer

	// Offsets of the fields within the context structure.
	.equ	nr, 0			// number of rounds
	.equ	w, nr + 4		// encryption key words
	.equ	wi, w + kbufsz		// decryption key words
62 ///--------------------------------------------------------------------------
FUNC(rijndael_setup_x86ish_aesni_avx)
	// AVX entry point: clear the upper YMM lanes so that the legacy
	// SSE instructions below don't pay the SSE/AVX transition penalty,
	// then fall through into the common implementation.
	vzeroupper			// avoid penalty on `legacy' XMM access
	// and drop through...
FUNC(rijndael_setup_x86ish_aesni)

	// Expand a Rijndael key into the context's key schedule.  The
	// arguments -- gathered into the per-ABI registers/stack slots
	// below -- are: the context base pointer, the block size in words,
	// a pointer to the key material, and the key size in words.

	// Arguments are on the stack.  We'll need to stack the caller's
	// register variables, but we'll manage.

# define CTX ebp			// context pointer
# define BLKSZ [esp + 24]		// block size

# define KSZ ebx			// key size
# define NKW edx			// total number of key words
# define NKW_NEEDS_REFRESH 1		// ... needs recalculating
# define RCON ecx			// round constants table
# define LIM edx			// limit pointer
# define CYIX edi			// index in shift-register cycle

# define NR ecx				// number of rounds
# define LRK eax			// distance to last key
# define BLKOFF edx			// block size in bytes

	// Stack the caller's registers.

	// Set up our own variables.
	mov	CTX, [esp + 20]		// context base pointer
	mov	SI, [esp + 28]		// key material
	mov	KSZ, [esp + 32]		// key size, in words

#if CPUFAM_AMD64 && ABI_SYSV
	// Arguments are in registers.  We have plenty, but, to be honest,
	// the initial register allocation is a bit annoying.

# define CTX r8				// context pointer
# define BLKSZ r9d			// block size

# define KSZ edx			// key size
# define NKW r10d			// total number of key words
# define RCON rdi			// round constants table
# define LIM rcx			// limit pointer
# define CYIX r11d			// index in shift-register cycle

# define NR ecx				// number of rounds
# define LRK eax			// distance to last key
# define BLKOFF r9d			// block size in bytes

	// Move arguments to more useful places.
	mov	CTX, rdi		// context base pointer
	mov	BLKSZ, esi		// block size in words
	mov	SI, rdx			// key material
	mov	KSZ, ecx		// key size, in words

#if CPUFAM_AMD64 && ABI_WIN
	// Arguments are in different registers, and they're a little tight.

# define CTX r8				// context pointer
# define BLKSZ edx			// block size

# define KSZ r9d			// key size
# define NKW r10d			// total number of key words
# define RCON rdi			// round constants table
# define LIM rcx			// limit pointer
# define CYIX r11d			// index in shift-register cycle

# define NR ecx				// number of rounds
# define LRK eax			// distance to last key
# define BLKOFF edx			// block size in bytes

	// We'll need the index registers, which belong to the caller in this

	// Move arguments to more useful places.
	mov	rsi, r8			// key material
	mov	CTX, rcx		// context base pointer

	// The initial round key material is taken directly from the input
	// key, so copy it over.
#if CPUFAM_AMD64 && ABI_SYSV
	// We've been lucky.  We already have a copy of the context pointer
	// in rdi, and the key size in ecx.

	// Find out other useful things.
	mov	NKW, [CTX + nr]		// number of rounds
	imul	NKW, BLKSZ		// total key size in words
#if !NKW_NEEDS_REFRESH
	// If we can't keep NKW for later, then we use the same register for
	// it and LIM, so this move is unnecessary.
	sub	DWORD(LIM), KSZ		// offset by the key size

	// Find the round constants.
	leaext	RCON, F(rijndael_rcon), WHOLE(c)

	// Prepare for the main loop.
	mov	eax, [SI + 4*WHOLE(KSZ) - 4] // most recent key word
	lea	LIM, [SI + 4*LIM]	// limit, offset by one key expansion
	xor	CYIX, CYIX		// start of new cycle

	// Main key expansion loop.  The first word of each key-length chunk
	// needs special treatment.
	//
	// This is rather tedious because the Intel `AESKEYGENASSIST'
	// instruction is very strangely shaped.  Firstly, it wants to
	// operate on vast SSE registers, even though we're data-blocked from
	// doing more than one operation at a time unless we're doing two key
	// schedules simultaneously -- and even then we can't do more than
	// two, because the instruction ignores two of its input words
	// entirely, and produces two different outputs for each of the other
	// two.  And secondly it insists on taking the magic round constant
	// as an immediate, so it's kind of annoying if you're not
	// open-coding the whole thing.  It's much easier to leave that as
	// zero and XOR in the round constant by hand.
0:	cmp	CYIX, 0			// first word of the cycle?
	cmp	CYIX, 4			// fourth word of the cycle?
	cmp	KSZ, 7			// and a large key?

	// Fourth word of the cycle, and seven or eight words of key.  Do a
	// byte substitution.
	pshufd	xmm0, xmm0, SHUF(3, 0, 1, 2)
	aeskeygenassist xmm1, xmm0, 0	// rcon = 0: we XOR it in by hand

	// First word of the cycle.  This is the complicated piece.
	pshufd	xmm0, xmm0, SHUF(1, 2, 3, 0)
	aeskeygenassist xmm1, xmm0, 0
	pshufd	xmm1, xmm1, SHUF(3, 0, 1, 2)

	// Common tail.  Mix in the corresponding word from the previous
	// cycle and prepare for the next loop.
	mov	[SI + 4*WHOLE(KSZ)], eax

	// Next job is to construct the decryption keys.  The keys for the
	// first and last rounds don't need to be mangled, but the remaining
	// ones do -- and they all need to be reordered too.
	//
	// The plan of action, then, is to copy the final encryption round's
	// keys into place first, then to do each of the intermediate rounds
	// in reverse order, and finally do the first round.
	//
	// Do all of the heavy lifting with SSE registers.  The order we're
	// doing this in means that it's OK if we read or write too much, and
	// there's easily enough buffer space for the over-enthusiastic reads
	// and writes because the context has space for 32-byte blocks, which
	// is our maximum and an exact fit for two SSE registers.
9:	mov	NR, [CTX + nr]		// number of rounds
#if NKW_NEEDS_REFRESH
	// If we retain NKW, then BLKSZ and BLKOFF are the same register
	// because we won't need the former again.
	lea	SI, [CTX + w + 4*WHOLE(LRK)] // last round's keys
	shl	BLKOFF, 2		// block size (in bytes now)

	// Copy the last encryption round's keys.
	movdqu	xmm0, [SI + 16]
	movdqu	[DI + 16], xmm0

	// Update the loop variables and stop if we've finished.
0:	add	DI, WHOLE(BLKOFF)	// decryption keys grow forwards...
	sub	SI, WHOLE(BLKOFF)	// ... as encryption keys walk back

	// Do another middle round's keys...
	movdqu	xmm0, [SI + 16]
	movdqu	[DI + 16], xmm0

	// Finally do the first encryption round.
	movdqu	xmm0, [SI + 16]
	movdqu	[DI + 16], xmm0

	// If the block size is not exactly four words then we must end-swap
	// everything.  We can use fancy SSE toys for this.

	// Find the byte-reordering table.
	movdqa	xmm5, [INTADDR(endswap_tab, ecx)]

#if NKW_NEEDS_REFRESH
	// Calculate the number of subkey words again.  (It's a good job
	// we've got a fast multiplier.)

	// End-swap the encryption keys.

	// And the decryption keys.

#if CPUFAM_AMD64 && ABI_WIN
INTFUNC(endswap_block)
	// Internal helper: end-swap NKW words starting at SI.  The
	// end-swapping table is already loaded into XMM5; and it's OK to
	// work in 16-byte chunks (the caller guarantees enough slack in the
	// key buffers -- see the setup code above).
371 ///--------------------------------------------------------------------------
372 /// Encrypting and decrypting blocks.
	// Generate a single-block function pair.  `op' is the name suffix
	// (`eblk' or `dblk'); `aes' is the round instruction to use
	// (`aesenc' or `aesdec'), with `\aes\()last' as the final round;
	// `koff' is the offset of the relevant key words in the context
	// (`w' for encryption, `wi' for decryption).
.macro	encdec	op, aes, koff
FUNC(rijndael_\op\()_x86ish_aesni_avx)
	// AVX entry point: flush the upper YMM lanes, then share the SSE
	// code below.
	vzeroupper			// avoid XMM penalties
	// and drop through...

FUNC(rijndael_\op\()_x86ish_aesni)

	// Arguments come in on the stack, and need to be collected.  We
	// don't have a shortage of registers.

#if CPUFAM_AMD64 && ABI_SYSV
	// Arguments come in registers.  All is good.

#if CPUFAM_AMD64 && ABI_WIN
	// Arguments come in different registers.

	// Find the magic endianness-swapping table.
	movdqa	xmm5, [INTADDR(endswap_tab, ecx)]

	// Initial whitening.

	// Dispatch to the correct code.
	movdqu	xmm1, [K + 16]
	movdqu	xmm1, [K + 32]
	movdqu	xmm1, [K + 48]
	movdqu	xmm1, [K + 64]
	movdqu	xmm1, [K + 80]
	movdqu	xmm1, [K + 96]
	movdqu	xmm1, [K + 112]
	movdqu	xmm1, [K + 128]
	movdqu	xmm1, [K + 144]
	\aes\()last xmm0, xmm1		// final round omits MixColumns

	// Unpermute the ciphertext block and store it.
	// Instantiate the encryption and decryption entry points: forward
	// rounds use `aesenc' with the encryption keys at `w'; inverse
	// rounds use `aesdec' with the prepared decryption keys at `wi'.
	encdec	eblk, aesenc, w
	encdec	dblk, aesdec, wi
526 ///--------------------------------------------------------------------------
527 /// Random utilities.
530 // Abort the process because of a programming error. Indirecting
531 // through this point serves several purposes: (a) by CALLing, rather
532 // than branching to, `abort', we can save the return address, which
533 // might at least provide a hint as to what went wrong; (b) we don't
534 // have conditional CALLs (and they'd be big anyway); and (c) we can
535 // write a HLT here as a backstop against `abort' being mad.
544 ///--------------------------------------------------------------------------
556 ///----- That's all, folks --------------------------------------------------