1 /// -*- mode: asm; asm-comment-char: ?/ -*-
3 /// AESNI-based implementation of Rijndael
5 /// (c) 2015 Straylight/Edgeware
8 ///----- Licensing notice ---------------------------------------------------
10 /// This file is part of Catacomb.
12 /// Catacomb is free software; you can redistribute it and/or modify
13 /// it under the terms of the GNU Library General Public License as
14 /// published by the Free Software Foundation; either version 2 of the
15 /// License, or (at your option) any later version.
17 /// Catacomb is distributed in the hope that it will be useful,
18 /// but WITHOUT ANY WARRANTY; without even the implied warranty of
19 /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 /// GNU Library General Public License for more details.
22 /// You should have received a copy of the GNU Library General Public
23 /// License along with Catacomb; if not, write to the Free
24 /// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 /// MA 02111-1307, USA.
27 ///--------------------------------------------------------------------------
28 /// External definitions.
31 #include "asm-common.h"
34 .globl F(rijndael_rcon)
36 ///--------------------------------------------------------------------------
42 /// The AESNI instructions implement a little-endian version of AES, but
43 /// Catacomb's internal interface presents as big-endian so as to work better
44 /// with things like GCM. We therefore maintain the round keys in
45 /// little-endian form, and have to end-swap blocks in and out.
47 /// For added amusement, the AESNI instructions don't implement the
48 /// larger-block versions of Rijndael, so we have to end-swap the keys if
49 /// we're preparing for one of those.
// Sizing limits for a key-schedule buffer.  Rijndael supports blocks and
// keys up to 256 bits (8 words), which caps the round count at 14 for AES
// proper and 16 here to leave slack for the large-block variants.
.equ maxrounds, 16 // maximum number of rounds
.equ maxblksz, 32 // maximum block size, in bytes
.equ kbufsz, maxblksz*(maxrounds + 1) // size of a key-schedule buffer

// Byte offsets of the fields within a context structure, as used by the
// code below (`[CTX + nr]', `[CTX + w + ...]', etc.).
.equ nr, 0 // number of rounds
.equ w, nr + 4 // encryption key words
.equ wi, w + kbufsz // decryption key words
61 ///--------------------------------------------------------------------------
/// rijndael_setup_x86ish_aesni: expand key material into a full key
/// schedule.  Arguments (mapped to registers/stack per-ABI below): the
/// context base pointer, the block size in words, a pointer to the key
/// material, and the key size in words.  Encryption round keys are built
/// in the context's `w' area and decryption round keys derived into `wi'.
FUNC(rijndael_setup_x86ish_aesni)

// Arguments are on the stack. We'll need to stack the caller's
// register variables, but we'll manage.

# define CTX ebp // context pointer
# define BLKSZ [esp + 24] // block size

# define SI esi // source pointer
# define DI edi // destination pointer

# define KSZ ebx // key size
# define KSZo ebx // ... as address offset
# define NKW edx // total number of key words
# define NKW_NEEDS_REFRESH 1 // ... needs recalculating
# define RCON ecx // round constants table
# define LIM edx // limit pointer
# define LIMn edx // ... as integer offset from base
# define CYIX edi // index in shift-register cycle

# define NR ecx // number of rounds
# define LRK eax // distance to last key
# define LRKo eax // ... as address offset
# define BLKOFF edx // block size in bytes
# define BLKOFFo edx // ... as address offset

// Stack the caller's registers.

// Set up our own variables.
mov CTX, [esp + 20] // context base pointer
mov SI, [esp + 28] // key material
mov KSZ, [esp + 32] // key size, in words

#if CPUFAM_AMD64 && ABI_SYSV

// Arguments are in registers. We have plenty, but, to be honest,
// the initial register allocation is a bit annoying.

# define CTX r8 // context pointer
# define BLKSZ r9d // block size

# define SI rsi // source pointer
# define DI rdi // destination pointer

# define KSZ edx // key size
# define KSZo rdx // ... as address offset
# define NKW r10d // total number of key words
# define RCON rdi // round constants table
# define LIMn ecx // limit, as integer offset from base
# define LIM rcx // ... as limit pointer
# define CYIX r11d // index in shift-register cycle

# define NR ecx // number of rounds
# define LRK eax // distance to last key
# define LRKo rax // ... as address offset
# define BLKOFF r9d // block size in bytes
# define BLKOFFo r9 // ... as address offset

// Move arguments to more useful places.
mov CTX, rdi // context base pointer
mov BLKSZ, esi // block size in words
mov SI, rdx // key material
mov KSZ, ecx // key size, in words

#if CPUFAM_AMD64 && ABI_WIN

// Arguments are in different registers, and they're a little tight.

# define CTX r8 // context pointer
# define BLKSZ edx // block size

# define SI rsi // source pointer
# define DI rdi // destination pointer

# define KSZ r9d // key size
# define KSZo r9 // ... as address offset
# define NKW r10d // total number of key words
# define RCON rdi // round constants table
# define LIMn ecx // limit, as integer offset from base
# define LIM rcx // ... as limit pointer
# define CYIX r11d // index in shift-register cycle

# define NR ecx // number of rounds
# define LRK eax // distance to last key
# define LRKo rax // ... as address offset
# define BLKOFF edx // block size in bytes
# define BLKOFFo rdx // ... as address offset

// We'll need the index registers, which belong to the caller in this
// ABI.

// Move arguments to more useful places.
mov SI, r8 // key material
mov CTX, rcx // context base pointer

// The initial round key material is taken directly from the input
// key, so copy it over.
#if CPUFAM_AMD64 && ABI_SYSV
// We've been lucky. We already have a copy of the context pointer
// in rdi, and the key size in ecx.

// Find out other useful things.
mov NKW, [CTX + nr] // number of rounds
imul NKW, BLKSZ // total key size in words
#if !NKW_NEEDS_REFRESH
// If we can't keep NKW for later, then we use the same register for
// it and LIM, so this move is unnecessary.
sub LIMn, KSZ // offset by the key size

// Find the round constants.
leaext RCON, F(rijndael_rcon), ecx

// Prepare for the main loop.
mov eax, [SI + 4*KSZo - 4] // most recent key word
lea LIM, [SI + 4*LIM] // limit, offset by one key expansion
xor CYIX, CYIX // start of new cycle

// Main key expansion loop. The first word of each key-length chunk
// needs special treatment.
//
// This is rather tedious because the Intel `AESKEYGENASSIST'
// instruction is very strangely shaped. Firstly, it wants to
// operate on vast SSE registers, even though we're data-blocked from
// doing more than one operation at a time unless we're doing two key
// schedules simultaneously -- and even then we can't do more than
// two, because the instruction ignores two of its input words
// entirely, and produces two different outputs for each of the other
// two. And secondly it insists on taking the magic round constant
// as an immediate, so it's kind of annoying if you're not
// open-coding the whole thing. It's much easier to leave that as
// zero and XOR in the round constant by hand.
0: cmp CYIX, 0 // first word of the cycle?
cmp CYIX, 4 // fourth word of the cycle?
cmp KSZ, 7 // and a large key?

// Fourth word of the cycle, and seven or eight words of key. Do a
// byte substitution.
pshufd xmm0, xmm0, SHUF(2, 1, 0, 3)
aeskeygenassist xmm1, xmm0, 0 // rcon = 0: we mix it in by hand instead

// First word of the cycle. This is the complicated piece.
pshufd xmm0, xmm0, SHUF(0, 3, 2, 1)
aeskeygenassist xmm1, xmm0, 0
pshufd xmm1, xmm1, SHUF(2, 1, 0, 3)

// Common tail. Mix in the corresponding word from the previous
// cycle and prepare for the next loop.
mov [SI + 4*KSZo], eax

// Next job is to construct the decryption keys. The keys for the
// first and last rounds don't need to be mangled, but the remaining
// ones do -- and they all need to be reordered too.
//
// The plan of action, then, is to copy the final encryption round's
// keys into place first, then to do each of the intermediate rounds
// in reverse order, and finally do the first round.
//
// Do all of the heavy lifting with SSE registers. The order we're
// doing this in means that it's OK if we read or write too much, and
// there's easily enough buffer space for the over-enthusiastic reads
// and writes because the context has space for 32-byte blocks, which
// is our maximum and an exact fit for two SSE registers.
9: mov NR, [CTX + nr] // number of rounds
#if NKW_NEEDS_REFRESH
// If we retain NKW, then BLKSZ and BLKOFF are the same register
// because we won't need the former again.
lea SI, [CTX + w + 4*LRKo] // last round's keys
shl BLKOFF, 2 // block size (in bytes now)

// Copy the last encryption round's keys.
movdqu xmm0, [SI + 16]
movdqu [DI + 16], xmm0

// Update the loop variables and stop if we've finished.

// Do another middle round's keys...
movdqu xmm0, [SI + 16]
movdqu [DI + 16], xmm0

// Finally do the first encryption round.
movdqu xmm0, [SI + 16]
movdqu [DI + 16], xmm0

// If the block size is not exactly four words then we must end-swap
// everything. We can use fancy SSE toys for this.

// Find the byte-reordering table.
movdqa xmm5, [INTADDR(endswap_tab, ecx)]

#if NKW_NEEDS_REFRESH
// Calculate the number of subkey words again. (It's a good job
// we've got a fast multiplier.)

// End-swap the encryption keys.

// And the decryption keys.

#if CPUFAM_AMD64 && ABI_WIN

// End-swap NKW words starting at SI. The end-swapping table is
// already loaded into XMM5; and it's OK to work in 16-byte chunks.
///--------------------------------------------------------------------------
/// Encrypting and decrypting blocks.

// encdec OP, AES, KOFF
//
// Emit a block-transformation function `rijndael_OP_x86ish_aesni'.
// AES is the AESNI round-instruction stem to use (`aesenc' or `aesdec';
// the final round uses `AESenclast'/`AESdeclast' via `\aes\()last'), and
// KOFF is the offset of the relevant key schedule in the context (`w' for
// encryption, `wi' for decryption -- see the instantiations below).
.macro encdec op, aes, koff
FUNC(rijndael_\op\()_x86ish_aesni)

// Arguments come in on the stack, and need to be collected. We
// don't have a shortage of registers.

#if CPUFAM_AMD64 && ABI_SYSV
// Arguments come in registers. All is good.

#if CPUFAM_AMD64 && ABI_WIN
// Arguments come in different registers.

// Find the magic endianness-swapping table.
movdqa xmm5, [INTADDR(endswap_tab, ecx)]

// Initial whitening.

// Dispatch to the correct code.
//
// Unrolled rounds: each step loads the next round key (16 bytes
// further along the schedule) and applies one AES round to the block
// in XMM0.
movdqu xmm1, [K + 16]
movdqu xmm1, [K + 32]
movdqu xmm1, [K + 48]
movdqu xmm1, [K + 64]
movdqu xmm1, [K + 80]
movdqu xmm1, [K + 96]
movdqu xmm1, [K + 112]
movdqu xmm1, [K + 128]
movdqu xmm1, [K + 144]
\aes\()last xmm0, xmm1 // final round omits MixColumns/InvMixColumns

// Unpermute the ciphertext block and store it.

// Instantiate the encryption and decryption entry points.
encdec eblk, aesenc, w
encdec dblk, aesdec, wi
///--------------------------------------------------------------------------
/// Random utilities.

// Abort the process because of a programming error. Indirecting
// through this point serves several purposes: (a) by CALLing, rather
// than branching to, `abort', we can save the return address, which
// might at least provide a hint as to what went wrong; (b) we don't
// have conditional CALLs (and they'd be big anyway); and (c) we can
// write a HLT here as a backstop against `abort' being mad.
bogus: callext F(abort)

///--------------------------------------------------------------------------

///----- That's all, folks --------------------------------------------------