1 /// -*- mode: asm; asm-comment-char: ?/ -*-
3 /// AESNI-based implementation of Rijndael
5 /// (c) 2015 Straylight/Edgeware
8 ///----- Licensing notice ---------------------------------------------------
10 /// This file is part of Catacomb.
12 /// Catacomb is free software; you can redistribute it and/or modify
13 /// it under the terms of the GNU Library General Public License as
14 /// published by the Free Software Foundation; either version 2 of the
15 /// License, or (at your option) any later version.
17 /// Catacomb is distributed in the hope that it will be useful,
18 /// but WITHOUT ANY WARRANTY; without even the implied warranty of
19 /// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 /// GNU Library General Public License for more details.
22 /// You should have received a copy of the GNU Library General Public
23 /// License along with Catacomb; if not, write to the Free
24 /// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 /// MA 02111-1307, USA.
27 ///--------------------------------------------------------------------------
28 /// External definitions.
31 #include "asm-common.h"
34 .globl F(rijndael_rcon)
36 ///--------------------------------------------------------------------------
39 // Magic constants for shuffling.
44 ///--------------------------------------------------------------------------
50 /// The AESNI instructions implement a little-endian version of AES, but
51 /// Catacomb's internal interface presents as big-endian so as to work better
52 /// with things like GCM. We therefore maintain the round keys in
53 /// little-endian form, and have to end-swap blocks in and out.
55 /// For added amusement, the AESNI instructions don't implement the
56 /// larger-block versions of Rijndael, so we have to end-swap the keys if
57 /// we're preparing for one of those.
60 .equ maxrounds, 16 // maximum number of rounds
61 .equ maxblksz, 32 // maximum block size, in bytes
62 .equ kbufsz, maxblksz*(maxrounds + 1) // size of a key-schedule buffer

// Offsets (in bytes) of the fields within the context structure:
// a round count, followed by the encryption and decryption key
// schedules, each `kbufsz' bytes long.
65 .equ nr, 0 // number of rounds
66 .equ w, nr + 4 // encryption key words
67 .equ wi, w + kbufsz // decryption key words
69 ///--------------------------------------------------------------------------
// rijndael_setup_x86ish_aesni(ctx, blksz, key, keysz)
//
// In: ctx = context base pointer; blksz = block size, in words;
// key = pointer to the key material; keysz = key size, in words.
//
// Expands the key into the encryption key schedule at `ctx + w',
// derives the decryption schedule at `ctx + wi', and (for block
// sizes other than four words) end-swaps both schedules.
72 FUNC(rijndael_setup_x86ish_aesni)
75 // Arguments are on the stack. We'll need to stack the caller's
76 // register variables, but we'll manage.
78 # define CTX ebp // context pointer
79 # define BLKSZ [esp + 24] // block size
81 # define SI esi // source pointer
82 # define DI edi // destination pointer
84 # define KSZ ebx // key size
85 # define KSZo ebx // ... as address offset
86 # define NKW edx // total number of key words
87 # define NKW_NEEDS_REFRESH 1 // ... needs recalculating
88 # define RCON ecx // round constants table
89 # define LIM edx // limit pointer
90 # define LIMn edx // ... as integer offset from base
91 # define CYIX edi // index in shift-register cycle
93 # define NR ecx // number of rounds
94 # define LRK eax // distance to last key
95 # define LRKo eax // ... as address offset
96 # define BLKOFF edx // block size in bytes
97 # define BLKOFFo edx // ... as address offset
99 // Stack the caller's registers.
105 // Set up our own variables.
106 mov CTX, [esp + 20] // context base pointer
107 mov SI, [esp + 28] // key material
108 mov KSZ, [esp + 32] // key size, in words
111 #if CPUFAM_AMD64 && ABI_SYSV
112 // Arguments are in registers. We have plenty, but, to be honest,
113 // the initial register allocation is a bit annoying.
115 # define CTX r8 // context pointer
116 # define BLKSZ r9d // block size
118 # define SI rsi // source pointer
119 # define DI rdi // destination pointer
121 # define KSZ edx // key size
122 # define KSZo rdx // ... as address offset
123 # define NKW r10d // total number of key words
124 # define RCON rdi // round constants table
125 # define LIMn ecx // limit, as integer offset from base
126 # define LIM rcx // ... as limit pointer
127 # define CYIX r11d // index in shift-register cycle
129 # define NR ecx // number of rounds
130 # define LRK eax // distance to last key
131 # define LRKo rax // ... as address offset
132 # define BLKOFF r9d // block size in bytes
133 # define BLKOFFo r9 // ... as address offset
135 // Move arguments to more useful places.
136 mov CTX, rdi // context base pointer
137 mov BLKSZ, esi // block size in words
138 mov SI, rdx // key material
139 mov KSZ, ecx // key size, in words
142 #if CPUFAM_AMD64 && ABI_WIN
143 // Arguments are in different registers, and they're a little tight.
145 # define CTX r8 // context pointer
146 # define BLKSZ edx // block size
148 # define SI rsi // source pointer
149 # define DI rdi // destination pointer
151 # define KSZ r9d // key size
152 # define KSZo r9 // ... as address offset
153 # define NKW r10d // total number of key words
154 # define RCON rdi // round constants table
155 # define LIMn ecx // limit, as integer offset from base
156 # define LIM rcx // ... as limit pointer
157 # define CYIX r11d // index in shift-register cycle
159 # define NR ecx // number of rounds
160 # define LRK eax // distance to last key
161 # define LRKo rax // ... as address offset
162 # define BLKOFF edx // block size in bytes
163 # define BLKOFFo rdx // ... as address offset
165 // We'll need the index registers, which belong to the caller in this
170 // Move arguments to more useful places.
171 mov SI, r8 // key material
172 mov CTX, rcx // context base pointer
175 // The initial round key material is taken directly from the input
176 // key, so copy it over.
177 #if CPUFAM_AMD64 && ABI_SYSV
178 // We've been lucky. We already have a copy of the context pointer
179 // in rdi, and the key size in ecx.
187 // Find out other useful things.
188 mov NKW, [CTX + nr] // number of rounds
190 imul NKW, BLKSZ // total key size in words
191 #if !NKW_NEEDS_REFRESH
192 // If we can't keep NKW for later, then we use the same register for
193 // it and LIM, so this move is unnecessary.
196 sub LIMn, KSZ // offset by the key size
198 // Find the round constants.
200 leaext RCON, F(rijndael_rcon), ecx
202 // Prepare for the main loop.
204 mov eax, [SI + 4*KSZo - 4] // most recent key word
205 lea LIM, [SI + 4*LIM] // limit, offset by one key expansion
206 xor CYIX, CYIX // start of new cycle
208 // Main key expansion loop. The first word of each key-length chunk
209 // needs special treatment.
211 // This is rather tedious because the Intel `AESKEYGENASSIST'
212 // instruction is very strangely shaped. Firstly, it wants to
213 // operate on vast SSE registers, even though we're data-blocked from
214 // doing more than one operation at a time unless we're doing two key
215 // schedules simultaneously -- and even then we can't do more than
216 // two, because the instruction ignores two of its input words
217 // entirely, and produces two different outputs for each of the other
218 // two. And secondly it insists on taking the magic round constant
219 // as an immediate, so it's kind of annoying if you're not
220 // open-coding the whole thing. It's much easier to leave that as
221 // zero and XOR in the round constant by hand.
222 0: cmp CYIX, 0 // first word of the cycle?
224 cmp CYIX, 4 // fourth word of the cycle?
226 cmp KSZ, 7 // and a large key?
229 // Fourth word of the cycle, and seven or eight words of key. Do a
230 // byte substitution.
232 pshufd xmm0, xmm0, ROTL
233 aeskeygenassist xmm1, xmm0, 0
237 // First word of the cycle. This is the complicated piece.
239 pshufd xmm0, xmm0, ROTR
240 aeskeygenassist xmm1, xmm0, 0
241 pshufd xmm1, xmm1, ROTL
246 // Common tail. Mix in the corresponding word from the previous
247 // cycle and prepare for the next loop.
249 mov [SI + 4*KSZo], eax
259 // Next job is to construct the decryption keys. The keys for the
260 // first and last rounds don't need to be mangled, but the remaining
261 // ones do -- and they all need to be reordered too.
263 // The plan of action, then, is to copy the final encryption round's
264 // keys into place first, then to do each of the intermediate rounds
265 // in reverse order, and finally do the first round.
267 // Do all of the heavy lifting with SSE registers. The order we're
268 // doing this in means that it's OK if we read or write too much, and
269 // there's easily enough buffer space for the over-enthusiastic reads
270 // and writes because the context has space for 32-byte blocks, which
271 // is our maximum and an exact fit for two SSE registers.
272 9: mov NR, [CTX + nr] // number of rounds
273 #if NKW_NEEDS_REFRESH
278 // If we retain NKW, then BLKSZ and BLKOFF are the same register
279 // because we won't need the former again.
284 lea SI, [CTX + w + 4*LRKo] // last round's keys
285 shl BLKOFF, 2 // block size (in bytes now)
287 // Copy the last encryption round's keys.
292 movdqu xmm0, [SI + 16]
293 movdqu [DI + 16], xmm0
295 // Update the loop variables and stop if we've finished.
301 // Do another middle round's keys...
307 movdqu xmm0, [SI + 16]
309 movdqu [DI + 16], xmm0
312 // Finally do the first encryption round.
317 movdqu xmm0, [SI + 16]
318 movdqu [DI + 16], xmm0
320 // If the block size is not exactly four words then we must end-swap
321 // everything. We can use fancy SSE toys for this.
325 // Find the byte-reordering table.
327 movdqa xmm5, [INTADDR(endswap_tab, ecx)]
329 #if NKW_NEEDS_REFRESH
330 // Calculate the number of subkey words again. (It's a good job
331 // we've got a fast multiplier.)
337 // End-swap the encryption keys.
341 // And the decryption keys.
352 #if CPUFAM_AMD64 && ABI_WIN
360 // End-swap NKW words starting at SI. The end-swapping table is
361 // already loaded into XMM5; and it's OK to work in 16-byte chunks.
388 ///--------------------------------------------------------------------------
389 /// Encrypting and decrypting blocks.
// encdec OP, AES, KOFF
//
// Emit a `rijndael_OP_x86ish_aesni' block-operation function built
// around the AES round instruction AES (`aesenc' or `aesdec' --
// `\aes\()last' supplies the final round). KOFF names the key
// schedule within the context (`w' or `wi' -- see the invocations
// below).
391 .macro encdec op, aes, koff
392 FUNC(rijndael_\op\()_x86ish_aesni)
394 // Find the magic endianness-swapping table.
396 movdqa xmm5, [INTADDR(endswap_tab, ecx)]
399 // Arguments come in on the stack, and need to be collected. We
400 // don't have a shortage of registers.
411 #if CPUFAM_AMD64 && ABI_SYSV
412 // Arguments come in registers. All is good.
420 #if CPUFAM_AMD64 && ABI_WIN
421 // Arguments come in different registers.
435 // Initial whitening.
440 // Dispatch to the correct code.
479 movdqu xmm1, [K + 16]
483 movdqu xmm1, [K + 32]
487 movdqu xmm1, [K + 48]
491 movdqu xmm1, [K + 64]
495 movdqu xmm1, [K + 80]
499 movdqu xmm1, [K + 96]
503 movdqu xmm1, [K + 112]
507 movdqu xmm1, [K + 128]
511 movdqu xmm1, [K + 144]
512 \aes\()last xmm0, xmm1
514 // Unpermute the ciphertext block and store it.
// Instantiate the two public entry points: encryption uses the
// schedule at `w', decryption the inverse schedule at `wi'.
532 encdec eblk, aesenc, w
533 encdec dblk, aesdec, wi
535 ///--------------------------------------------------------------------------
536 /// Random utilities.
539 // Abort the process because of a programming error. Indirecting
540 // through this point serves several purposes: (a) by CALLing, rather
541 // than branching to, `abort', we can save the return address, which
542 // might at least provide a hint as to what went wrong; (b) we don't
543 // have conditional CALLs (and they'd be big anyway); and (c) we can
544 // write a HLT here as a backstop against `abort' being mad.
545 bogus: callext F(abort) // should never return
549 ///--------------------------------------------------------------------------
559 ///----- That's all, folks --------------------------------------------------