base/asm-common.h, *.S: Include metadata for 64-bit Windows stack unwinding.
[catacomb] / symm / rijndael-x86ish-aesni.S
/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// AESNI-based implementation of Rijndael
///
/// (c) 2015 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

	.globl	F(abort)
	.globl	F(rijndael_rcon)

///--------------------------------------------------------------------------
/// Local utilities.

// Magic constants for shuffling.
#define ROTL 0x93
#define ROT2 0x4e
#define ROTR 0x39
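
// (These are `pshufd' immediates: with `pshufd dst, src, IMM', dword i of
// the result is dword (IMM >> 2*i) & 3 of the source.  So ROTL = 0x93 =
// 10 01 00 11 maps dwords (s0, s1, s2, s3) to (s3, s0, s1, s2), rotating
// them one place towards higher indices; ROTR = 0x39 rotates them the
// other way; and ROT2 = 0x4e swaps the two halves.)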

///--------------------------------------------------------------------------
/// Main code.

	.arch	.aes
	.text

/// The AESNI instructions implement a little-endian version of AES, but
/// Catacomb's internal interface presents as big-endian so as to work better
/// with things like GCM.  We therefore maintain the round keys in
/// little-endian form, and have to end-swap blocks in and out.
///
/// For added amusement, the AESNI instructions don't implement the
/// larger-block versions of Rijndael, so we have to end-swap the keys if
/// we're preparing for one of those.

	// Useful constants.
	.equ	maxrounds, 16		// maximum number of rounds
	.equ	maxblksz, 32		// maximum block size, in bytes
	.equ	kbufsz, maxblksz*(maxrounds + 1) // size of a key-schedule buffer

	// Context structure.
	.equ	nr, 0			// number of rounds
	.equ	w, nr + 4		// encryption key words
	.equ	wi, w + kbufsz		// decryption key words
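
	// In C terms, these offsets assume a context laid out roughly like
	// the following sketch (illustrative only -- the authoritative
	// definition lives on the C side of Catacomb):
	//
	//	struct rijndael_ctx {
	//		uint32 nr;		/* number of rounds */
	//		uint32 w[kbufsz/4];	/* encryption key words */
	//		uint32 wi[kbufsz/4];	/* decryption key words */
	//	};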

///--------------------------------------------------------------------------
/// Key setup.

FUNC(rijndael_setup_x86ish_aesni)

#if CPUFAM_X86
	// Arguments are on the stack.  We'll need to stack the caller's
	// register variables, but we'll manage.

# define CTX ebp			// context pointer
# define BLKSZ [esp + 24]		// block size

# define SI esi				// source pointer
# define DI edi				// destination pointer

# define KSZ ebx			// key size
# define KSZo ebx			// ... as address offset
# define NKW edx			// total number of key words
# define NKW_NEEDS_REFRESH 1		// ... needs recalculating
# define RCON ecx			// round constants table
# define LIM edx			// limit pointer
# define LIMn edx			// ... as integer offset from base
# define CYIX edi			// index in shift-register cycle

# define NR ecx				// number of rounds
# define LRK eax			// distance to last key
# define LRKo eax			// ... as address offset
# define BLKOFF edx			// block size in bytes
# define BLKOFFo edx			// ... as address offset

	// Stack the caller's registers.
	push	ebp
	push	ebx
	push	esi
	push	edi

	// Set up our own variables.
	mov	CTX, [esp + 20]		// context base pointer
	mov	SI, [esp + 28]		// key material
	mov	KSZ, [esp + 32]		// key size, in words
#endif

#if CPUFAM_AMD64 && ABI_SYSV
	// Arguments are in registers.  We have plenty, but, to be honest,
	// the initial register allocation is a bit annoying.

# define CTX r8				// context pointer
# define BLKSZ r9d			// block size

# define SI rsi				// source pointer
# define DI rdi				// destination pointer

# define KSZ edx			// key size
# define KSZo rdx			// ... as address offset
# define NKW r10d			// total number of key words
# define RCON rdi			// round constants table
# define LIM rcx			// limit pointer
# define LIMn ecx			// ... as integer offset from base
# define CYIX r11d			// index in shift-register cycle

# define NR ecx				// number of rounds
# define LRK eax			// distance to last key
# define LRKo rax			// ... as address offset
# define BLKOFF r9d			// block size in bytes
# define BLKOFFo r9			// ... as address offset

	// Move arguments to more useful places.
	mov	CTX, rdi		// context base pointer
	mov	BLKSZ, esi		// block size in words
	mov	SI, rdx			// key material
	mov	KSZ, ecx		// key size, in words
#endif

#if CPUFAM_AMD64 && ABI_WIN
	// Arguments are in different registers, and they're a little tight.

# define CTX r8				// context pointer
# define BLKSZ edx			// block size

# define SI rsi				// source pointer
# define DI rdi				// destination pointer

# define KSZ r9d			// key size
# define KSZo r9			// ... as address offset
# define NKW r10d			// total number of key words
# define RCON rdi			// round constants table
# define LIM rcx			// limit pointer
# define LIMn ecx			// ... as integer offset from base
# define CYIX r11d			// index in shift-register cycle

# define NR ecx				// number of rounds
# define LRK eax			// distance to last key
# define LRKo rax			// ... as address offset
# define BLKOFF edx			// block size in bytes
# define BLKOFFo rdx			// ... as address offset

	// We'll need the index registers, which belong to the caller in this
	// ABI.
	push	rsi
	.seh_pushreg rsi
	push	rdi
	.seh_pushreg rdi
	.seh_endprologue
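
	// (The `.seh_pushreg' and `.seh_endprologue' directives emit no
	// instructions: they generate the unwind metadata which 64-bit
	// Windows needs in order to walk the stack past this prologue,
	// e.g., during exception dispatch.)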

	// Move arguments to more useful places.
	mov	SI, r8			// key material
	mov	CTX, rcx		// context base pointer
#endif

	// The initial round key material is taken directly from the input
	// key, so copy it over.
#if CPUFAM_AMD64 && ABI_SYSV
	// We've been lucky.  We already have a copy of the context pointer
	// in rdi, and the key size in ecx.
	add	DI, w
#else
	lea	DI, [CTX + w]
	mov	ecx, KSZ
#endif
	rep	movsd

	// Find out other useful things.
	mov	NKW, [CTX + nr]		// number of rounds
	add	NKW, 1
	imul	NKW, BLKSZ		// total key size in words
#if !NKW_NEEDS_REFRESH
	// If we can't keep NKW for later, then we use the same register for
	// it and LIM, so this move is unnecessary.
	mov	LIMn, NKW
#endif
	sub	LIMn, KSZ		// offset by the key size

	// Find the round constants.
	ldgot	ecx
	leaext	RCON, F(rijndael_rcon), ecx

	// Prepare for the main loop.
	lea	SI, [CTX + w]
	mov	eax, [SI + 4*KSZo - 4]	// most recent key word
	lea	LIM, [SI + 4*LIM]	// limit, offset by one key expansion
	xor	CYIX, CYIX		// start of new cycle

	// Main key expansion loop.  The first word of each key-length chunk
	// needs special treatment.
	//
	// This is rather tedious because the Intel `AESKEYGENASSIST'
	// instruction is very strangely shaped.  Firstly, it wants to
	// operate on vast SSE registers, even though we're data-blocked from
	// doing more than one operation at a time unless we're doing two key
	// schedules simultaneously -- and even then we can't do more than
	// two, because the instruction ignores two of its input words
	// entirely, and produces two different outputs for each of the other
	// two.  And secondly it insists on taking the magic round constant
	// as an immediate, so it's kind of annoying if you're not
	// open-coding the whole thing.  It's much easier to leave that as
	// zero and XOR in the round constant by hand.
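	//
	// In C terms, the loop below implements the standard Rijndael
	// key-expansion recurrence (a sketch, writing Nk for the key size
	// KSZ, in words):
	//
	//	for (i = Nk; i < NKW; i++) {
	//		t = w[i - 1];
	//		if (i%Nk == 0) t = SubWord(RotWord(t)) ^ rcon[i/Nk - 1];
	//		else if (Nk > 6 && i%Nk == 4) t = SubWord(t);
	//		w[i] = w[i - Nk] ^ t;
	//	}
	//
	// with CYIX tracking i mod Nk.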
0:	cmp	CYIX, 0			// first word of the cycle?
	je	1f
	cmp	CYIX, 4			// fourth word of the cycle?
	jne	2f
	cmp	KSZ, 7			// and a large key?
	jb	2f

	// Fourth word of the cycle, and seven or eight words of key.  Do a
	// byte substitution.
	movd	xmm0, eax
	pshufd	xmm0, xmm0, ROTL
	aeskeygenassist xmm1, xmm0, 0
	movd	eax, xmm1
	jmp	2f

	// First word of the cycle.  This is the complicated piece.
1:	movd	xmm0, eax
	pshufd	xmm0, xmm0, ROTR
	aeskeygenassist xmm1, xmm0, 0
	pshufd	xmm1, xmm1, ROTL
	movd	eax, xmm1
	xor	al, [RCON]
	inc	RCON

	// Common tail.  Mix in the corresponding word from the previous
	// cycle and prepare for the next loop.
2:	xor	eax, [SI]
	mov	[SI + 4*KSZo], eax
	add	SI, 4
	inc	CYIX
	cmp	SI, LIM
	jae	9f
	cmp	CYIX, KSZ
	jb	0b
	xor	CYIX, CYIX
	jmp	0b

	// Next job is to construct the decryption keys.  The keys for the
	// first and last rounds don't need to be mangled, but the remaining
	// ones do -- and they all need to be reordered too.
	//
	// The plan of action, then, is to copy the final encryption round's
	// keys into place first, then to do each of the intermediate rounds
	// in reverse order, and finally do the first round.
	//
	// Do all of the heavy lifting with SSE registers.  The order we're
	// doing this in means that it's OK if we read or write too much, and
	// there's easily enough buffer space for the over-enthusiastic reads
	// and writes because the context has space for 32-byte blocks, which
	// is our maximum and an exact fit for two SSE registers.
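	//
	// In other words, this is the standard `equivalent inverse cipher'
	// key schedule: for each intermediate round 0 < i < NR, the
	// decryption keys are
	//
	//	wi[i] = InvMixColumns(w[NR - i])
	//
	// (`InvMixColumns' is exactly what `AESIMC' computes), while the
	// outer keys wi[0] = w[NR] and wi[NR] = w[0] are copied unchanged.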
9:	mov	NR, [CTX + nr]		// number of rounds
#if NKW_NEEDS_REFRESH
	mov	BLKOFF, BLKSZ
	mov	LRK, NR
	imul	LRK, BLKOFF
#else
	// If we retain NKW, then BLKSZ and BLKOFF are the same register
	// because we won't need the former again.
	mov	LRK, NKW
	sub	LRK, BLKSZ
#endif
	lea	DI, [CTX + wi]
	lea	SI, [CTX + w + 4*LRKo]	// last round's keys
	shl	BLKOFF, 2		// block size (in bytes now)

	// Copy the last encryption round's keys.
	movdqu	xmm0, [SI]
	movdqu	[DI], xmm0
	cmp	BLKOFF, 16
	jbe	0f
	movdqu	xmm0, [SI + 16]
	movdqu	[DI + 16], xmm0

	// Update the loop variables and stop if we've finished.
0:	add	DI, BLKOFFo
	sub	SI, BLKOFFo
	sub	NR, 1
	jbe	9f

	// Do another middle round's keys...
	movdqu	xmm0, [SI]
	aesimc	xmm0, xmm0
	movdqu	[DI], xmm0
	cmp	BLKOFF, 16
	jbe	0b
	movdqu	xmm0, [SI + 16]
	aesimc	xmm0, xmm0
	movdqu	[DI + 16], xmm0
	jmp	0b

	// Finally do the first encryption round.
9:	movdqu	xmm0, [SI]
	movdqu	[DI], xmm0
	cmp	BLKOFF, 16
	jbe	1f
	movdqu	xmm0, [SI + 16]
	movdqu	[DI + 16], xmm0

	// If the block size is not exactly four words then we must end-swap
	// everything.  We can use fancy SSE toys for this.
1:	cmp	BLKOFF, 16
	je	9f

	// Find the byte-reordering table.
	ldgot	ecx
	movdqa	xmm5, [INTADDR(endswap_tab, ecx)]

#if NKW_NEEDS_REFRESH
	// Calculate the number of subkey words again.  (It's a good job
	// we've got a fast multiplier.)
	mov	NKW, [CTX + nr]
	add	NKW, 1
	imul	NKW, BLKSZ
#endif

	// End-swap the encryption keys.
	lea	SI, [CTX + w]
	call	endswap_block

	// And the decryption keys.
	lea	SI, [CTX + wi]
	call	endswap_block

9:	// All done.
#if CPUFAM_X86
	pop	edi
	pop	esi
	pop	ebx
	pop	ebp
#endif
#if CPUFAM_AMD64 && ABI_WIN
	pop	rdi
	pop	rsi
#endif
	ret

	.align	16
endswap_block:
	// End-swap NKW words starting at SI.  The end-swapping table is
	// already loaded into XMM5; and it's OK to work in 16-byte chunks.
	mov	ecx, NKW
0:	movdqu	xmm1, [SI]
	pshufb	xmm1, xmm5
	movdqu	[SI], xmm1
	add	SI, 16
	sub	ecx, 4
	ja	0b
	ret

#undef CTX
#undef BLKSZ
#undef SI
#undef DI
#undef KSZ
#undef KSZo
#undef RCON
#undef LIMn
#undef LIM
#undef NR
#undef LRK
#undef LRKo
#undef BLKOFF
#undef BLKOFFo

ENDFUNC

///--------------------------------------------------------------------------
/// Encrypting and decrypting blocks.

.macro	encdec	op, aes, koff
FUNC(rijndael_\op\()_x86ish_aesni)

#if CPUFAM_X86
	// Arguments come in on the stack, and need to be collected.  We
	// don't have a shortage of registers.

# define K ecx
# define SRC edx
# define DST edx
# define NR eax

	mov	K, [esp + 4]
	mov	SRC, [esp + 8]
#endif

#if CPUFAM_AMD64 && ABI_SYSV
	// Arguments come in registers.  All is good.

# define K rdi
# define SRC rsi
# define DST rdx
# define NR eax
#endif

#if CPUFAM_AMD64 && ABI_WIN
	// Arguments come in different registers.

# define K rcx
# define SRC rdx
# define DST r8
# define NR eax
	.seh_endprologue
#endif

	// Find the magic endianness-swapping table.
	ldgot	ecx
	movdqa	xmm5, [INTADDR(endswap_tab, ecx)]

	// Initial setup.
	movdqu	xmm0, [SRC]
	pshufb	xmm0, xmm5
	mov	NR, [K + nr]
	add	K, \koff

	// Initial whitening.
	movdqu	xmm1, [K]
	add	K, 16
	pxor	xmm0, xmm1

	// Dispatch to the correct code.
	cmp	NR, 10
	je	10f
	jb	bogus
	cmp	NR, 14
	je	14f
	ja	bogus
	cmp	NR, 12
	je	12f
	jb	11f
	jmp	13f
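
	// (Recall that Rijndael does max(Nk, Nb) + 6 rounds, where Nk and
	// Nb are the key and block sizes in 32-bit words; with both between
	// 4 and 8 words, that's 10 to 14 rounds, which is exactly the range
	// dispatched on above.)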

	.align	2

	// 14 rounds...
14:	movdqu	xmm1, [K]
	add	K, 16
	\aes	xmm0, xmm1

	// 13 rounds...
13:	movdqu	xmm1, [K]
	add	K, 16
	\aes	xmm0, xmm1

	// 12 rounds...
12:	movdqu	xmm1, [K]
	add	K, 16
	\aes	xmm0, xmm1

	// 11 rounds...
11:	movdqu	xmm1, [K]
	add	K, 16
	\aes	xmm0, xmm1

	// 10 rounds...
10:	movdqu	xmm1, [K]
	\aes	xmm0, xmm1

	// 9 rounds...
	movdqu	xmm1, [K + 16]
	\aes	xmm0, xmm1

	// 8 rounds...
	movdqu	xmm1, [K + 32]
	\aes	xmm0, xmm1

	// 7 rounds...
	movdqu	xmm1, [K + 48]
	\aes	xmm0, xmm1

	// 6 rounds...
	movdqu	xmm1, [K + 64]
	\aes	xmm0, xmm1

	// 5 rounds...
	movdqu	xmm1, [K + 80]
	\aes	xmm0, xmm1

	// 4 rounds...
	movdqu	xmm1, [K + 96]
	\aes	xmm0, xmm1

	// 3 rounds...
	movdqu	xmm1, [K + 112]
	\aes	xmm0, xmm1

	// 2 rounds...
	movdqu	xmm1, [K + 128]
	\aes	xmm0, xmm1

	// Final round...
	movdqu	xmm1, [K + 144]
	\aes\()last xmm0, xmm1

	// Unpermute the output block and store it.
	pshufb	xmm0, xmm5
#if CPUFAM_X86
	mov	DST, [esp + 12]
#endif
	movdqu	[DST], xmm0

	// And we're done.
	ret

#undef K
#undef SRC
#undef DST
#undef NR

ENDFUNC
.endm

	encdec	eblk, aesenc, w
	encdec	dblk, aesdec, wi
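
// Reading the argument-marshalling code above back into C, the functions
// defined here have prototypes along these lines (a sketch only --
// parameter names and exact types are illustrative, and the authoritative
// declarations live on the C side of Catacomb):
//
//	void rijndael_setup_x86ish_aesni(rijndael_ctx *ctx, unsigned blksz,
//					 const void *key, size_t keysz);
//	void rijndael_eblk_x86ish_aesni(const rijndael_ctx *ctx,
//					const uint32 *src, uint32 *dst);
//	void rijndael_dblk_x86ish_aesni(const rijndael_ctx *ctx,
//					const uint32 *src, uint32 *dst);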

///--------------------------------------------------------------------------
/// Random utilities.

	.align	16
	// Abort the process because of a programming error.  Indirecting
	// through this point serves several purposes: (a) by CALLing, rather
	// than branching to, `abort', we can save the return address, which
	// might at least provide a hint as to what went wrong; (b) we don't
	// have conditional CALLs (and they'd be big anyway); and (c) we can
	// write a HLT here as a backstop against `abort' being mad.
bogus:	callext	F(abort)
0:	hlt
	jmp	0b

///--------------------------------------------------------------------------
/// Data tables.

	.align	16
endswap_tab:
	.byte	 3,  2,  1,  0
	.byte	 7,  6,  5,  4
	.byte	11, 10,  9,  8
	.byte	15, 14, 13, 12
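
// (Used with `pshufb': byte i of the result is byte endswap_tab[i] of the
// source, so each aligned four-byte group is reversed, end-swapping each
// 32-bit word of an XMM register.)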

///----- That's all, folks --------------------------------------------------