/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha
///
/// (c) 2015 Straylight/Edgeware
///
///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// Preliminaries.

#include "config.h"
#include "asm-common.h"

	.text

///--------------------------------------------------------------------------
/// Main code.

FUNC(chacha_core_x86ish_avx)
	.arch	.avx
	vzeroupper
	endprologue
	// drop through...
ENDFUNC

	.arch	pentium4

FUNC(chacha_core_x86ish_sse2)

	// Initial setup.

#if CPUFAM_X86
	// Arguments come in on the stack, and will need to be collected.  We
	// can get away with just the scratch registers for integer work, but
	// we'll run out of XMM registers and will need some properly aligned
	// space which we'll steal from the stack.  I don't trust the stack
	// pointer's alignment, so I'll have to mask the stack pointer, which
	// in turn means I'll need to keep track of the old value.  Hence I'm
	// making a full i386-style stack frame here.
	//
	// The Windows and SysV ABIs are sufficiently similar that we don't
	// need to worry about the differences here.

#  define NR ecx
#  define IN eax
#  define OUT edx
#  define SAVE0 xmm5
#  define SAVE1 xmm6
#  define SAVE2 xmm7
#  define SAVE3 [SP]

	pushreg	BP
	setfp
	stalloc	16
	mov	IN, [BP + 12]
	mov	OUT, [BP + 16]
	and	SP, ~15
	mov	NR, [BP + 8]
#endif

#if CPUFAM_AMD64 && ABI_SYSV
	// This is nice.  We have plenty of XMM registers, and the arguments
	// are in useful places.  There's no need to spill anything and we
	// can just get on with the code.

#  define NR edi
#  define IN rsi
#  define OUT rdx
#  define SAVE0 xmm5
#  define SAVE1 xmm6
#  define SAVE2 xmm7
#  define SAVE3 xmm8
#endif

#if CPUFAM_AMD64 && ABI_WIN
	// Arguments come in registers, but they're different between Windows
	// and everyone else (and everyone else is saner).
	//
	// The Windows ABI insists that we preserve some of the XMM
	// registers, but we want more than we can use as scratch space.  We
	// only need to save a copy of the input for the feedforward at the
	// end, so we might as well use memory rather than spill extra
	// registers.  (We need an extra 8 bytes to align the stack.)

#  define NR ecx
#  define IN rdx
#  define OUT r8
#  define SAVE0 xmm5
#  define SAVE1 [SP + 0]
#  define SAVE2 [SP + 16]
#  define SAVE3 [SP + 32]

	stalloc	48 + 8
#endif

	endprologue

	// First job is to slurp the matrix into XMM registers.  Be careful:
	// the input matrix isn't likely to be properly aligned.
	//
	//	[ 0  1  2  3] (a, xmm0)
	//	[ 4  5  6  7] (b, xmm1)
	//	[ 8  9 10 11] (c, xmm2)
	//	[12 13 14 15] (d, xmm3)
	movdqu	xmm0, [IN +  0]
	movdqu	xmm1, [IN + 16]
	movdqu	xmm2, [IN + 32]
	movdqu	xmm3, [IN + 48]
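
	// For reference, one ChaCha quarterround on a single column, as a
	// plain-C sketch (purely illustrative; not part of the build):
	//
	//	a += b; d ^= a; d = (d << 16) | (d >> 16);
	//	c += d; b ^= c; b = (b << 12) | (b >> 20);
	//	a += b; d ^= a; d = (d <<  8) | (d >> 24);
	//	c += d; b ^= c; b = (b <<  7) | (b >> 25);
	//
	// The loop below applies this to all four columns at once, one XMM
	// register per row, and then again to the diagonals after reordering
	// the rows' elements.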

	// Take a copy for later.  This one is aligned properly, by
	// construction.
	movdqa	SAVE0, xmm0
	movdqa	SAVE1, xmm1
	movdqa	SAVE2, xmm2
	movdqa	SAVE3, xmm3

0:
	// Apply a column quarterround to each of the columns simultaneously.
	// Alas, there doesn't seem to be a packed doubleword rotate, so we
	// have to synthesize it.

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, SHUF(3, 0, 1, 2)
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, SHUF(2, 3, 0, 1)
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// The not-quite-transpose conveniently only involves reordering
	// elements of individual rows, which can be done quite easily.  It
	// doesn't involve any movement of elements between rows, or even
	// renaming of the rows.
	//
	//	[ 0  1  2  3]		[ 0  1  2  3] (a, xmm0)
	//	[ 4  5  6  7]    -->	[ 5  6  7  4] (b, xmm1)
	//	[ 8  9 10 11]		[10 11  8  9] (c, xmm2)
	//	[12 13 14 15]		[15 12 13 14] (d, xmm3)
	//
	// The shuffles have quite high latency, so they've mostly been
	// pushed upwards.  The remaining one can't be moved, though.
	pshufd	xmm1, xmm1, SHUF(1, 2, 3, 0)

	// Apply the diagonal quarterround to each of the columns
	// simultaneously.

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, SHUF(1, 2, 3, 0)
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, SHUF(2, 3, 0, 1)
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// Finally, finish off undoing the transpose, and we're done for this
	// doubleround.  Again, most of this was done above so we don't have
	// to wait for the shuffles.
	pshufd	xmm1, xmm1, SHUF(3, 0, 1, 2)

	// Decrement the loop counter and see if we should go round again.
	sub	NR, 2
	ja	0b

	// Almost there.  Firstly, the feedforward addition.
	paddd	xmm0, SAVE0
	paddd	xmm1, SAVE1
	paddd	xmm2, SAVE2
	paddd	xmm3, SAVE3

	// And now we write out the result.  This one won't be aligned
	// either.
	movdqu	[OUT +  0], xmm0
	movdqu	[OUT + 16], xmm1
	movdqu	[OUT + 32], xmm2
	movdqu	[OUT + 48], xmm3

	// Tidy things up.
#if CPUFAM_X86
	dropfp
	popreg	BP
#endif
#if CPUFAM_AMD64 && ABI_WIN
	stfree	48 + 8
#endif

	// And with that, we're done.
	ret
ENDFUNC

FUNC(chacha_multi_i386_sse2)

	// Arguments are on the stack:
	//
	//	[sp +  4]	pointer to state
	//	[sp +  8]	input pointer (or null)
	//	[sp + 12]	output pointer
	//	[sp + 16]	number of blocks to process
	//	[sp + 20]	number of rounds per block

	pushreg	SI
	pushreg	DI
	pushreg	BX
	stalloc	4*64
	endprologue

	// Load the arguments.
	mov	BX, [SP + 272]		// = state pointer
	mov	SI, [SP + 276]		// = source pointer
	mov	DI, [SP + 280]		// = destination pointer
	mov	CX, [SP + 284]		// = block count
	mov	DX, [SP + 288]		// = (initial) round count

	// Do chunks of four blocks at a time.
	sub	CX, 4
	jb	8f

	// Inhale the initial state.
	movdqu	xmm1, [BX +  0]
	movdqu	xmm3, [BX + 16]
	movdqu	xmm5, [BX + 32]
	movdqu	xmm0, [BX + 48]
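
	// The plan for each chunk, sketched in C-ish pseudocode (purely
	// illustrative; `bump(d, i)' is a hypothetical helper meaning `row d
	// with its block counter advanced by i'):
	//
	//	d0 = d;           d1 = bump(d, 1);
	//	d2 = bump(d, 2);  d3 = bump(d, 3);
	//	state's row d = bump(d, 4);	// starting point for the next chunk
	//
	// Rows a, b and c start out the same for all four blocks, so they
	// only need per-block copies once the differing d values have been
	// mixed into them.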

	// Set the counters and initialize the working blocks.
	pxor	xmm2, xmm2
	pxor	xmm4, xmm4
	pxor	xmm6, xmm6
	pxor	xmm7, xmm7
	xor	eax, eax
	mov	al, 1
	pinsrw	xmm2, eax, 4
	mov	al, 2
	pinsrw	xmm4, eax, 4
	mov	al, 3
	pinsrw	xmm6, eax, 4
	mov	al, 4
	pinsrw	xmm7, eax, 4
	movdqa	[SP + 16], xmm3		// stash b, c and block 0's d
	movdqa	[SP + 32], xmm5
	movdqa	[SP + 48], xmm0
	paddq	xmm2, xmm0		// per-block d for blocks 1--3
	paddq	xmm4, xmm0
	paddq	xmm6, xmm0
	paddq	xmm7, xmm0		// counter for the next chunk
	movdqu	[BX + 48], xmm7

	// a += b; d ^= a; d <<<= 16
	paddd	xmm1, xmm3		// a += b
	movdqa	[SP + 0], xmm1
	pxor	xmm0, xmm1		// d ^= a
	pxor	xmm2, xmm1
	pxor	xmm4, xmm1
	pxor	xmm6, xmm1
	movdqa	xmm1, xmm0
	movdqa	xmm3, xmm2
	movdqa	xmm5, xmm4
	movdqa	xmm7, xmm6
	pslld	xmm0, 16		// d << 16
	pslld	xmm2, 16
	pslld	xmm4, 16
	pslld	xmm6, 16
	psrld	xmm1, 16		// d >> 16
	psrld	xmm3, 16
	psrld	xmm5, 16
	psrld	xmm7, 16
	por	xmm0, xmm1		// d <<<= 16
	movdqa	xmm1, [SP + 32]		// reload c
	por	xmm2, xmm3
	movdqa	xmm3, [SP + 16]		// reload b
	por	xmm4, xmm5
	por	xmm6, xmm7
	movdqa	[SP + 48], xmm0
	movdqa	[SP + 112], xmm2
	movdqa	[SP + 176], xmm4
	movdqa	[SP + 240], xmm6

	// c += d; b ^= c; b <<<= 12
	paddd	xmm0, xmm1		// c += d
	paddd	xmm2, xmm1
	paddd	xmm4, xmm1
	paddd	xmm6, xmm1
	movdqa	[SP + 32], xmm0
	movdqa	[SP + 96], xmm2
	movdqa	[SP + 160], xmm4
	movdqa	[SP + 224], xmm6
	pxor	xmm0, xmm3		// b ^= c
	pxor	xmm2, xmm3
	pxor	xmm4, xmm3
	pxor	xmm6, xmm3
	movdqa	xmm1, xmm0
	movdqa	xmm3, xmm2
	movdqa	xmm5, xmm4
	movdqa	xmm7, xmm6
	pslld	xmm0, 12		// b << 12
	pslld	xmm2, 12
	pslld	xmm4, 12
	pslld	xmm6, 12
	psrld	xmm1, 20		// b >> 20
	psrld	xmm3, 20
	psrld	xmm5, 20
	psrld	xmm7, 20
	por	xmm0, xmm1		// b <<<= 12
	por	xmm2, xmm3
	por	xmm4, xmm5
	por	xmm6, xmm7

ENDFUNC

///----- That's all, folks --------------------------------------------------