/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha
///
/// (c) 2015 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.

///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Main code.

	.arch	pentium4
	.section .text

FUNC(chacha_core_x86_sse2)

	// Initial state.  We have three arguments:
	// [ebp +  8] is the number of rounds to do
	// [ebp + 12] points to the input matrix
	// [ebp + 16] points to the output matrix
	push	ebp
	mov	ebp, esp
	sub	esp, 16
	mov	edx, [ebp + 12]
	and	esp, ~15

	// First job is to slurp the matrix into XMM registers.  Be careful:
	// the input matrix isn't likely to be properly aligned.
	//
	//	[ 0  1  2  3]	(a, xmm0)
	//	[ 4  5  6  7]	(b, xmm1)
	//	[ 8  9 10 11]	(c, xmm2)
	//	[12 13 14 15]	(d, xmm3)
	movdqu	xmm0, [edx +  0]
	movdqu	xmm1, [edx + 16]
	movdqu	xmm2, [edx + 32]
	movdqu	xmm3, [edx + 48]

	// Prepare for the main loop.
	mov	ecx, [ebp + 8]

	// Take a copy for later.  This one is aligned properly, by
	// construction.
	movdqa	[esp], xmm0
	movdqa	xmm5, xmm1
	movdqa	xmm6, xmm2
	movdqa	xmm7, xmm3

loop:
	// Apply a column quarterround to each of the columns simultaneously.
	// Alas, there doesn't seem to be a packed doubleword rotate, so we
	// have to synthesize it.

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, 0x93
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, 0x4e
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// The not-quite-transpose conveniently only involves reordering
	// elements of individual rows, which can be done quite easily.  It
	// doesn't involve any movement of elements between rows, or even
	// renaming of the rows.
	//
	//	[ 0  1  2  3]		[ 0  1  2  3]	(a, xmm0)
	//	[ 4  5  6  7]	-->	[ 5  6  7  4]	(b, xmm1)
	//	[ 8  9 10 11]		[10 11  8  9]	(c, xmm2)
	//	[12 13 14 15]		[15 12 13 14]	(d, xmm3)
	//
	// The shuffles have quite high latency, so they've mostly been
	// pushed upwards.  The remaining one can't be moved, though.
	pshufd	xmm1, xmm1, 0x39

	// Apply the diagonal quarterround to each of the columns
	// simultaneously.
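	//
	// For reference, a single scalar ChaCha quarterround on words
	// (a, b, c, d) looks like this; an illustrative C-like sketch only,
	// not assembled, where ROTL32 is assumed to be a 32-bit left
	// rotation:
	//
	//	a += b; d ^= a; d = ROTL32(d, 16);
	//	c += d; b ^= c; b = ROTL32(b, 12);
	//	a += b; d ^= a; d = ROTL32(d,  8);
	//	c += d; b ^= c; b = ROTL32(b,  7);
	//
	// Thanks to the row rotations above, each SIMD column now holds one
	// of the matrix diagonals, so the same column-wise code computes all
	// four diagonal quarterrounds at once.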
	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, 0x39
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, 0x4e
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// Finally, finish off undoing the transpose, and we're done for this
	// doubleround.  Again, most of this was done above so we don't have
	// to wait for the shuffles.
	pshufd	xmm1, xmm1, 0x93

	// Decrement the loop counter and see if we should go round again.
	sub	ecx, 2
	ja	loop

	// Almost there.  Firstly, the feedforward addition.
	mov	edx, [ebp + 16]
	paddd	xmm0, [esp]
	paddd	xmm1, xmm5
	paddd	xmm2, xmm6
	paddd	xmm3, xmm7

	// And now we write out the result.  This one won't be aligned
	// either.
	movdqu	[edx +  0], xmm0
	movdqu	[edx + 16], xmm1
	movdqu	[edx + 32], xmm2
	movdqu	[edx + 48], xmm3

	// Tidy things up.
	mov	esp, ebp
	pop	ebp

	// And with that, we're done.
	ret

ENDFUNC

///----- That's all, folks --------------------------------------------------