/// -*- mode: asm; asm-comment-char: ?/ -*-
///
/// Fancy SIMD implementation of ChaCha
///
/// (c) 2015 Straylight/Edgeware
///

///----- Licensing notice ---------------------------------------------------
///
/// This file is part of Catacomb.
///
/// Catacomb is free software; you can redistribute it and/or modify
/// it under the terms of the GNU Library General Public License as
/// published by the Free Software Foundation; either version 2 of the
/// License, or (at your option) any later version.
///
/// Catacomb is distributed in the hope that it will be useful,
/// but WITHOUT ANY WARRANTY; without even the implied warranty of
/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
/// GNU Library General Public License for more details.
///
/// You should have received a copy of the GNU Library General Public
/// License along with Catacomb; if not, write to the Free
/// Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
/// MA 02111-1307, USA.
///--------------------------------------------------------------------------
/// External definitions.

#include "config.h"
#include "asm-common.h"

///--------------------------------------------------------------------------
/// Local utilities.

// Magic constants for shuffling.
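// These are `pshufd' immediates.  ROTL moves each 32-bit lane one
// place towards the high end (viewed as a 128-bit integer, a rotate
// left by 32 bits), ROTR moves each lane one place towards the low
// end, and ROT2 moves them two places (the same in either direction).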
#define ROTL 0x93
#define ROT2 0x4e
#define ROTR 0x39

///--------------------------------------------------------------------------
/// Main code.

	.arch	pentium4
	.section .text

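// The function takes three arguments: the number of rounds to perform
// (two per trip through the loop below), a pointer to the input
// matrix, and a pointer to the output matrix.  How they arrive depends
// on the ABI; see the register assignments below.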
FUNC(chacha_core_x86ish_sse2)

	// Initial setup.

#if CPUFAM_X86
	// Arguments come in on the stack, and will need to be collected.
	// We can get away with just the scratch registers for integer
	// work, but we'll run out of XMM registers and will need some
	// properly aligned space which we'll steal from the stack.  I
	// don't trust the stack pointer's alignment, so I'll have to mask
	// the stack pointer, which in turn means I'll need to keep track
	// of the old value.  Hence I'm making a full i386-style stack
	// frame here.
	//
	// The Windows and SysV ABIs are sufficiently similar that we
	// don't need to worry about the differences here.
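	//
	// Once the frame is built, the arguments can be found at
	// [ebp + 8] (the round count), [ebp + 12] (the input pointer)
	// and [ebp + 16] (the output pointer), as collected below.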

# define NR ecx
# define IN eax
# define OUT edx
# define SAVE0 xmm5
# define SAVE1 xmm6
# define SAVE2 xmm7
# define SAVE3 [esp]

	push	ebp
	mov	ebp, esp
	sub	esp, 16
	mov	IN, [ebp + 12]
	mov	OUT, [ebp + 16]
	and	esp, ~15
	mov	NR, [ebp + 8]
#endif

#if CPUFAM_AMD64 && ABI_SYSV
	// This is nice.  We have plenty of XMM registers, and the
	// arguments are in useful places.  There's no need to spill
	// anything and we can just get on with the code.
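	// (The SysV ABI passes the first three integer arguments in rdi,
	// rsi and rdx, which is exactly where we want them.)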

# define NR edi
# define IN rsi
# define OUT rdx
# define SAVE0 xmm5
# define SAVE1 xmm6
# define SAVE2 xmm7
# define SAVE3 xmm8
#endif

#if CPUFAM_AMD64 && ABI_WIN
	// Arguments come in registers, but they're different between
	// Windows and everyone else (and everyone else is saner).
	//
	// The Windows ABI insists that we preserve some of the XMM
	// registers, but we want more than we can use as scratch space.
	// We only need to save a copy of the input for the feedforward
	// at the end, so we might as well use memory rather than spill
	// extra registers.  (We need an extra 8 bytes to align the
	// stack.)
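	//
	// (Specifically, only xmm0--xmm5 are volatile under the Windows
	// ABI; xmm6--xmm15 would have to be saved and restored, so we
	// spill to the stack instead.)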

# define NR ecx
# define IN rdx
# define OUT r8
# define SAVE0 xmm5
# define SAVE1 [rsp + 0]
# define SAVE2 [rsp + 16]
# define SAVE3 [rsp + 32]

	sub	rsp, 48 + 8
#endif

	// First job is to slurp the matrix into XMM registers.  Be
	// careful: the input matrix isn't likely to be properly aligned.
	//
	//	[ 0  1  2  3]		(a, xmm0)
	//	[ 4  5  6  7]		(b, xmm1)
	//	[ 8  9 10 11]		(c, xmm2)
	//	[12 13 14 15]		(d, xmm3)
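	//
	// (Word 4i + j of the matrix ends up in doubleword lane j of the
	// register holding row i, so each register holds one row.)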
	movdqu	xmm0, [IN + 0]
	movdqu	xmm1, [IN + 16]
	movdqu	xmm2, [IN + 32]
	movdqu	xmm3, [IN + 48]

	// Take a copy for later.  This one is aligned properly, by
	// construction: each SAVEn is either an XMM register or a
	// 16-byte-aligned stack slot set up above.
	movdqa	SAVE0, xmm0
	movdqa	SAVE1, xmm1
	movdqa	SAVE2, xmm2
	movdqa	SAVE3, xmm3

loop:
	// Apply a column quarterround to each of the columns
	// simultaneously.  Alas, there doesn't seem to be a packed
	// doubleword rotate, so we have to synthesize it.
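	//
	// For reference, the quarterround on four scalar words is:
	//
	//	a += b; d ^= a; d <<<= 16;
	//	c += d; b ^= c; b <<<= 12;
	//	a += b; d ^= a; d <<<=  8;
	//	c += d; b ^= c; b <<<=  7;
	//
	// and each rotation x <<<= n is synthesized below as
	// (x << n) | (x >> (32 - n)), using xmm4 as scratch.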

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, ROTL
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, ROT2
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// The not-quite-transpose conveniently only involves reordering
	// elements of individual rows, which can be done quite easily.
	// It doesn't involve any movement of elements between rows, or
	// even renaming of the rows.
	//
	//	[ 0  1  2  3]		[ 0  1  2  3]	(a, xmm0)
	//	[ 4  5  6  7]    -->	[ 5  6  7  4]	(b, xmm1)
	//	[ 8  9 10 11]		[10 11  8  9]	(c, xmm2)
	//	[12 13 14 15]		[15 12 13 14]	(d, xmm3)
	//
	// The shuffles have quite high latency, so they've mostly been
	// pushed upwards: the shuffles of d (by ROTL) and c (by ROT2)
	// are interleaved with the final quarterround step above.  The
	// remaining shuffle of b can't be moved, though, since it
	// depends on the freshly rotated value in xmm1.
	pshufd	xmm1, xmm1, ROTR

	// Apply the diagonal quarterround to each of the diagonals
	// simultaneously.

	// a += b; d ^= a; d <<<= 16
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 16
	psrld	xmm4, 16
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 12
	paddd	xmm2, xmm3
	pxor	xmm1, xmm2
	movdqa	xmm4, xmm1
	pslld	xmm1, 12
	psrld	xmm4, 20
	por	xmm1, xmm4

	// a += b; d ^= a; d <<<= 8
	paddd	xmm0, xmm1
	pxor	xmm3, xmm0
	movdqa	xmm4, xmm3
	pslld	xmm3, 8
	psrld	xmm4, 24
	por	xmm3, xmm4

	// c += d; b ^= c; b <<<= 7
	paddd	xmm2, xmm3
	pshufd	xmm3, xmm3, ROTR
	pxor	xmm1, xmm2
	pshufd	xmm2, xmm2, ROT2
	movdqa	xmm4, xmm1
	pslld	xmm1, 7
	psrld	xmm4, 25
	por	xmm1, xmm4

	// Finally, finish off undoing the transpose, and we're done for
	// this doubleround.  Again, most of this was done above so we
	// don't have to wait for the shuffles.
	pshufd	xmm1, xmm1, ROTL

	// Decrement the loop counter and see if we should go round
	// again.  NR counts rounds, and each trip through the loop above
	// is a doubleround, hence the step of two.
	sub	NR, 2
	ja	loop

	// Almost there.  Firstly, the feedforward addition.
	paddd	xmm0, SAVE0
	paddd	xmm1, SAVE1
	paddd	xmm2, SAVE2
	paddd	xmm3, SAVE3
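	// (Adding the saved input back into the permuted state is what
	// makes the core function hard to invert: without it, the
	// doublerounds alone would form an invertible permutation.)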

	// And now we write out the result.  This one won't be aligned
	// either.
	movdqu	[OUT + 0], xmm0
	movdqu	[OUT + 16], xmm1
	movdqu	[OUT + 32], xmm2
	movdqu	[OUT + 48], xmm3

	// Tidy things up.
#if CPUFAM_X86
	mov	esp, ebp
	pop	ebp
#endif
#if CPUFAM_AMD64 && ABI_WIN
	add	rsp, 48 + 8
#endif

	// And with that, we're done.
	ret

ENDFUNC

///----- That's all, folks --------------------------------------------------