/// MA 02111-1307, USA.
///--------------------------------------------------------------------------
-/// External definitions.
+/// Preliminaries.
#include "config.h"
#include "asm-common.h"
+ .text
+
///--------------------------------------------------------------------------
/// Main code.
- .arch pentium4
- .text
+FUNC(salsa20_core_x86ish_avx)
+ .arch .avx
+ vzeroupper
+ endprologue
+ // drop through...
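+	//
+	// The `vzeroupper' clears the upper halves of the YMM registers,
+	// so that the legacy-SSE code we drop through into doesn't pay
+	// the AVX/SSE transition penalty; the `.arch' directives widen
+	// the permitted instruction set just long enough to assemble the
+	// stub, and narrow it again below.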
+ENDFUNC
+
+ .arch pentium4
FUNC(salsa20_core_x86ish_sse2)
# define OUT edx
# define SAVE0 xmm6
# define SAVE1 xmm7
-# define SAVE2 [esp + 0]
-# define SAVE3 [esp + 16]
-
- pushreg ebp
- setfp ebp
- sub esp, 32
- mov IN, [ebp + 12]
- mov OUT, [ebp + 16]
- and esp, ~15
- mov NR, [ebp + 8]
+# define SAVE2 [SP + 0]
+# define SAVE3 [SP + 16]
+
+ pushreg BP
+ setfp
+ stalloc 32
+ mov IN, [BP + 12]
+ mov OUT, [BP + 16]
+ and SP, ~15
+ mov NR, [BP + 8]
#endif
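
	// For reference: the argument layout above corresponds to a
	// prototype along these lines (a sketch only; the name and exact
	// types here are illustrative, not the library's declaration):
	//
	//	void salsa20_core(unsigned nr, const uint32_t src[16],
	//			  uint32_t dst[16]);
	//
	// i.e., NR = [BP + 8], IN = [BP + 12], OUT = [BP + 16].  The
	// `and SP, ~15' forces 16-byte stack alignment so that the SAVE2
	// and SAVE3 slots can be used with aligned 128-bit transfers.
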
#if CPUFAM_AMD64 && ABI_WIN
# define OUT r8
# define SAVE0 xmm6
# define SAVE1 xmm7
-# define SAVE2 [rsp + 32]
-# define SAVE3 [rsp + 48]
+# define SAVE2 [SP + 32]
+# define SAVE3 [SP + 48]
stalloc 64 + 8
savexmm xmm6, 0
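
	// Unlike System V, the Win64 ABI makes xmm6--xmm15 callee-saved,
	// hence the explicit saves; the odd 8 in the allocation restores
	// 16-byte stack alignment on top of the return address.
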
// d ^= (c + b) <<< 13
movdqa xmm4, xmm2
paddd xmm4, xmm1
- pshufd xmm1, xmm1, SHUF(2, 1, 0, 3)
+ pshufd xmm1, xmm1, SHUF(3, 0, 1, 2)
movdqa xmm5, xmm4
pslld xmm4, 13
psrld xmm5, 19
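
	// SSE2 has no vector rotate, so `x <<< n' is synthesized from a
	// pair of shifts; in scalar terms this step is roughly
	//
	//	t = c + b;
	//	d ^= (t << 13) | (t >> 19);
	//
	// note 13 + 19 = 32, and likewise 18 + 14 = 32 below.
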
// a ^= (d + c) <<< 18
movdqa xmm4, xmm3
- pshufd xmm3, xmm3, SHUF(0, 3, 2, 1)
+ pshufd xmm3, xmm3, SHUF(1, 2, 3, 0)
paddd xmm4, xmm2
- pshufd xmm2, xmm2, SHUF(1, 0, 3, 2)
+ pshufd xmm2, xmm2, SHUF(2, 3, 0, 1)
movdqa xmm5, xmm4
pslld xmm4, 18
psrld xmm5, 14
// d ^= (c + b) <<< 13
movdqa xmm4, xmm2
paddd xmm4, xmm3
- pshufd xmm3, xmm3, SHUF(2, 1, 0, 3)
+ pshufd xmm3, xmm3, SHUF(3, 0, 1, 2)
movdqa xmm5, xmm4
pslld xmm4, 13
psrld xmm5, 19
// a ^= (d + c) <<< 18
movdqa xmm4, xmm1
- pshufd xmm1, xmm1, SHUF(0, 3, 2, 1)
+ pshufd xmm1, xmm1, SHUF(1, 2, 3, 0)
paddd xmm4, xmm2
- pshufd xmm2, xmm2, SHUF(1, 0, 3, 2)
+ pshufd xmm2, xmm2, SHUF(2, 3, 0, 1)
movdqa xmm5, xmm4
pslld xmm4, 18
psrld xmm5, 14
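+
+	// SHUF's arguments now read in increasing lane order: SHUF(a, b,
+	// c, d) selects source element a into lane 0, b into lane 1, and
+	// so on, presumably along the lines of
+	//
+	//	#define SHUF(a, b, c, d) ((a) | (b) << 2 | (c) << 4 | (d) << 6)
+	//
+	// (a sketch; the real definition is in a shared header).  Each
+	// reversed immediate, e.g. SHUF(3, 0, 1, 2) replacing the old
+	// SHUF(2, 1, 0, 3), encodes exactly the same byte, so the
+	// generated code is unchanged.
+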
// input. This can be done by juggling values in registers, with the
// following fancy footwork: some row rotations, a transpose, and
// some more rotations.
- pshufd xmm1, xmm1, SHUF(2, 1, 0, 3) // 3, 4, 9, 14
- pshufd xmm2, xmm2, SHUF(1, 0, 3, 2) // 2, 7, 8, 13
- pshufd xmm3, xmm3, SHUF(0, 3, 2, 1) // 1, 6, 11, 12
+ pshufd xmm1, xmm1, SHUF(3, 0, 1, 2) // 3, 4, 9, 14
+ pshufd xmm2, xmm2, SHUF(2, 3, 0, 1) // 2, 7, 8, 13
+ pshufd xmm3, xmm3, SHUF(1, 2, 3, 0) // 1, 6, 11, 12
movdqa xmm4, xmm0
movdqa xmm5, xmm3
punpckhdq xmm1, xmm3 // 5, 6, 7, 4
punpckhdq xmm2, xmm5 // 15, 12, 13, 14
- pshufd xmm1, xmm1, SHUF(2, 1, 0, 3) // 4, 5, 6, 7
- pshufd xmm4, xmm4, SHUF(1, 0, 3, 2) // 8, 9, 10, 11
- pshufd xmm2, xmm2, SHUF(0, 3, 2, 1) // 12, 13, 14, 15
+ pshufd xmm1, xmm1, SHUF(3, 0, 1, 2) // 4, 5, 6, 7
+ pshufd xmm4, xmm4, SHUF(2, 3, 0, 1) // 8, 9, 10, 11
+ pshufd xmm2, xmm2, SHUF(1, 2, 3, 0) // 12, 13, 14, 15
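+
+	// Each of these shuffles rotates its row by one lane: SHUF(3, 0,
+	// 1, 2), say, maps (x0, x1, x2, x3) to (x3, x0, x1, x2).  Together
+	// with the two interleaves above, this completes the transpose
+	// back into the natural order 0--15.
+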
// Finally we have to write out the result.
movdqu [OUT + 0], xmm0
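	// (movdqu, not movdqa: the caller's output buffer isn't assumed
	// to be 16-byte aligned.)
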
// Tidy things up.
#if CPUFAM_X86
dropfp
- popreg ebp
+ popreg BP
#endif
#if CPUFAM_AMD64 && ABI_WIN
rstrxmm xmm6, 0
- rsrrxmm xmm7, 16
+ rstrxmm xmm7, 16
stfree 64 + 8
#endif