mulacc [eax + 12], ecx, xmm7, xmm4, xmm5, xmm6, t
propout [edi + 12], xmm7, xmm4
- movdqa [esp + 0], xmm4
- movdqa [esp + 16], xmm5
- movdqa [esp + 32], xmm6
+ movdqa [SP + 0], xmm4
+ movdqa [SP + 16], xmm5
+ movdqa [SP + 32], xmm6
// Calculate Y = W M.
mulcore [edi + 0], esi, xmm4, xmm5, xmm6, xmm7
propout [edi + 12], xmm7, xmm4
// Add on the carry we calculated earlier.
- paddq xmm4, [esp + 0]
- paddq xmm5, [esp + 16]
- paddq xmm6, [esp + 32]
+ paddq xmm4, [SP + 0]
+ paddq xmm5, [SP + 16]
+ paddq xmm6, [SP + 32]
// And, with that, we're done.
stfree 48 + 12
// void mpx_umul4_x86_sse2(mpw *dv, const mpw *av, const mpw *avl,
// const mpw *bv, const mpw *bvl);
- // Build a stack frame. Arguments will be relative to EBP, as
+ // Build a stack frame. Arguments will be relative to BP, as
// follows.
//
- // ebp + 20 dv
- // ebp + 24 av
- // ebp + 28 avl
- // ebp + 32 bv
- // ebp + 36 bvl
+ // BP + 20 dv
+ // BP + 24 av
+ // BP + 28 avl
+ // BP + 32 bv
+ // BP + 36 bvl
//
- // Locals are relative to ESP, as follows.
+ // Locals are relative to SP, as follows.
//
- // esp + 0 expanded Y (32 bytes)
- // esp + 32 (top of locals)
- pushreg ebp
+ // SP + 0 expanded Y (32 bytes)
+ // SP + 32 (top of locals)
+ pushreg BP
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
- and esp, ~15
- sub esp, 32
+ setfp
+ stalloc 32
+ and SP, ~15
endprologue
// Prepare for the first iteration.
- mov esi, [ebp + 32] // -> bv[0]
+ mov esi, [BP + 32] // -> bv[0]
pxor xmm7, xmm7
movdqu xmm0, [esi] // bv[0]
- mov edi, [ebp + 20] // -> dv[0]
+ mov edi, [BP + 20] // -> dv[0]
mov ecx, edi // outer loop dv cursor
expand xmm7, xmm0, xmm1
- mov ebx, [ebp + 24] // -> av[0]
- mov eax, [ebp + 28] // -> av[m] = av limit
- mov edx, esp // -> expanded Y = bv[0]
- movdqa [esp + 0], xmm0 // bv[0] expanded low
- movdqa [esp + 16], xmm1 // bv[0] expanded high
+ mov ebx, [BP + 24] // -> av[0]
+ mov eax, [BP + 28] // -> av[m] = av limit
+ mov edx, SP // -> expanded Y = bv[0]
+ movdqa [SP + 0], xmm0 // bv[0] expanded low
+ movdqa [SP + 16], xmm1 // bv[0] expanded high
call mul4zc
add ebx, 16
add edi, 16
// Write out the leftover carry. There can be no tail here.
8: call carryprop
- cmp esi, [ebp + 36] // more passes to do?
+ cmp esi, [BP + 36] // more passes to do?
jae 9f
.p2align 4
mov edi, ecx // -> dv[i]
pxor xmm7, xmm7
expand xmm7, xmm0, xmm1
- mov ebx, [ebp + 24] // -> av[0]
- movdqa [esp + 0], xmm0 // bv[i] expanded low
- movdqa [esp + 16], xmm1 // bv[i] expanded high
+ mov ebx, [BP + 24] // -> av[0]
+ movdqa [SP + 0], xmm0 // bv[i] expanded low
+ movdqa [SP + 16], xmm1 // bv[i] expanded high
call mla4zc
add edi, 16
add ebx, 16
// Finish off this pass. There was no tail on the previous pass, and
// there can be none on this pass.
8: call carryprop
- cmp esi, [ebp + 36]
+ cmp esi, [BP + 36]
jb 1b
// All over.
pop edi
pop esi
pop ebx
- pop ebp
+ pop BP
ret
ENDFUNC
// void mpxmont_mul4_x86_sse2(mpw *dv, const mpw *av, const mpw *bv,
// const mpw *nv, size_t n, const mpw *mi);
- // Build a stack frame. Arguments will be relative to EBP, as
+ // Build a stack frame. Arguments will be relative to BP, as
// follows.
//
- // ebp + 20 dv
- // ebp + 24 av
- // ebp + 28 bv
- // ebp + 32 nv
- // ebp + 36 n (nonzero multiple of 4)
- // ebp + 40 mi
+ // BP + 20 dv
+ // BP + 24 av
+ // BP + 28 bv
+ // BP + 32 nv
+ // BP + 36 n (nonzero multiple of 4)
+ // BP + 40 mi
//
- // Locals are relative to ESP, which 16-byte aligned, as follows.
+ // Locals are relative to SP, which is 16-byte aligned, as follows.
//
- // esp + 0 expanded V (32 bytes)
- // esp + 32 expanded M (32 bytes)
- // esp + 64 expanded Y (32 bytes)
- // esp + 96 outer loop dv
- // esp + 100 outer loop bv
- // esp + 104 av limit (mostly in ESI)
- // esp + 108 bv limit
- // esp + 112 (top of locals)
- pushreg ebp
+ // SP + 0 expanded V (32 bytes)
+ // SP + 32 expanded M (32 bytes)
+ // SP + 64 expanded Y (32 bytes)
+ // SP + 96 outer loop dv
+ // SP + 100 outer loop bv
+ // SP + 104 av limit (mostly in ESI)
+ // SP + 108 bv limit
+ // SP + 112 (top of locals)
+ pushreg BP
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
- and esp, ~15
- sub esp, 112
+ setfp
+ stalloc 112
+ and SP, ~15
endprologue
// Establish the expanded operands.
pxor xmm7, xmm7
- mov ecx, [ebp + 28] // -> bv
- mov edx, [ebp + 40] // -> mi
+ mov ecx, [BP + 28] // -> bv
+ mov edx, [BP + 40] // -> mi
movdqu xmm0, [ecx] // bv[0]
movdqu xmm2, [edx] // mi
expand xmm7, xmm0, xmm1, xmm2, xmm3
- movdqa [esp + 0], xmm0 // bv[0] expanded low
- movdqa [esp + 16], xmm1 // bv[0] expanded high
- movdqa [esp + 32], xmm2 // mi expanded low
- movdqa [esp + 48], xmm3 // mi expanded high
+ movdqa [SP + 0], xmm0 // bv[0] expanded low
+ movdqa [SP + 16], xmm1 // bv[0] expanded high
+ movdqa [SP + 32], xmm2 // mi expanded low
+ movdqa [SP + 48], xmm3 // mi expanded high
// Set up the outer loop state and prepare for the first iteration.
- mov edx, [ebp + 36] // n
- mov eax, [ebp + 24] // -> U = av[0]
- mov ebx, [ebp + 32] // -> X = nv[0]
- mov edi, [ebp + 20] // -> Z = dv[0]
- mov [esp + 100], ecx
+ mov edx, [BP + 36] // n
+ mov eax, [BP + 24] // -> U = av[0]
+ mov ebx, [BP + 32] // -> X = nv[0]
+ mov edi, [BP + 20] // -> Z = dv[0]
+ mov [SP + 100], ecx
lea ecx, [ecx + 4*edx] // -> bv[n/4] = bv limit
lea edx, [eax + 4*edx] // -> av[n/4] = av limit
- mov [esp + 96], edi
- mov [esp + 104], edx
- mov [esp + 108], ecx
- lea ecx, [esp + 0] // -> expanded V = bv[0]
- lea esi, [esp + 32] // -> expanded M = mi
- lea edx, [esp + 64] // -> space for Y
+ mov [SP + 96], edi
+ mov [SP + 104], edx
+ mov [SP + 108], ecx
+ lea ecx, [SP + 0] // -> expanded V = bv[0]
+ lea esi, [SP + 32] // -> expanded M = mi
+ lea edx, [SP + 64] // -> space for Y
call mmul4
- mov esi, [esp + 104] // recover av limit
+ mov esi, [SP + 104] // recover av limit
add edi, 16
add eax, 16
add ebx, 16
cmp eax, esi // done already?
jae 8f
- mov [esp + 96], edi
+ mov [SP + 96], edi
.p2align 4
// Complete the first inner loop.
// Embark on the next iteration. (There must be one. If n = 1, then
// we would have bailed above, to label 8. Similarly, the subsequent
// iterations can fall into the inner loop immediately.)
-1: mov eax, [esp + 100] // -> bv[i - 1]
- mov edi, [esp + 96] // -> Z = dv[i]
+1: mov eax, [SP + 100] // -> bv[i - 1]
+ mov edi, [SP + 96] // -> Z = dv[i]
add eax, 16 // -> bv[i]
pxor xmm7, xmm7
- mov [esp + 100], eax
- cmp eax, [esp + 108] // done yet?
+ mov [SP + 100], eax
+ cmp eax, [SP + 108] // done yet?
jae 9f
movdqu xmm0, [eax] // bv[i]
- mov ebx, [ebp + 32] // -> X = nv[0]
- lea esi, [esp + 32] // -> expanded M = mi
- mov eax, [ebp + 24] // -> U = av[0]
+ mov ebx, [BP + 32] // -> X = nv[0]
+ lea esi, [SP + 32] // -> expanded M = mi
+ mov eax, [BP + 24] // -> U = av[0]
expand xmm7, xmm0, xmm1
- movdqa [esp + 0], xmm0 // bv[i] expanded low
- movdqa [esp + 16], xmm1 // bv[i] expanded high
+ movdqa [SP + 0], xmm0 // bv[i] expanded low
+ movdqa [SP + 16], xmm1 // bv[i] expanded high
call mmla4
- mov esi, [esp + 104] // recover av limit
+ mov esi, [SP + 104] // recover av limit
add edi, 16
add eax, 16
add ebx, 16
- mov [esp + 96], edi
+ mov [SP + 96], edi
.p2align 4
// Complete the next inner loop.
popreg edi
popreg esi
popreg ebx
- popreg ebp
+ popreg BP
ret
ENDFUNC
// void mpxmont_redc4_x86_sse2(mpw *dv, mpw *dvl, const mpw *nv,
// size_t n, const mpw *mi);
- // Build a stack frame. Arguments will be relative to EBP, as
+ // Build a stack frame. Arguments will be relative to BP, as
// follows.
//
- // ebp + 20 dv
- // ebp + 24 dvl
- // ebp + 28 nv
- // ebp + 32 n (nonzero multiple of 4)
- // ebp + 36 mi
+ // BP + 20 dv
+ // BP + 24 dvl
+ // BP + 28 nv
+ // BP + 32 n (nonzero multiple of 4)
+ // BP + 36 mi
//
- // Locals are relative to ESP, as follows.
+ // Locals are relative to SP, as follows.
//
- // esp + 0 outer loop dv
- // esp + 4 outer dv limit
- // esp + 8 blocks-of-4 dv limit
- // esp + 12 expanded M (32 bytes)
- // esp + 44 expanded Y (32 bytes)
- // esp + 76 (top of locals)
- pushreg ebp
+ // SP + 0 outer loop dv
+ // SP + 4 outer dv limit
+ // SP + 8 blocks-of-4 dv limit
+ // SP + 12 expanded M (32 bytes)
+ // SP + 44 expanded Y (32 bytes)
+ // SP + 76 (top of locals)
+ pushreg BP
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
- and esp, ~15
- sub esp, 76
+ setfp
+ and SP, ~15
+ stalloc 76
endprologue
// Establish the expanded operands and the blocks-of-4 dv limit.
- mov edi, [ebp + 20] // -> Z = dv[0]
+ mov edi, [BP + 20] // -> Z = dv[0]
pxor xmm7, xmm7
- mov eax, [ebp + 24] // -> dv[n] = dv limit
+ mov eax, [BP + 24] // -> dv[n] = dv limit
sub eax, edi // length of dv in bytes
- mov edx, [ebp + 36] // -> mi
+ mov edx, [BP + 36] // -> mi
movdqu xmm0, [edx] // mi
and eax, ~15 // mask off the tail end
expand xmm7, xmm0, xmm1
add eax, edi // find limit
- movdqa [esp + 12], xmm0 // mi expanded low
- movdqa [esp + 28], xmm1 // mi expanded high
- mov [esp + 8], eax
+ movdqa [SP + 12], xmm0 // mi expanded low
+ movdqa [SP + 28], xmm1 // mi expanded high
+ mov [SP + 8], eax
// Set up the outer loop state and prepare for the first iteration.
- mov ecx, [ebp + 32] // n
- mov ebx, [ebp + 28] // -> X = nv[0]
+ mov ecx, [BP + 32] // n
+ mov ebx, [BP + 28] // -> X = nv[0]
lea edx, [edi + 4*ecx] // -> dv[n/4] = outer dv limit
lea ecx, [ebx + 4*ecx] // -> nv[n/4] = nv limit
- mov [esp + 0], edi
- mov [esp + 4], edx
- lea esi, [esp + 12] // -> expanded M = mi
- lea edx, [esp + 44] // -> space for Y
+ mov [SP + 0], edi
+ mov [SP + 4], edx
+ lea esi, [SP + 12] // -> expanded M = mi
+ lea edx, [SP + 44] // -> space for Y
call mont4
add ebx, 16
add edi, 16
// Still have carries left to propagate.
8: carryadd
- mov esi, [esp + 8] // -> dv blocks limit
- mov edx, [ebp + 24] // dv limit
+ mov esi, [SP + 8] // -> dv blocks limit
+ mov edx, [BP + 24] // dv limit
psllq xmm7, 16
pslldq xmm7, 8
paddq xmm6, xmm7
// All done for this iteration. Start the next. (This must have at
// least one follow-on iteration, or we'd not have started this outer
// loop.)
-8: mov edi, [esp + 0] // -> dv[i - 1]
- mov ebx, [ebp + 28] // -> X = nv[0]
- lea edx, [esp + 44] // -> space for Y
- lea esi, [esp + 12] // -> expanded M = mi
+8: mov edi, [SP + 0] // -> dv[i - 1]
+ mov ebx, [BP + 28] // -> X = nv[0]
+ lea edx, [SP + 44] // -> space for Y
+ lea esi, [SP + 12] // -> expanded M = mi
add edi, 16 // -> Z = dv[i]
- cmp edi, [esp + 4] // all done yet?
+ cmp edi, [SP + 4] // all done yet?
jae 9f
- mov [esp + 0], edi
+ mov [SP + 0], edi
call mont4
add edi, 16
add ebx, 16
popreg edi
popreg esi
popreg ebx
- popreg ebp
+ popreg BP
ret
ENDFUNC
.endm
.macro testprologue n
- pushreg ebp
+ pushreg BP
pushreg ebx
pushreg esi
pushreg edi
- setfp ebp
- and esp, ~15
- sub esp, 3*32 + 4*4
+ setfp
+ stalloc 3*32 + 4*4
+ and SP, ~15
endprologue
mov eax, \n
- mov [esp + 104], eax
+ mov [SP + 104], eax
// vars:
- // esp + 0 = v expanded
- // esp + 32 = y expanded
- // esp + 64 = ? expanded
- // esp + 96 = cycles
- // esp + 104 = count
+ // SP + 0 = v expanded
+ // SP + 32 = y expanded
+ // SP + 64 = ? expanded
+ // SP + 96 = cycles
+ // SP + 104 = count
.endm
.macro testepilogue
popreg edi
popreg esi
popreg ebx
- popreg ebp
+ popreg BP
ret
.endm
mov ecx, \v
movdqu xmm0, [ecx]
expand xmm7, xmm0, xmm1
- movdqa [esp + 0], xmm0
- movdqa [esp + 16], xmm1
+ movdqa [SP + 0], xmm0
+ movdqa [SP + 16], xmm1
.endif
.ifnes "\y", "nil"
mov edx, \y
movdqu xmm2, [edx]
expand xmm7, xmm2, xmm3
- movdqa [esp + 32], xmm2
- movdqa [esp + 48], xmm3
+ movdqa [SP + 32], xmm2
+ movdqa [SP + 48], xmm3
.endif
.endm
.p2align 4
0:
.ifnes "\u", "nil"
- lea ecx, [esp + 0]
+ lea ecx, [SP + 0]
.endif
mov ebx, \x
.ifeqs "\mode", "mont"
- lea esi, [esp + 32]
+ lea esi, [SP + 32]
.endif
- cysetup esp + 96
+ cysetup SP + 96
.ifnes "\u", "nil"
mov eax, \u
.endif
.ifeqs "\mode", "mont"
- lea edx, [esp + 64]
+ lea edx, [SP + 64]
.else
- lea edx, [esp + 32]
+ lea edx, [SP + 32]
.endif
.endm
.macro testtail cyv
- cystore esp + 96, \cyv, esp + 104
+ cystore SP + 96, \cyv, SP + 104
jnz 0b
.endm
.endm
FUNC(test_dmul4)
- testprologue [ebp + 44]
- testldcarry [ebp + 24]
- testexpand [ebp + 36], [ebp + 40]
- mov edi, [ebp + 20]
- testtop [ebp + 28], [ebp + 32]
+ testprologue [BP + 44]
+ testldcarry [BP + 24]
+ testexpand [BP + 36], [BP + 40]
+ mov edi, [BP + 20]
+ testtop [BP + 28], [BP + 32]
call dmul4
- testtail [ebp + 48]
- testcarryout [ebp + 24]
+ testtail [BP + 48]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC
FUNC(test_dmla4)
- testprologue [ebp + 44]
- testldcarry [ebp + 24]
- testexpand [ebp + 36], [ebp + 40]
- mov edi, [ebp + 20]
- testtop [ebp + 28], [ebp + 32]
+ testprologue [BP + 44]
+ testldcarry [BP + 24]
+ testexpand [BP + 36], [BP + 40]
+ mov edi, [BP + 20]
+ testtop [BP + 28], [BP + 32]
call dmla4
- testtail [ebp + 48]
- testcarryout [ebp + 24]
+ testtail [BP + 48]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC
FUNC(test_mul4)
- testprologue [ebp + 36]
- testldcarry [ebp + 24]
- testexpand nil, [ebp + 32]
- mov edi, [ebp + 20]
- testtop nil, [ebp + 28]
+ testprologue [BP + 36]
+ testldcarry [BP + 24]
+ testexpand nil, [BP + 32]
+ mov edi, [BP + 20]
+ testtop nil, [BP + 28]
call mul4
- testtail [ebp + 40]
- testcarryout [ebp + 24]
+ testtail [BP + 40]
+ testcarryout [BP + 24]
+ testepilogue
+ENDFUNC
+
+FUNC(test_mul4zc)
+ testprologue [BP + 36]
+ testldcarry [BP + 24]
+ testexpand nil, [BP + 32]
+ mov edi, [BP + 20]
+ testtop nil, [BP + 28]
+ call mul4zc
+ testtail [BP + 40]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC
FUNC(test_mla4)
- testprologue [ebp + 36]
- testldcarry [ebp + 24]
- testexpand nil, [ebp + 32]
- mov edi, [ebp + 20]
- testtop nil, [ebp + 28]
+ testprologue [BP + 36]
+ testldcarry [BP + 24]
+ testexpand nil, [BP + 32]
+ mov edi, [BP + 20]
+ testtop nil, [BP + 28]
call mla4
- testtail [ebp + 40]
- testcarryout [ebp + 24]
+ testtail [BP + 40]
+ testcarryout [BP + 24]
+ testepilogue
+ENDFUNC
+
+FUNC(test_mla4zc)
+ testprologue [BP + 36]
+ testldcarry [BP + 24]
+ testexpand nil, [BP + 32]
+ mov edi, [BP + 20]
+ testtop nil, [BP + 28]
+ call mla4zc
+ testtail [BP + 40]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC
FUNC(test_mmul4)
- testprologue [ebp + 48]
- testexpand [ebp + 40], [ebp + 44]
- mov edi, [ebp + 20]
- testtop [ebp + 32], [ebp + 36], mont
+ testprologue [BP + 48]
+ testexpand [BP + 40], [BP + 44]
+ mov edi, [BP + 20]
+ testtop [BP + 32], [BP + 36], mont
call mmul4
- testtail [ebp + 52]
- mov edi, [ebp + 28]
- movdqa xmm0, [esp + 64]
- movdqa xmm1, [esp + 80]
+ testtail [BP + 52]
+ mov edi, [BP + 28]
+ movdqa xmm0, [SP + 64]
+ movdqa xmm1, [SP + 80]
movdqu [edi], xmm0
movdqu [edi + 16], xmm1
- testcarryout [ebp + 24]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC
FUNC(test_mmla4)
- testprologue [ebp + 48]
- testexpand [ebp + 40], [ebp + 44]
- mov edi, [ebp + 20]
- testtop [ebp + 32], [ebp + 36], mont
+ testprologue [BP + 48]
+ testexpand [BP + 40], [BP + 44]
+ mov edi, [BP + 20]
+ testtop [BP + 32], [BP + 36], mont
call mmla4
- testtail [ebp + 52]
- mov edi, [ebp + 28]
- movdqa xmm0, [esp + 64]
- movdqa xmm1, [esp + 80]
+ testtail [BP + 52]
+ mov edi, [BP + 28]
+ movdqa xmm0, [SP + 64]
+ movdqa xmm1, [SP + 80]
movdqu [edi], xmm0
movdqu [edi + 16], xmm1
- testcarryout [ebp + 24]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC
FUNC(test_mont4)
- testprologue [ebp + 40]
- testexpand nil, [ebp + 36]
- mov edi, [ebp + 20]
- testtop nil, [ebp + 32], mont
+ testprologue [BP + 40]
+ testexpand nil, [BP + 36]
+ mov edi, [BP + 20]
+ testtop nil, [BP + 32], mont
call mont4
- testtail [ebp + 44]
- mov edi, [ebp + 28]
- movdqa xmm0, [esp + 64]
- movdqa xmm1, [esp + 80]
+ testtail [BP + 44]
+ mov edi, [BP + 28]
+ movdqa xmm0, [SP + 64]
+ movdqa xmm1, [SP + 80]
movdqu [edi], xmm0
movdqu [edi + 16], xmm1
- testcarryout [ebp + 24]
+ testcarryout [BP + 24]
testepilogue
ENDFUNC