X-Git-Url: https://git.distorted.org.uk/~mdw/catacomb/blobdiff_plain/2aaa07f8c724ca7230ea6c23e3ab8f337fd91999..d0d41c6ebfbfebca8dbb516a1de4107c82b1bc6b:/math/mpx-mul4-x86-sse2.S diff --git a/math/mpx-mul4-x86-sse2.S b/math/mpx-mul4-x86-sse2.S index 0b57dbfd..baf7cc50 100644 --- a/math/mpx-mul4-x86-sse2.S +++ b/math/mpx-mul4-x86-sse2.S @@ -93,7 +93,7 @@ ///-------------------------------------------------------------------------- /// Macro definitions. -.macro mulcore r, s, d0, d1, d2, d3 +.macro mulcore r, s, d0, d1=nil, d2=nil, d3=nil // Load a word r_i from R, multiply by the expanded operand [S], and // leave the pieces of the product in registers D0, D1, D2, D3. movd \d0, \r // (r_i, 0, 0, 0) @@ -133,7 +133,10 @@ pmuludq \d0, [\s] // (r_i s'_0, r_i s''_0) .endm -.macro accum c0, c1, c2, c3 +.macro accum c0, c1=nil, c2=nil, c3=nil + // Accumulate 64-bit pieces in XMM0--XMM3 into the corresponding + // carry registers C0--C3. Any or all of C1--C3 may be `nil' to skip + // updating that register. paddq \c0, xmm0 .ifnes "\c1", "nil" paddq \c1, xmm1 @@ -146,7 +149,7 @@ .endif .endm -.macro mulacc r, s, c0, c1, c2, c3, z3p +.macro mulacc r, s, c0, c1, c2, c3, z3p=nil // Load a word r_i from R, multiply by the expanded operand [S], // and accumulate in carry registers C0, C1, C2, C3. If Z3P is `t' // then C3 notionally contains zero, but needs clearing; in practice, @@ -155,14 +158,14 @@ // is not `t'. .ifeqs "\z3p", "t" mulcore \r, \s, xmm0, xmm1, xmm2, \c3 - accum \c0, \c1, \c2, nil + accum \c0, \c1, \c2 .else mulcore \r, \s, xmm0, xmm1, xmm2, xmm3 accum \c0, \c1, \c2, \c3 .endif .endm -.macro propout d, c, cc +.macro propout d, c, cc=nil // Calculate an output word from C, and store it in D; propagate // carries out from C to CC in preparation for a rotation of the // carry registers. On completion, XMM3 is clobbered. If CC is @@ -192,7 +195,7 @@ psrldq \t, 4 // floor((c' + c'' b)/B) .endm -.macro expand a, b, c, d, z +.macro expand z, a, b, c=nil, d=nil // On entry, A and C hold packed 128-bit values, and Z is zero. On // exit, A:B and C:D together hold the same values in expanded // form. If C is `nil', then only expand A to A:B. @@ -214,7 +217,7 @@ .endif .endm -.macro squash lo, hi, c0, c1, c2, c3, t, u +.macro squash c0, c1, c2, c3, t, u, lo, hi=nil // On entry, C0, C1, C2, C3 are carry registers representing a value // Y. 
On exit, LO holds the low 128 bits of the carry value; C1, C2, // C3, T, and U are clobbered; and the high bits of Y are stored in @@ -331,19 +334,19 @@ INTFUNC(dmul4) endprologue mulacc [eax + 0], ecx, xmm4, xmm5, xmm6, xmm7, t - mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7, nil + mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 mulacc [eax + 4], ecx, xmm5, xmm6, xmm7, xmm4, t - mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4, nil + mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4 propout [edi + 4], xmm5, xmm6 mulacc [eax + 8], ecx, xmm6, xmm7, xmm4, xmm5, t - mulacc [ebx + 8], edx, xmm6, xmm7, xmm4, xmm5, nil + mulacc [ebx + 8], edx, xmm6, xmm7, xmm4, xmm5 propout [edi + 8], xmm6, xmm7 mulacc [eax + 12], ecx, xmm7, xmm4, xmm5, xmm6, t - mulacc [ebx + 12], edx, xmm7, xmm4, xmm5, xmm6, nil + mulacc [ebx + 12], edx, xmm7, xmm4, xmm5, xmm6 propout [edi + 12], xmm7, xmm4 ret @@ -366,20 +369,20 @@ INTFUNC(dmla4) carryadd - mulacc [eax + 0], ecx, xmm4, xmm5, xmm6, xmm7, nil - mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7, nil + mulacc [eax + 0], ecx, xmm4, xmm5, xmm6, xmm7 + mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 mulacc [eax + 4], ecx, xmm5, xmm6, xmm7, xmm4, t - mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4, nil + mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4 propout [edi + 4], xmm5, xmm6 mulacc [eax + 8], ecx, xmm6, xmm7, xmm4, xmm5, t - mulacc [ebx + 8], edx, xmm6, xmm7, xmm4, xmm5, nil + mulacc [ebx + 8], edx, xmm6, xmm7, xmm4, xmm5 propout [edi + 8], xmm6, xmm7 mulacc [eax + 12], ecx, xmm7, xmm4, xmm5, xmm6, t - mulacc [ebx + 12], edx, xmm7, xmm4, xmm5, xmm6, nil + mulacc [ebx + 12], edx, xmm7, xmm4, xmm5, xmm6 propout [edi + 12], xmm7, xmm4 ret @@ -456,7 +459,7 @@ INTFUNC(mla4zc) movd xmm6, [edi + 8] movd xmm7, [edi + 12] - mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7, nil + mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4, t @@ -487,7 +490,7 @@ INTFUNC(mla4) carryadd - mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7, nil + mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4, t @@ -508,14 +511,13 @@ INTFUNC(mmul4) // to the packed operands U and N; ECX and ESI point to the expanded // operands V and M; and EDX points to a place to store an expanded // result Y (32 bytes, at a 16-byte boundary). The stack pointer - // must be 16-byte aligned. (This is not the usual convention, which - // requires alignment before the call.) + // must be 12 modulo 16, as is usual for modern x86 ABIs. // // On exit, we write Y = U V M mod B to [EDX], and the low 128 bits // of the sum U V + N Y to [EDI], leaving the remaining carry in // XMM4, XMM5, and XMM6. The registers XMM0, XMM1, XMM2, XMM3, and // XMM7 are clobbered; the general-purpose registers are preserved. - stalloc 48 // space for the carries + stalloc 48 + 12 // space for the carries endprologue // Calculate W = U V, and leave it in the destination. Stash the @@ -528,26 +530,28 @@ ENDFUNC INTFUNC(mmla4) // On entry, EDI points to the destination buffer, which also - // contains an addend A to accumulate; EAX and EBX point - // to the packed operands U and N; ECX and ESI point to the expanded + // contains an addend A to accumulate; EAX and EBX point to the + // packed operands U and N; ECX and ESI point to the expanded // operands V and M; and EDX points to a place to store an expanded // result Y (32 bytes, at a 16-byte boundary). 
The stack pointer - // must be 16-byte aligned. (This is not the usual convention, which - // requires alignment before the call.) + // must be 12 modulo 16, as is usual for modern x86 ABIs. // // On exit, we write Y = (A + U V) M mod B to [EDX], and the low 128 // bits of the sum A + U V + N Y to [EDI], leaving the remaining // carry in XMM4, XMM5, and XMM6. The registers XMM0, XMM1, XMM2, // XMM3, and XMM7 are clobbered; the general-purpose registers are // preserved. - stalloc 48 // space for the carries + stalloc 48 + 12 // space for the carries endprologue movd xmm4, [edi + 0] movd xmm5, [edi + 4] movd xmm6, [edi + 8] movd xmm7, [edi + 12] - mulacc [eax + 0], ecx, xmm4, xmm5, xmm6, xmm7, nil + + // Calculate W = U V, and leave it in the destination. Stash the + // carry pieces for later. + mulacc [eax + 0], ecx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 5: mulacc [eax + 4], ecx, xmm5, xmm6, xmm7, xmm4, t @@ -566,21 +570,21 @@ INTFUNC(mmla4) // Calculate Y = W M. mulcore [edi + 0], esi, xmm4, xmm5, xmm6, xmm7 - mulcore [edi + 4], esi, xmm0, xmm1, xmm2, nil - accum xmm5, xmm6, xmm7, nil + mulcore [edi + 4], esi, xmm0, xmm1, xmm2 + accum xmm5, xmm6, xmm7 - mulcore [edi + 8], esi, xmm0, xmm1, nil, nil - accum xmm6, xmm7, nil, nil + mulcore [edi + 8], esi, xmm0, xmm1 + accum xmm6, xmm7 - mulcore [edi + 12], esi, xmm0, nil, nil, nil - accum xmm7, nil, nil, nil + mulcore [edi + 12], esi, xmm0 + accum xmm7 // That's lots of pieces. Now we have to assemble the answer. - squash xmm4, nil, xmm4, xmm5, xmm6, xmm7, xmm0, xmm1 + squash xmm4, xmm5, xmm6, xmm7, xmm0, xmm1, xmm4 // Expand it. pxor xmm2, xmm2 - expand xmm4, xmm1, nil, nil, xmm2 + expand xmm2, xmm4, xmm1 movdqa [edx + 0], xmm4 movdqa [edx + 16], xmm1 @@ -591,7 +595,7 @@ INTFUNC(mmla4) movd xmm7, [edi + 12] // Finish the calculation by adding the Montgomery product. - mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7, nil + mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4, t @@ -609,7 +613,7 @@ INTFUNC(mmla4) paddq xmm6, [esp + 32] // And, with that, we're done. - stfree 48 + stfree 48 + 12 ret ENDFUNC @@ -629,21 +633,21 @@ INTFUNC(mont4) // Calculate Y = W M. mulcore [edi + 0], esi, xmm4, xmm5, xmm6, xmm7 - mulcore [edi + 4], esi, xmm0, xmm1, xmm2, nil - accum xmm5, xmm6, xmm7, nil + mulcore [edi + 4], esi, xmm0, xmm1, xmm2 + accum xmm5, xmm6, xmm7 - mulcore [edi + 8], esi, xmm0, xmm1, nil, nil - accum xmm6, xmm7, nil, nil + mulcore [edi + 8], esi, xmm0, xmm1 + accum xmm6, xmm7 - mulcore [edi + 12], esi, xmm0, nil, nil, nil - accum xmm7, nil, nil, nil + mulcore [edi + 12], esi, xmm0 + accum xmm7 // That's lots of pieces. Now we have to assemble the answer. - squash xmm4, nil, xmm4, xmm5, xmm6, xmm7, xmm0, xmm1 + squash xmm4, xmm5, xmm6, xmm7, xmm0, xmm1, xmm4 // Expand it. pxor xmm2, xmm2 - expand xmm4, xmm1, nil, nil, xmm2 + expand xmm2, xmm4, xmm1 movdqa [edx + 0], xmm4 movdqa [edx + 16], xmm1 @@ -654,7 +658,7 @@ INTFUNC(mont4) movd xmm7, [edi + 12] // Finish the calculation by adding the Montgomery product. 
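	// (Here the expanded M is assumed to be -N^{-1} (mod B), so
	// Y = W M (mod B) makes W + N Y an exact multiple of B: the four
	// words stored below are the low 128 bits of that multiple, and
	// only the carry left above them is interesting.)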
- mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7, nil + mulacc [ebx + 0], edx, xmm4, xmm5, xmm6, xmm7 propout [edi + 0], xmm4, xmm5 mulacc [ebx + 4], edx, xmm5, xmm6, xmm7, xmm4, t @@ -706,7 +710,7 @@ FUNC(mpx_umul4_x86_sse2) movdqu xmm0, [esi] // bv[0] mov edi, [ebp + 20] // -> dv[0] mov ecx, edi // outer loop dv cursor - expand xmm0, xmm1, nil, nil, xmm7 + expand xmm7, xmm0, xmm1 mov ebx, [ebp + 24] // -> av[0] mov eax, [ebp + 28] // -> av[m] = av limit mov edx, esp // -> expanded Y = bv[0] @@ -738,7 +742,7 @@ FUNC(mpx_umul4_x86_sse2) 1: movdqu xmm0, [esi] // bv[i] mov edi, ecx // -> dv[i] pxor xmm7, xmm7 - expand xmm0, xmm1, nil, nil, xmm7 + expand xmm7, xmm0, xmm1 mov ebx, [ebp + 24] // -> av[0] movdqa [esp + 0], xmm0 // bv[i] expanded low movdqa [esp + 16], xmm1 // bv[i] expanded high @@ -788,24 +792,23 @@ FUNC(mpxmont_mul4_x86_sse2) // ebp + 36 n (nonzero multiple of 4) // ebp + 40 mi // - // Locals are relative to ESP, which is 4 mod 16, as follows. + // Locals are relative to ESP, which 16-byte aligned, as follows. // - // esp + 0 outer loop dv - // esp + 4 outer loop bv - // esp + 8 av limit (mostly in ESI) - // esp + 12 expanded V (32 bytes) - // esp + 44 expanded M (32 bytes) - // esp + 76 expanded Y (32 bytes) + // esp + 0 expanded V (32 bytes) + // esp + 32 expanded M (32 bytes) + // esp + 64 expanded Y (32 bytes) + // esp + 96 outer loop dv + // esp + 100 outer loop bv + // esp + 104 av limit (mostly in ESI) // esp + 108 bv limit - // esp + 112 (gap) - // esp + 124 (top of locals) + // esp + 112 (top of locals) pushreg ebp pushreg ebx pushreg esi pushreg edi setfp ebp and esp, ~15 - sub esp, 124 + sub esp, 112 endprologue // Establish the expanded operands. @@ -814,34 +817,34 @@ FUNC(mpxmont_mul4_x86_sse2) mov edx, [ebp + 40] // -> mi movdqu xmm0, [ecx] // bv[0] movdqu xmm2, [edx] // mi - expand xmm0, xmm1, xmm2, xmm3, xmm7 - movdqa [esp + 12], xmm0 // bv[0] expanded low - movdqa [esp + 28], xmm1 // bv[0] expanded high - movdqa [esp + 44], xmm2 // mi expanded low - movdqa [esp + 60], xmm3 // mi expanded high + expand xmm7, xmm0, xmm1, xmm2, xmm3 + movdqa [esp + 0], xmm0 // bv[0] expanded low + movdqa [esp + 16], xmm1 // bv[0] expanded high + movdqa [esp + 32], xmm2 // mi expanded low + movdqa [esp + 48], xmm3 // mi expanded high // Set up the outer loop state and prepare for the first iteration. mov edx, [ebp + 36] // n mov eax, [ebp + 24] // -> U = av[0] mov ebx, [ebp + 32] // -> X = nv[0] mov edi, [ebp + 20] // -> Z = dv[0] - mov [esp + 4], ecx + mov [esp + 100], ecx lea ecx, [ecx + 4*edx] // -> bv[n/4] = bv limit lea edx, [eax + 4*edx] // -> av[n/4] = av limit - mov [esp + 0], edi + mov [esp + 96], edi + mov [esp + 104], edx mov [esp + 108], ecx - mov [esp + 8], edx - lea ecx, [esp + 12] // -> expanded V = bv[0] - lea esi, [esp + 44] // -> expanded M = mi - lea edx, [esp + 76] // -> space for Y + lea ecx, [esp + 0] // -> expanded V = bv[0] + lea esi, [esp + 32] // -> expanded M = mi + lea edx, [esp + 64] // -> space for Y call mmul4 - mov esi, [esp + 8] // recover av limit + mov esi, [esp + 104] // recover av limit add edi, 16 add eax, 16 add ebx, 16 cmp eax, esi // done already? jae 8f - mov [esp + 0], edi + mov [esp + 96], edi .p2align 4 // Complete the first inner loop. @@ -860,26 +863,26 @@ FUNC(mpxmont_mul4_x86_sse2) // Embark on the next iteration. (There must be one. If n = 1, then // we would have bailed above, to label 8. Similarly, the subsequent // iterations can fall into the inner loop immediately.) 
-1: mov eax, [esp + 4] // -> bv[i - 1] - mov edi, [esp + 0] // -> Z = dv[i] +1: mov eax, [esp + 100] // -> bv[i - 1] + mov edi, [esp + 96] // -> Z = dv[i] add eax, 16 // -> bv[i] pxor xmm7, xmm7 - movdqu xmm0, [eax] // bv[i] - mov [esp + 4], eax + mov [esp + 100], eax cmp eax, [esp + 108] // done yet? jae 9f + movdqu xmm0, [eax] // bv[i] mov ebx, [ebp + 32] // -> X = nv[0] - lea esi, [esp + 44] // -> expanded M = mi + lea esi, [esp + 32] // -> expanded M = mi mov eax, [ebp + 24] // -> U = av[0] - expand xmm0, xmm1, nil, nil, xmm7 - movdqa [esp + 12], xmm0 // bv[i] expanded low - movdqa [esp + 28], xmm1 // bv[i] expanded high + expand xmm7, xmm0, xmm1 + movdqa [esp + 0], xmm0 // bv[i] expanded low + movdqa [esp + 16], xmm1 // bv[i] expanded high call mmla4 - mov esi, [esp + 8] // recover av limit + mov esi, [esp + 104] // recover av limit add edi, 16 add eax, 16 add ebx, 16 - mov [esp + 0], edi + mov [esp + 96], edi .p2align 4 // Complete the next inner loop. @@ -954,7 +957,7 @@ FUNC(mpxmont_redc4_x86_sse2) mov edx, [ebp + 36] // -> mi movdqu xmm0, [edx] // mi and eax, ~15 // mask off the tail end - expand xmm0, xmm1, nil, nil, xmm7 + expand xmm7, xmm0, xmm1 add eax, edi // find limit movdqa [esp + 12], xmm0 // mi expanded low movdqa [esp + 28], xmm1 // mi expanded high @@ -970,8 +973,8 @@ FUNC(mpxmont_redc4_x86_sse2) lea esi, [esp + 12] // -> expanded M = mi lea edx, [esp + 44] // -> space for Y call mont4 - add edi, 16 add ebx, 16 + add edi, 16 cmp ebx, ecx // done already? jae 8f @@ -1065,20 +1068,23 @@ ENDFUNC mov [ebx + ecx*8 + 4], edx .endm -.macro testprologue +.macro testprologue n pushreg ebp pushreg ebx pushreg esi pushreg edi setfp ebp and esp, ~15 - sub esp, 3*32 + 12 + sub esp, 3*32 + 4*4 endprologue + mov eax, \n + mov [esp + 104], eax // vars: - // esp + 0 = cycles - // esp + 12 = v expanded - // esp + 44 = y expanded - // esp + 72 = ? expanded + // esp + 0 = v expanded + // esp + 32 = y expanded + // esp + 64 = ? 
expanded + // esp + 96 = cycles + // esp + 104 = count .endm .macro testepilogue @@ -1097,47 +1103,47 @@ ENDFUNC movdqu xmm6, [ecx + 32] // (c'_2, c''_2) .endm -.macro testexpand v, y +.macro testexpand v=nil, y=nil pxor xmm7, xmm7 .ifnes "\v", "nil" mov ecx, \v movdqu xmm0, [ecx] - expand xmm0, xmm1, nil, nil, xmm7 - movdqa [esp + 12], xmm0 - movdqa [esp + 28], xmm1 + expand xmm7, xmm0, xmm1 + movdqa [esp + 0], xmm0 + movdqa [esp + 16], xmm1 .endif .ifnes "\y", "nil" mov edx, \y movdqu xmm2, [edx] - expand xmm2, xmm3, nil, nil, xmm7 - movdqa [esp + 44], xmm2 - movdqa [esp + 60], xmm3 + expand xmm7, xmm2, xmm3 + movdqa [esp + 32], xmm2 + movdqa [esp + 48], xmm3 .endif .endm -.macro testtop u, x, mode +.macro testtop u=nil, x=nil, mode=nil .p2align 4 0: .ifnes "\u", "nil" - lea ecx, [esp + 12] + lea ecx, [esp + 0] .endif mov ebx, \x .ifeqs "\mode", "mont" - lea esi, [esp + 44] + lea esi, [esp + 32] .endif - cysetup esp + 0 + cysetup esp + 96 .ifnes "\u", "nil" mov eax, \u .endif .ifeqs "\mode", "mont" - lea edx, [esp + 76] + lea edx, [esp + 64] .else - lea edx, [esp + 44] + lea edx, [esp + 32] .endif .endm -.macro testtail cyv, n - cystore esp + 0, \cyv, \n +.macro testtail cyv + cystore esp + 96, \cyv, esp + 104 jnz 0b .endm @@ -1149,63 +1155,87 @@ ENDFUNC .endm FUNC(test_dmul4) - testprologue + testprologue [ebp + 44] testldcarry [ebp + 24] testexpand [ebp + 36], [ebp + 40] mov edi, [ebp + 20] testtop [ebp + 28], [ebp + 32] call dmul4 - testtail [ebp + 48], [ebp + 44] + testtail [ebp + 48] testcarryout [ebp + 24] testepilogue ENDFUNC FUNC(test_dmla4) - testprologue + testprologue [ebp + 44] testldcarry [ebp + 24] testexpand [ebp + 36], [ebp + 40] mov edi, [ebp + 20] testtop [ebp + 28], [ebp + 32] call dmla4 - testtail [ebp + 48], [ebp + 44] + testtail [ebp + 48] testcarryout [ebp + 24] testepilogue ENDFUNC FUNC(test_mul4) - testprologue + testprologue [ebp + 36] testldcarry [ebp + 24] testexpand nil, [ebp + 32] mov edi, [ebp + 20] testtop nil, [ebp + 28] call mul4 - testtail [ebp + 40], [ebp + 36] + testtail [ebp + 40] + testcarryout [ebp + 24] + testepilogue +ENDFUNC + +FUNC(test_mul4zc) + testprologue [ebp + 36] + testldcarry [ebp + 24] + testexpand nil, [ebp + 32] + mov edi, [ebp + 20] + testtop nil, [ebp + 28] + call mul4zc + testtail [ebp + 40] testcarryout [ebp + 24] testepilogue ENDFUNC FUNC(test_mla4) - testprologue + testprologue [ebp + 36] testldcarry [ebp + 24] testexpand nil, [ebp + 32] mov edi, [ebp + 20] testtop nil, [ebp + 28] call mla4 - testtail [ebp + 40], [ebp + 36] + testtail [ebp + 40] + testcarryout [ebp + 24] + testepilogue +ENDFUNC + +FUNC(test_mla4zc) + testprologue [ebp + 36] + testldcarry [ebp + 24] + testexpand nil, [ebp + 32] + mov edi, [ebp + 20] + testtop nil, [ebp + 28] + call mla4zc + testtail [ebp + 40] testcarryout [ebp + 24] testepilogue ENDFUNC FUNC(test_mmul4) - testprologue + testprologue [ebp + 48] testexpand [ebp + 40], [ebp + 44] mov edi, [ebp + 20] testtop [ebp + 32], [ebp + 36], mont call mmul4 - testtail [ebp + 52], [ebp + 48] + testtail [ebp + 52] mov edi, [ebp + 28] - movdqa xmm0, [esp + 76] - movdqa xmm1, [esp + 92] + movdqa xmm0, [esp + 64] + movdqa xmm1, [esp + 80] movdqu [edi], xmm0 movdqu [edi + 16], xmm1 testcarryout [ebp + 24] @@ -1213,15 +1243,15 @@ FUNC(test_mmul4) ENDFUNC FUNC(test_mmla4) - testprologue + testprologue [ebp + 48] testexpand [ebp + 40], [ebp + 44] mov edi, [ebp + 20] testtop [ebp + 32], [ebp + 36], mont call mmla4 - testtail [ebp + 52], [ebp + 48] + testtail [ebp + 52] mov edi, [ebp + 28] - movdqa xmm0, [esp + 76] - 
movdqa xmm1, [esp + 92] + movdqa xmm0, [esp + 64] + movdqa xmm1, [esp + 80] movdqu [edi], xmm0 movdqu [edi + 16], xmm1 testcarryout [ebp + 24] @@ -1229,15 +1259,15 @@ FUNC(test_mmla4) ENDFUNC FUNC(test_mont4) - testprologue + testprologue [ebp + 40] testexpand nil, [ebp + 36] mov edi, [ebp + 20] testtop nil, [ebp + 32], mont call mont4 - testtail [ebp + 44], [ebp + 40] + testtail [ebp + 44] mov edi, [ebp + 28] - movdqa xmm0, [esp + 76] - movdqa xmm1, [esp + 92] + movdqa xmm0, [esp + 64] + movdqa xmm1, [esp + 80] movdqu [edi], xmm0 movdqu [edi + 16], xmm1 testcarryout [ebp + 24]
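
For reference, the arithmetic that `mmul4', `mmla4', and `mont4' implement
can be modelled in portable C.  The sketch below is only an illustration:
the function names, the flat nine-word accumulator, and the digit-by-digit
loops are assumptions made for clarity, not the SSE2 register layout the
assembler code uses.  It follows the conventions of the comments above,
with B = 2^128 held as four 32-bit words, and assumes the usual Montgomery
setup M = -N^{-1} (mod B), so that Y = (A + U V) M mod B and adding N Y
makes the bottom 128 bits of the accumulator vanish, leaving only a carry.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* r += a b, where a and b are four 32-bit words each and r is a
 * nine-word accumulator with room for all of the carries.
 */
static void mla4x4(uint32_t r[9], const uint32_t a[4], const uint32_t b[4])
{
  for (int i = 0; i < 4; i++) {
    uint64_t c = 0;
    for (int j = 0; j < 4; j++) {
      uint64_t t = (uint64_t)a[i]*b[j] + r[i + j] + c;
      r[i + j] = (uint32_t)t; c = t >> 32;
    }
    for (int k = i + 4; c; k++) {       /* propagate the final carry up */
      uint64_t t = (uint64_t)r[k] + c;
      r[k] = (uint32_t)t; c = t >> 32;
    }
  }
}

/* y = x m mod B, i.e., keep only the low four words of the product. */
static void mul4lo(uint32_t y[4], const uint32_t x[4], const uint32_t m[4])
{
  uint32_t r[4] = { 0 };
  for (int i = 0; i < 4; i++) {
    uint64_t c = 0;
    for (int j = 0; i + j < 4; j++) {
      uint64_t t = (uint64_t)x[i]*m[j] + r[i + j] + c;
      r[i + j] = (uint32_t)t; c = t >> 32;
    }
  }
  for (int i = 0; i < 4; i++) y[i] = r[i];
}

/* One step in the style of mmla4: on entry z holds the addend A (and
 * any carry already accumulated); on exit z[0..3] are zero, the words
 * above them hold the carry, and y holds Y = (A + U V) M mod B.
 */
static void mmla4_model(uint32_t z[9], uint32_t y[4],
                        const uint32_t u[4], const uint32_t v[4],
                        const uint32_t n[4], const uint32_t m[4])
{
  mla4x4(z, u, v);                      /* z += U V */
  mul4lo(y, z, m);                      /* Y = z M mod B */
  mla4x4(z, n, y);                      /* z += N Y; now z == 0 (mod B) */
}

int main(void)
{
  /* Toy parameters: N = B - 1, so -N^{-1} mod B is simply 1. */
  uint32_t n[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
  uint32_t m[4] = { 1, 0, 0, 0 };
  uint32_t u[4] = { 5, 0, 0, 0 }, v[4] = { 7, 0, 0, 0 };
  uint32_t z[9] = { 0 }, y[4];

  mmla4_model(z, y, u, v, n, m);
  for (int i = 0; i < 4; i++) assert(!z[i]);    /* low 128 bits cancel */
  printf("y = %u; carry = %u\n", (unsigned)y[0], (unsigned)z[4]);
  return (0);
}

With the toy values in `main', U V = 35 and Y = 35, so A + U V + N Y =
35 B: the low four words cancel and the single surviving carry word is 35,
which is the role the XMM4--XMM6 carry registers play in the assembler
routines above.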