X-Git-Url: https://git.distorted.org.uk/u/mdw/catacomb/blobdiff_plain/1b756626c7f156b987666eb548132f7bf0c161dc..8dd8c294e9f330eb6b975c2b96cf9bbfcd087e5e:/mpx-kmul.c

diff --git a/mpx-kmul.c b/mpx-kmul.c
index 7bb4f40..dd492ad 100644
--- a/mpx-kmul.c
+++ b/mpx-kmul.c
@@ -1,6 +1,6 @@
 /* -*-c-*-
  *
- * $Id: mpx-kmul.c,v 1.2 1999/12/11 10:58:02 mdw Exp $
+ * $Id: mpx-kmul.c,v 1.4 2000/06/17 11:42:11 mdw Exp $
  *
  * Karatsuba's multiplication algorithm
  *
@@ -30,6 +30,13 @@
 /*----- Revision history --------------------------------------------------*
  *
  * $Log: mpx-kmul.c,v $
+ * Revision 1.4  2000/06/17 11:42:11  mdw
+ * Moved the Karatsuba macros into a separate file for better sharing.
+ * Fixed some comments.
+ *
+ * Revision 1.3  1999/12/13 15:35:01  mdw
+ * Simplify and improve.
+ *
  * Revision 1.2  1999/12/11 10:58:02  mdw
  * Remove tweakable comments.
  *
@@ -40,9 +47,11 @@
 
 /*----- Header files ------------------------------------------------------*/
 
+#include <assert.h>
 #include <stdio.h>
 
 #include "mpx.h"
+#include "mpx-kmac.h"
 
 /*----- Tweakables --------------------------------------------------------*/
 
@@ -51,76 +60,6 @@
 #  define KARATSUBA_CUTOFF 2
 #endif
 
-/*----- Addition macros ---------------------------------------------------*/
-
-#define UADD(dv, av, avl) do { \
-  mpw *_dv = (dv); \
-  const mpw *_av = (av), *_avl = (avl); \
-  mpw _c = 0; \
- \
-  while (_av < _avl) { \
-    mpw _a, _b; \
-    mpd _x; \
-    _a = *_av++; \
-    _b = *_dv; \
-    _x = (mpd)_a + (mpd)_b + _c; \
-    *_dv++ = MPW(_x); \
-    _c = _x >> MPW_BITS; \
-  } \
-  while (_c) { \
-    mpd _x = (mpd)*_dv + (mpd)_c; \
-    *_dv++ = MPW(_x); \
-    _c = _x >> MPW_BITS; \
-  } \
-} while (0)
-
-#define UADD2(dv, dvl, av, avl, bv, bvl) do { \
-  mpw *_dv = (dv), *_dvl = (dvl); \
-  const mpw *_av = (av), *_avl = (avl); \
-  const mpw *_bv = (bv), *_bvl = (bvl); \
-  mpw _c = 0; \
- \
-  while (_av < _avl || _bv < _bvl) { \
-    mpw _a, _b; \
-    mpd _x; \
-    _a = (_av < _avl) ? *_av++ : 0; \
-    _b = (_bv < _bvl) ? *_bv++ : 0; \
-    _x = (mpd)_a + (mpd)_b + _c; \
-    *_dv++ = MPW(_x); \
-    _c = _x >> MPW_BITS; \
-  } \
-  *_dv++ = _c; \
-  while (_dv < _dvl) \
-    *_dv++ = 0; \
-} while (0)
-
-#define USUB(dv, av, avl) do { \
-  mpw *_dv = (dv); \
-  const mpw *_av = (av), *_avl = (avl); \
-  mpw _c = 0; \
- \
-  while (_av < _avl) { \
-    mpw _a, _b; \
-    mpd _x; \
-    _a = *_av++; \
-    _b = *_dv; \
-    _x = (mpd)_b - (mpd)_a - _c; \
-    *_dv++ = MPW(_x); \
-    if (_x >> MPW_BITS) \
-      _c = 1; \
-    else \
-      _c = 0; \
-  } \
-  while (_c) { \
-    mpd _x = (mpd)*_dv - (mpd)_c; \
-    *_dv++ = MPW(_x); \
-    if (_x >> MPW_BITS) \
-      _c = 1; \
-    else \
-      _c = 0; \
-  } \
-} while (0)
-
 /*----- Main code ---------------------------------------------------------*/
 
 /* --- @mpx_kmul@ --- *
@@ -168,11 +107,11 @@ void mpx_kmul(mpw *dv, mpw *dvl,
 
   /* --- How the algorithm works --- *
    *
-   * Let %$A = xb + y$% and %$B = ub + v$%.  Then, simply by expanding, %$AB
-   * = x u b^2 + b(x v + y u) + y v$%.  That's not helped any, because I've
-   * got four multiplications, each four times easier than the one I started
-   * with.  However, note that I can rewrite the coefficient of %$b$% as
-   * %$xv + yu = (x + y)(u + v) - xu - yv$%.  The terms %$xu$% and %$yv$%
+   * Let %$A = xb + y$% and %$B = ub + v$%.  Then, simply by expanding,
+   * %$AB = x u b^2 + b(x v + y u) + y v$%.  That's not helped any, because
+   * I've got four multiplications, each four times easier than the one I
+   * started with.  However, note that I can rewrite the coefficient of %$b$%
+   * as %$xv + yu = (x + y)(u + v) - xu - yv$%.  The terms %$xu$% and %$yv$%
    * I've already calculated, and that leaves only one more multiplication to
    * do.  So now I have three multiplications, each four times easier, and
    * that's a win.
@@ -201,64 +140,51 @@ void mpx_kmul(mpw *dv, mpw *dvl,
     avm = avl;
   }
 
-  /* --- Sort out the middle term --- *
-   *
-   * I'm going to keep track of the carry by hand rather than pass it down to
-   * the next level, because it means multiplication by one or zero, which I
-   * can do easily myself.
-   */
+  assert(((void)"Destination too small for Karatsuba multiply",
+          dvl - dv >= 4 * m));
+  assert(((void)"Not enough workspace for Karatsuba multiply",
+          svl - sv >= 4 * m));
+
+  /* --- Sort out the middle term --- */
 
   {
-    unsigned f = 0;
-    enum {
-      carry_a = 1,
-      carry_b = 2
-    };
-
-    mpw *bsv = sv + m, *ssv = bsv + m;
-    mpw *rdv = dv + m, *rdvl = rdv + 2 * m;
-
-    UADD2(sv, bsv + 1, av, avm, avm, avl);
-    if (*bsv)
-      f |= carry_a;
-    UADD2(bsv, ssv + 1, bv, bvm, bvm, bvl);
-    if (*ssv)
-      f |= carry_b;
-    MPX_ZERO(dv, rdv);
+    mpw *bsv = sv + m + 1, *ssv = bsv + m + 1;
+    mpw *rdv = dv + m, *rdvl = rdv + 2 * (m + 2);
+
+    UADD2(sv, bsv, av, avm, avm, avl);
+    UADD2(bsv, ssv, bv, bvm, bvm, bvl);
     if (m > KARATSUBA_CUTOFF)
       mpx_kmul(rdv, rdvl, sv, bsv, bsv, ssv, ssv, svl);
     else
       mpx_umul(rdv, rdvl, sv, bsv, bsv, ssv);
-    MPX_ZERO(rdvl, dvl);
-    rdv += m; rdvl += m;
-    if (f & carry_b)
-      UADD(rdv, sv, bsv);
-    if (f & carry_a)
-      UADD(rdv, bsv, ssv);
-    if (!(~f & (carry_a | carry_b)))
-      MPX_UADDN(rdv + m, rdvl, 1);
   }
 
   /* --- Sort out the other two terms --- */
 
   {
-    mpw *ssv = sv + 2 * m;
+    mpw *svm = sv + m, *svn = svm + m, *ssv = svn + 4;
     mpw *tdv = dv + m;
     mpw *rdv = tdv + m;
 
-    if (m > KARATSUBA_CUTOFF)
-      mpx_kmul(sv, ssv, avm, avl, bvm, bvl, ssv, svl);
-    else
-      mpx_umul(sv, ssv, avm, avl, bvm, bvl);
-    UADD(rdv, sv, ssv);
-    USUB(tdv, sv, ssv);
-
+    if (avl == avm || bvl == bvm)
+      MPX_ZERO(rdv + m + 1, dvl);
+    else {
+      if (m > KARATSUBA_CUTOFF)
+        mpx_kmul(sv, ssv, avm, avl, bvm, bvl, ssv, svl);
+      else
+        mpx_umul(sv, ssv, avm, avl, bvm, bvl);
+      MPX_COPY(rdv + m + 1, dvl, svm + 1, svn);
+      UADD(rdv, sv, svm + 1);
+      USUB(tdv, sv, svn);
+    }
+
     if (m > KARATSUBA_CUTOFF)
       mpx_kmul(sv, ssv, av, avm, bv, bvm, ssv, svl);
    else
      mpx_umul(sv, ssv, av, avm, bv, bvm);
-    USUB(tdv, sv, ssv);
-    UADD(dv, sv, ssv);
+    MPX_COPY(dv, tdv, sv, svm);
+    USUB(tdv, sv, svn);
+    UADD(tdv, svm, svn);
   }
 }
 
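
Editorial note, not part of the patch above: the rewrapped comment in the @@ -168 hunk rests on the identity AB = xu*b^2 + (xv + yu)*b + yv with xv + yu = (x + y)(u + v) - xu - yv, which is what reduces four half-size multiplications to three.  As a quick standalone sanity check of that identity (illustration only, independent of Catacomb's mpw/mpd types), here b = 2^16 and the operands are single 32-bit words:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t A = 0xdeadbeef, B = 0xcafebabe;

  /* Split each operand as A = x*b + y and B = u*b + v, with b = 2^16. */
  uint32_t x = A >> 16, y = A & 0xffff;
  uint32_t u = B >> 16, v = B & 0xffff;

  uint64_t xu = (uint64_t)x * u;                        /* high partial product */
  uint64_t yv = (uint64_t)y * v;                        /* low partial product */
  uint64_t mid = (uint64_t)(x + y) * (u + v) - xu - yv; /* equals xv + yu */

  uint64_t ab = (xu << 32) + (mid << 16) + yv;          /* reassemble A*B */
  assert(ab == (uint64_t)A * B);
  printf("%llx\n", (unsigned long long)ab);
  return 0;
}

The rewritten blocks in the final hunk perform roughly the same reassembly on word vectors: the middle product is written at word offset m, and UADD/USUB then fold in the xu and yv products and cancel their contribution to the middle term.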
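
The UADD2/UADD/USUB macros deleted above (and, per the 1.4 log entry, apparently moved into mpx-kmac.h) do the word-vector carry propagation that the main code relies on.  As a reading aid only, here is a rough plain-function rendering of UADD2's behaviour, with uint32_t/uint64_t standing in for Catacomb's mpw/mpd word types and uadd2 being a name invented for this sketch:

#include <stdint.h>
#include <stdio.h>

/* Sum the word vectors [av, avl) and [bv, bvl) into dv, store the final
 * carry word, and zero-pad the rest of [dv, dvl). */
static void uadd2(uint32_t *dv, uint32_t *dvl,
                  const uint32_t *av, const uint32_t *avl,
                  const uint32_t *bv, const uint32_t *bvl)
{
  uint32_t c = 0;

  while (av < avl || bv < bvl) {
    uint32_t a = (av < avl) ? *av++ : 0;        /* shorter operand reads as zero */
    uint32_t b = (bv < bvl) ? *bv++ : 0;
    uint64_t x = (uint64_t)a + (uint64_t)b + c; /* double-width partial sum */
    *dv++ = (uint32_t)x;                        /* low word of the sum */
    c = (uint32_t)(x >> 32);                    /* carry into the next word */
  }
  *dv++ = c;                                    /* carry word is always written */
  while (dv < dvl)                              /* zero the remaining buffer */
    *dv++ = 0;
}

int main(void)
{
  uint32_t a[2] = { 0xffffffff, 0xffffffff };
  uint32_t b[1] = { 1 };
  uint32_t d[4];
  size_t i;

  uadd2(d, d + 4, a, a + 2, b, b + 1);          /* (2^64 - 1) + 1 = 2^64 */
  for (i = 0; i < 4; i++)
    printf("%08x ", (unsigned)d[i]);            /* 00000000 00000000 00000001 00000000 */
  putchar('\n');
  return 0;
}

UADD and USUB have the same loop shape, but add into or subtract from the destination in place and then propagate the remaining carry or borrow, as the removed definitions above show.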