X-Git-Url: https://git.distorted.org.uk/u/mdw/catacomb/blobdiff_plain/79a340293970d5f9b9c000f82769179f9ba551bd..898a4e2555438ff8adb08b4d82690d08715e1048:/mpmont-mexp.c diff --git a/mpmont-mexp.c b/mpmont-mexp.c index 1d1266f..7589990 100644 --- a/mpmont-mexp.c +++ b/mpmont-mexp.c @@ -1,8 +1,8 @@ /* -*-c-*- * - * $Id: mpmont-mexp.c,v 1.2 1999/11/21 11:35:10 mdw Exp $ + * $Id: mpmont-mexp.c,v 1.8 2004/04/01 12:50:09 mdw Exp $ * - * Multiplle simultaneous exponentiations + * Multiple simultaneous exponentiations * * (c) 1999 Straylight/Edgeware */ @@ -30,6 +30,30 @@ /*----- Revision history --------------------------------------------------* * * $Log: mpmont-mexp.c,v $ + * Revision 1.8 2004/04/01 12:50:09 mdw + * Add cyclic group abstraction, with test code. Separate off exponentation + * functions for better static linking. Fix a buttload of bugs on the way. + * Generally ensure that negative exponents do inversion correctly. Add + * table of standard prime-field subgroups. (Binary field subgroups are + * currently unimplemented but easy to add if anyone ever finds a good one.) + * + * Revision 1.7 2002/01/13 13:49:14 mdw + * Make @const@-correct. + * + * Revision 1.6 2001/06/16 13:00:20 mdw + * Use the generic exponentiation functions. + * + * Revision 1.5 2000/10/08 12:11:22 mdw + * Use @MP_EQ@ instead of @MP_CMP@. + * + * Revision 1.4 2000/06/17 11:45:09 mdw + * Major memory management overhaul. Added arena support. Use the secure + * arena for secret integers. Replace and improve the MP management macros + * (e.g., replace MP_MODIFY by MP_DEST). + * + * Revision 1.3 1999/12/10 23:18:39 mdw + * Change interface for suggested destinations. + * * Revision 1.2 1999/11/21 11:35:10 mdw * Performance improvement: use @mp_sqr@ and @mpmont_reduce@ instead of * @mpmont_mul@ for squaring in exponentiation. @@ -44,155 +68,72 @@ #include "mp.h" #include "mpmont.h" +#define EXP_WINSZ 3 +#include "mpmont-exp.h" + /*----- Main code ---------------------------------------------------------*/ /* --- @mpmont_mexpr@ --- * * * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context - * @mpmont_factor *f@ = pointer to array of factors + * @mp *d@ = fake destination + * @const mp_expfactor *f@ = pointer to array of factors * @size_t n@ = number of factors supplied * * Returns: If the bases are %$g_0, g_1, \ldots, g_{n-1}$% and the * exponents are %$e_0, e_1, \ldots, e_{n-1}$% then the result * is: * - * %$g_0^{e_0} g_1^{e_1} \ldots g_{n-1}^{e_{n-1}} R \bmod m$% + * %$g_0^{e_0} g_1^{e_1} \ldots g_{n-1}^{e_{n-1}} \bmod m$% + * + * except that the %$g_i$% and result are in Montgomery form. */ -typedef struct scan { - size_t len; - mpw w; -} scan; - -mp *mpmont_mexpr(mpmont *mm, mpmont_factor *f, size_t n) +static mp *mexpr(mpmont *mm, mp *d, mp_expfactor *f, size_t n) { - size_t vn = 1 << n; - mp **v = xmalloc(vn * sizeof(mp *)); - scan *s; - size_t o; - unsigned b; mp *a = MP_COPY(mm->r); mp *spare = MP_NEW; + mp *g = MP_NEW; + size_t i; - /* --- Perform the precomputation --- */ - - { - size_t i, j; - size_t mask; - - /* --- Fill in the rest of the array --- * - * - * Zero never gets used. - */ - - j = 0; - mask = 0; - for (i = 1; i < vn; i++) { - - /* --- Check for a new bit entering --- * - * - * If a bit gets set that wasn't set before, then all the lower bits - * are zeroes and I've got to introduce a new base into the array. 
- */ - - if ((i & mask) == 0) { - v[i] = mpmont_mul(mm, MP_NEW, f[j++].base, mm->r2); - mask = i; - } - - /* --- Otherwise I can get away with a single multiplication --- * - * - * In particular, if %$i$% has more than one bit set, then I only need - * to calculate %$v_i = v_{\mathit{mask}} v_{i - \mathit{mask}}$%. - * Since both are less than %$i$%, they must have already been - * computed. - */ - - else - v[i] = mpmont_mul(mm, MP_NEW, v[mask], v[i & ~mask]); - } - } - - /* --- Set up the bitscanners --- * - * - * I must scan the exponents from left to right, which is a shame. It - * means that I can't use the standard @mpscan@ stuff, in particular. - */ - - { - size_t i; - - s = xmalloc(n * sizeof(scan)); - o = 0; - for (i = 0; i < n; i++) { - s[i].len = MP_LEN(f[i].exp); - if (s[i].len > o) - o = s[i].len; - } - b = 0; - } - - /* --- Now do the actual calculation --- */ - - b = 0; - for (;;) { - size_t i; - size_t j; - mp *dd; - - /* --- If no more bits, get some more --- */ - - if (!b) { - if (!o) - break; - o--; - b = MPW_BITS; - } - - /* --- Work out the next index --- */ - - j = 0; - b--; - for (i = 0; i < n; i++) { - if (o < s[i].len) - j |= (((f[i].exp->v[o] >> b) & 1) << i); - } - - /* --- Accumulate the result --- */ - - if (spare) { - dd = mp_sqr(spare, a); - dd = mpmont_reduce(mm, dd, dd); - spare = a; - a = dd; - } - - if (j) { - dd = mpmont_mul(mm, spare, a, v[j]); - spare = a; - a = dd; + for (i = 0; i < n; i++) { + mp *t; + if (f[i].exp->f & MP_BURN) + spare = MP_NEWSEC; + if (f[i].exp->f & MP_NEG) { + t = mpmont_reduce(mm, f[i].base, f[i].base); + mp_gcd(&g, 0, &t, mm->m, t); + assert(MP_EQ(g, MP_ONE)); + f[i].base = mpmont_mul(mm, t, t, mm->r2); } } + mp_drop(g); + EXP_SIMUL(a, f, n); + mp_drop(d); + mp_drop(spare); + for (i = 0; i < n; i++) + MP_DROP(f[i].base); + xfree(f); + return (a); +} - /* --- Tidy up afterwards --- */ +mp *mpmont_mexpr(mpmont *mm, mp *d, const mp_expfactor *f, size_t n) +{ + mp_expfactor *ff = xmalloc(n * sizeof(mp_expfactor)); + size_t i; - { - size_t i; - for (i = 1; i < vn; i++) - MP_DROP(v[i]); - if (spare) - MP_DROP(spare); - free(v); - free(s); + for (i = 0; i < n; i++) { + ff[i].base = MP_COPY(f[i].base); + ff[i].exp = f[i].exp; } - - return (a); + return (mexpr(mm, d, ff, n)); } /* --- @mpmont_mexp@ --- * * * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context - * @mpmont_factor *f@ = pointer to array of factors + * @mp *d@ = fake destination + * @const mp_expfactor *f@ = pointer to array of factors * @size_t n@ = number of factors supplied * * Returns: Product of bases raised to exponents, all mod @m@. @@ -200,11 +141,17 @@ mp *mpmont_mexpr(mpmont *mm, mpmont_factor *f, size_t n) * Use: Convenient interface over @mpmont_mexpr@. 
*/ -mp *mpmont_mexp(mpmont *mm, mpmont_factor *f, size_t n) +mp *mpmont_mexp(mpmont *mm, mp *d, const mp_expfactor *f, size_t n) { - mp *d = mpmont_mexpr(mm, f, n); - d = mpmont_reduce(mm, d, d); - return (d); + mp_expfactor *ff = xmalloc(n * sizeof(mp_expfactor)); + size_t i; + + for (i = 0; i < n; i++) { + ff[i].base = mpmont_mul(mm, MP_NEW, f[i].base, mm->r2); + ff[i].exp = f[i].exp; + } + d = mexpr(mm, d, ff, n); + return (mpmont_reduce(mm, d, d)); } /*----- Test rig ----------------------------------------------------------*/ @@ -216,7 +163,7 @@ mp *mpmont_mexp(mpmont *mm, mpmont_factor *f, size_t n) static int verify(size_t n, dstr *v) { mp *m = *(mp **)v[0].buf; - mpmont_factor *f = xmalloc(n * sizeof(*f)); + mp_expfactor *f = xmalloc(n * sizeof(*f)); mp *r, *rr; size_t i, j; mpmont mm; @@ -230,8 +177,8 @@ static int verify(size_t n, dstr *v) rr = *(mp **)v[j].buf; mpmont_create(&mm, m); - r = mpmont_mexp(&mm, f, n); - if (MP_CMP(r, !=, rr)) { + r = mpmont_mexp(&mm, MP_NEW, f, n); + if (!MP_EQ(r, rr)) { fputs("\n*** mexp failed\n", stderr); fputs("m = ", stderr); mp_writefile(m, stderr, 10); for (i = 0; i < n; i++) { @@ -254,6 +201,7 @@ static int verify(size_t n, dstr *v) MP_DROP(r); MP_DROP(rr); mpmont_destroy(&mm); + assert(mparena_count(MPARENA_GLOBAL) == 0); return (ok); }
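
The comments in the removed code above describe the classic simultaneous-exponentiation table: entry %$v_i$% holds the product of the bases whose bits are set in %$i$%, built with one multiplication per entry. The fragment below is a stand-alone sketch of just that indexing trick, using plain unsigned arithmetic modulo a small @m@ rather than Catacomb's @mp@ objects and Montgomery operations, so it is an illustration of the idea and not the library's implementation.

/* Fill v[1 .. 2^n - 1] so that v[i] is the product (mod m) of the
 * bases g[k] for which bit k of i is set; v[0] is never used.  Plain
 * unsigned arithmetic, so this only works for small m -- the real
 * code performs the same walk with Montgomery multiplication.
 */
#include <stddef.h>

static void fill_table(unsigned long *v, const unsigned long *g,
                       size_t n, unsigned long m)
{
  size_t i, j = 0, mask = 0;

  for (i = 1; i < ((size_t)1 << n); i++) {
    if ((i & mask) == 0) {              /* new bit set: bring in the next base */
      v[i] = g[j++] % m;
      mask = i;
    } else                              /* otherwise v[i] = v[mask] v[i - mask] */
      v[i] = (v[mask] * v[i & ~mask]) % m;
  }
}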
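
The later hunks change @mpmont_mexp@ to take a suggested destination and a @const mp_expfactor@ array in place of the old @mpmont_factor@ interface. The sketch below shows how a caller might drive the revised function, modelled on the test rig above; the wrapper name @two_term_mexp@ and the assumption that the caller has already built the modulus, bases and exponents as @mp@ objects are illustrative, not part of Catacomb's API.

#include "mp.h"
#include "mpmont.h"

/* Compute g_0^{e_0} g_1^{e_1} mod m for an odd modulus m (as Montgomery
 * reduction requires), using the revised simultaneous-exponentiation
 * interface.  All arguments are mp objects the caller already owns; the
 * const factor array means the caller keeps its references to the bases
 * and exponents.
 */
static mp *two_term_mexp(mp *m, mp *g0, mp *e0, mp *g1, mp *e1)
{
  mpmont mm;
  mp_expfactor f[2];
  mp *r;

  f[0].base = g0; f[0].exp = e0;
  f[1].base = g1; f[1].exp = e1;

  mpmont_create(&mm, m);                /* set up the reduction context */
  r = mpmont_mexp(&mm, MP_NEW, f, 2);   /* result is already reduced, i.e.
                                         * not in Montgomery form */
  mpmont_destroy(&mm);
  return (r);                           /* caller releases with MP_DROP */
}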