X-Git-Url: https://git.distorted.org.uk/u/mdw/catacomb/blobdiff_plain/032099d14603e665204747a205af9c8a3f039de6..02d7884df1f33c9c7dc3a14c4b1a5f520ebe090a:/mpmont.c

diff --git a/mpmont.c b/mpmont.c
index 4868bff..bfede63 100644
--- a/mpmont.c
+++ b/mpmont.c
@@ -1,6 +1,6 @@
 /* -*-c-*-
  *
- * $Id: mpmont.c,v 1.11 2000/10/08 12:04:27 mdw Exp $
+ * $Id: mpmont.c,v 1.18 2004/04/03 03:32:05 mdw Exp $
  *
  * Montgomery reduction
  *
@@ -30,6 +30,32 @@
 /*----- Revision history --------------------------------------------------*
  *
  * $Log: mpmont.c,v $
+ * Revision 1.18 2004/04/03 03:32:05 mdw
+ * General robustification.
+ *
+ * Revision 1.17 2004/04/01 12:50:09 mdw
+ * Add cyclic group abstraction, with test code. Separate off exponentation
+ * functions for better static linking. Fix a buttload of bugs on the way.
+ * Generally ensure that negative exponents do inversion correctly. Add
+ * table of standard prime-field subgroups. (Binary field subgroups are
+ * currently unimplemented but easy to add if anyone ever finds a good one.)
+ *
+ * Revision 1.16 2002/01/13 13:40:31 mdw
+ * Avoid trashing arguments before we've used them.
+ *
+ * Revision 1.15 2001/06/16 13:00:20 mdw
+ * Use the generic exponentiation functions.
+ *
+ * Revision 1.14 2001/02/22 09:04:26 mdw
+ * Cosmetic fix.
+ *
+ * Revision 1.13 2001/02/03 12:00:29 mdw
+ * Now @mp_drop@ checks its argument is non-NULL before attempting to free
+ * it. Note that the macro version @MP_DROP@ doesn't do this.
+ *
+ * Revision 1.12 2000/10/08 15:48:35 mdw
+ * Rename Karatsuba constants now that we have @gfx_kmul@ too.
+ *
  * Revision 1.11 2000/10/08 12:04:27 mdw
  * (mpmont_reduce, mpmont_mul): Cope with negative numbers.
  *
@@ -89,7 +115,7 @@
 
 /* #define MPMONT_DISABLE */
 
-/*----- Main code ---------------------------------------------------------*/
+/*----- Reduction and multiplication --------------------------------------*/
 
 /* --- @mpmont_create@ --- *
  *
@@ -121,15 +147,9 @@ void mpmont_create(mpmont *mm, mp *m)
   mp *r2 = mp_new(2 * n + 1, 0);
   mp r;
 
-  /* --- Validate the arguments --- */
-
-  assert(((void)"Montgomery modulus must be positive",
-          (m->f & MP_NEG) == 0));
-  assert(((void)"Montgomery modulus must be odd", m->v[0] & 1));
-
   /* --- Take a copy of the modulus --- */
 
-  mp_shrink(m);
+  assert(MP_ISPOS(m) && MP_ISODD(m));
   mm->m = MP_COPY(m);
 
   /* --- Determine %$R^2$% --- */
@@ -198,7 +218,7 @@ mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
 
   /* --- Check for serious Karatsuba reduction --- */
 
-  if (n > KARATSUBA_CUTOFF * 3) {
+  if (n > MPK_THRESH * 3) {
     mp al;
     mpw *vl;
     mp *u;
@@ -283,7 +303,7 @@ mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
 
 mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
 {
-  if (mm->n > KARATSUBA_CUTOFF * 3) {
+  if (mm->n > MPK_THRESH * 3) {
     d = mp_mul(d, a, b);
     d = mpmont_reduce(mm, d, d);
   } else {
@@ -353,222 +373,6 @@ mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
 
 #endif
 
-/* --- @mpmont_expr@ --- *
- *
- * Arguments:   @mpmont *mm@ = pointer to Montgomery reduction context
- *              @mp *d@ = fake destination
- *              @mp *a@ = base
- *              @mp *e@ = exponent
- *
- * Returns:     Result, %$a^e R \bmod m$%.
- */
-
-#define WINSZ 5
-#define TABSZ (1 << (WINSZ - 1))
-
-#define THRESH (((MPW_BITS / WINSZ) << 2) + 1)
-
-static mp *exp_simple(mpmont *mm, mp *d, mp *a, mp *e)
-{
-  mpscan sc;
-  mp *ar;
-  mp *x = MP_COPY(mm->r);
-  mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;
-  unsigned sq = 0;
-
-  mp_rscan(&sc, e);
-  if (!MP_RSTEP(&sc))
-    goto exit;
-  while (!MP_RBIT(&sc))
-    MP_RSTEP(&sc);
-
-  /* --- Do the main body of the work --- */
-
-  ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
-  for (;;) {
-    sq++;
-    while (sq) {
-      mp *y;
-      y = mp_sqr(spare, x);
-      y = mpmont_reduce(mm, y, y);
-      spare = x; x = y;
-      sq--;
-    }
-    { mp *y = mpmont_mul(mm, spare, x, ar); spare = x; x = y; }
-    sq = 0;
-    for (;;) {
-      if (!MP_RSTEP(&sc))
-        goto done;
-      if (MP_RBIT(&sc))
-        break;
-      sq++;
-    }
-  }
-
-  /* --- Do a final round of squaring --- */
-
-done:
-  while (sq) {
-    mp *y;
-    y = mp_sqr(spare, x);
-    y = mpmont_reduce(mm, y, y);
-    spare = x; x = y;
-    sq--;
-  }
-
-  /* --- Done --- */
-
-  MP_DROP(ar);
-exit:
-  if (spare != MP_NEW)
-    MP_DROP(spare);
-  if (d != MP_NEW)
-    MP_DROP(d);
-  return (x);
-}
-
-mp *mpmont_expr(mpmont *mm, mp *d, mp *a, mp *e)
-{
-  mp **tab;
-  mp *ar, *a2;
-  mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;
-  mp *x = MP_COPY(mm->r);
-  unsigned i, sq = 0;
-  mpscan sc;
-
-  /* --- Do we bother? --- */
-
-  MP_SHRINK(e);
-  if (MP_LEN(e) == 0)
-    goto exit;
-  if (MP_LEN(e) < THRESH) {
-    x->ref--;
-    return (exp_simple(mm, d, a, e));
-  }
-
-  /* --- Do the precomputation --- */
-
-  ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
-  a2 = mp_sqr(MP_NEW, ar);
-  a2 = mpmont_reduce(mm, a2, a2);
-  tab = xmalloc(TABSZ * sizeof(mp *));
-  tab[0] = ar;
-  for (i = 1; i < TABSZ; i++)
-    tab[i] = mpmont_mul(mm, MP_NEW, tab[i - 1], a2);
-  mp_drop(a2);
-  mp_rscan(&sc, e);
-
-  /* --- Skip top-end zero bits --- *
-   *
-   * If the initial step worked, there must be a set bit somewhere, so keep
-   * stepping until I find it.
-   */
-
-  MP_RSTEP(&sc);
-  while (!MP_RBIT(&sc)) {
-    MP_RSTEP(&sc);
-  }
-
-  /* --- Now for the main work --- */
-
-  for (;;) {
-    unsigned l = 0;
-    unsigned z = 0;
-
-    /* --- The next bit is set, so read a window index --- *
-     *
-     * Reset @i@ to zero and increment @sq@.  Then, until either I read
-     * @WINSZ@ bits or I run out of bits, scan in a bit: if it's clear, bump
-     * the @z@ counter; if it's set, push a set bit into @i@, shift it over
-     * by @z@ bits, bump @sq@ by @z + 1@ and clear @z@.  By the end of this
-     * palaver, @i@ is an index to the precomputed value in @tab@.
-     */
-
-    i = 0;
-    sq++;
-    for (;;) {
-      l++;
-      if (l >= WINSZ || !MP_RSTEP(&sc))
-        break;
-      if (!MP_RBIT(&sc))
-        z++;
-      else {
-        i = ((i << 1) | 1) << z;
-        sq += z + 1;
-        z = 0;
-      }
-    }
-
-    /* --- Do the squaring --- *
-     *
-     * Remember that @sq@ carries over from the zero-skipping stuff below.
-     */
-
-    while (sq) {
-      mp *y;
-      y = mp_sqr(spare, x);
-      y = mpmont_reduce(mm, y, y);
-      spare = x; x = y;
-      sq--;
-    }
-
-    /* --- Do the multiply --- */
-
-    { mp *y = mpmont_mul(mm, spare, x, tab[i]); spare = x; x = y; }
-
-    /* --- Now grind along through the rest of the bits --- */
-
-    sq = z;
-    for (;;) {
-      if (!MP_RSTEP(&sc))
-        goto done;
-      if (MP_RBIT(&sc))
-        break;
-      sq++;
-    }
-  }
-
-  /* --- Do a final round of squaring --- */
-
-done:
-  while (sq) {
-    mp *y;
-    y = mp_sqr(spare, x);
-    y = mpmont_reduce(mm, y, y);
-    spare = x; x = y;
-    sq--;
-  }
-
-  /* --- Done --- */
-
-  for (i = 0; i < TABSZ; i++)
-    mp_drop(tab[i]);
-  xfree(tab);
-exit:
-  if (d != MP_NEW)
-    mp_drop(d);
-  if (spare)
-    mp_drop(spare);
-  return (x);
-}
-
-/* --- @mpmont_exp@ --- *
- *
- * Arguments:   @mpmont *mm@ = pointer to Montgomery reduction context
- *              @mp *d@ = fake destination
- *              @mp *a@ = base
- *              @mp *e@ = exponent
- *
- * Returns:     Result, %$a^e \bmod m$%.
- */ - -mp *mpmont_exp(mpmont *mm, mp *d, mp *a, mp *e) -{ - d = mpmont_expr(mm, d, a, e); - d = mpmont_reduce(mm, d, d); - return (d); -} - /*----- Test rig ----------------------------------------------------------*/ #ifdef TEST_RIG @@ -678,46 +482,9 @@ static int tmul(dstr *v) return ok; } -static int texp(dstr *v) -{ - mp *m = *(mp **)v[0].buf; - mp *a = *(mp **)v[1].buf; - mp *b = *(mp **)v[2].buf; - mp *r = *(mp **)v[3].buf; - mp *mr; - int ok = 1; - - mpmont mm; - mpmont_create(&mm, m); - - mr = mpmont_exp(&mm, MP_NEW, a, b); - - if (!MP_EQ(mr, r)) { - fputs("\n*** montgomery modexp failed", stderr); - fputs("\n m = ", stderr); mp_writefile(m, stderr, 10); - fputs("\n a = ", stderr); mp_writefile(a, stderr, 10); - fputs("\n e = ", stderr); mp_writefile(b, stderr, 10); - fputs("\n r = ", stderr); mp_writefile(r, stderr, 10); - fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10); - fputc('\n', stderr); - ok = 0; - } - - MP_DROP(m); - MP_DROP(a); - MP_DROP(b); - MP_DROP(r); - MP_DROP(mr); - mpmont_destroy(&mm); - assert(mparena_count(MPARENA_GLOBAL) == 0); - return ok; -} - - static test_chunk tests[] = { { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } }, { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } }, - { "exp", texp, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } }, { 0, 0, { 0 } }, };
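
A note on the arithmetic the hunks above touch: mpmont_create picks a radix R (a power of two determined by the word length of the modulus m, which the new assert requires to be positive and odd), stores R^2 mod m in mm->r2, and mpmont_reduce(mm, d, a) returns a R^{-1} mod m, so mpmont_mul(mm, d, a, b) yields a b R^{-1} mod m. The fragment below is a minimal single-word sketch of that reduction, assuming GCC/Clang's unsigned __int128 and a modulus below 2^63; mont_inv64, redc64 and the usage in main are invented for illustration and are not Catacomb API.

/* A self-contained sketch of Montgomery reduction on one 64-bit word,
 * mirroring what mpmont_create/mpmont_reduce/mpmont_mul do for
 * multiprecision numbers.  Illustrative only: not Catacomb API; m is
 * assumed odd and below 2^63. */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Compute -m^{-1} mod 2^64 by Newton iteration; m must be odd. */
static uint64_t mont_inv64(uint64_t m)
{
  uint64_t mi = m;                      /* correct to the 3 low bits */
  for (int i = 0; i < 5; i++)
    mi *= 2 - m * mi;                   /* each step doubles the good bits */
  return -mi;
}

/* Given a < m * 2^64, return a * 2^{-64} mod m: the single-word analogue
 * of mpmont_reduce, with R = 2^64. */
static uint64_t redc64(unsigned __int128 a, uint64_t m, uint64_t mi)
{
  uint64_t q = (uint64_t)a * mi;                        /* q = -a m^{-1} mod R */
  unsigned __int128 t = a + (unsigned __int128)q * m;   /* low word is now 0 */
  uint64_t r = (uint64_t)(t >> 64);                     /* exact division by R */
  return r >= m ? r - m : r;                            /* one conditional subtract */
}

int main(void)
{
  uint64_t m = 1000000007u;             /* odd modulus, well below 2^63 */
  uint64_t mi = mont_inv64(m);
  unsigned __int128 R = (unsigned __int128)1 << 64;
  uint64_t rm = (uint64_t)(R % m);                            /* R mod m */
  uint64_t r2 = (uint64_t)((unsigned __int128)rm * rm % m);   /* R^2 mod m */
  uint64_t a = 123456789, b = 987654321;

  /* Into Montgomery form: multiply by R^2 and reduce, as the removed
   * exponentiation code did with mpmont_mul(mm, MP_NEW, a, mm->r2). */
  uint64_t aR = redc64((unsigned __int128)a * r2, m, mi);
  uint64_t bR = redc64((unsigned __int128)b * r2, m, mi);

  /* Montgomery multiplication: redc(aR * bR) = abR mod m. */
  uint64_t abR = redc64((unsigned __int128)aR * bR, m, mi);

  /* Back out of Montgomery form: redc(abR) = ab mod m. */
  uint64_t ab = redc64(abR, m, mi);

  assert(ab == (uint64_t)((unsigned __int128)a * b % m));
  printf("%llu\n", (unsigned long long)ab);
  return 0;
}

The multiprecision mpmont_reduce follows essentially the same pattern a word at a time, switching to a Karatsuba-based path when the operands exceed MPK_THRESH * 3 words, as the renamed constant in the hunks above shows.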
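
The deleted mpmont_expr/exp_simple/mpmont_exp block (and its texp test) went away when the generic exponentiation functions took over, per the log entries for revisions 1.15 and 1.17. The algorithm itself is left-to-right exponentiation on Montgomery representatives: start from mm->r (the number 1 in Montgomery form), square once per exponent bit, multiply in the base whenever a bit is set, and let mpmont_exp finish with one mpmont_reduce to strip the extra factor of R. Below is a minimal sketch of that control flow on plain 64-bit residues, without the Montgomery scaling or the WINSZ = 5 sliding window; expmod64 and mulmod64 are illustrative names, not Catacomb functions.

/* Left-to-right binary exponentiation, the skeleton of the removed
 * exp_simple().  Plain modular arithmetic stands in for the Montgomery
 * form: where this squares or multiplies mod m, the original used
 * mp_sqr + mpmont_reduce and mpmont_mul on values carrying a factor of R.
 * Illustrative only; not Catacomb API. */

#include <assert.h>
#include <stdint.h>

static uint64_t mulmod64(uint64_t x, uint64_t y, uint64_t m)
  { return (uint64_t)((unsigned __int128)x * y % m); }

static uint64_t expmod64(uint64_t a, uint64_t e, uint64_t m)
{
  uint64_t x = 1 % m;           /* cf. x = MP_COPY(mm->r): the value 1 in
                                 * Montgomery form is R mod m */
  int i = 63;

  if (!e) return x;             /* cf. the MP_LEN(e) == 0 early exit */
  while (!((e >> i) & 1)) i--;  /* skip leading zero bits, like the
                                 * initial mp_rscan/MP_RBIT loop */
  for (; i >= 0; i--) {
    x = mulmod64(x, x, m);      /* square once per remaining bit */
    if ((e >> i) & 1)
      x = mulmod64(x, a, m);    /* multiply when the bit is set */
  }
  return x;
}

int main(void)
{
  assert(expmod64(3, 10, 1000003) == 59049);                  /* 3^10 = 59049 */
  assert(expmod64(123456789, 1000000006, 1000000007) == 1);   /* Fermat check */
  return 0;
}

mpmont_expr improves on this by consuming up to WINSZ = 5 exponent bits per window: it precomputes TABSZ = 16 odd powers (tab[i] holds a^(2i+1) in Montgomery form), so a window ending in a set bit costs the same squarings but only one multiply, while exponents shorter than THRESH words fall back to the simple loop that exp_simple implements.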