X-Git-Url: https://git.distorted.org.uk/u/mdw/catacomb/blobdiff_plain/1589affab225db500965e2cb869c534d6860e6bd..4edc47b89bc56cd4041fdb0f4e8e892acd589ed8:/f-prime.c

diff --git a/f-prime.c b/f-prime.c
index 473ba4c..088032b 100644
--- a/f-prime.c
+++ b/f-prime.c
@@ -1,6 +1,6 @@
 /* -*-c-*-
  *
- * $Id: f-prime.c,v 1.8 2004/04/01 12:50:09 mdw Exp $
+ * $Id: f-prime.c,v 1.9 2004/04/01 21:28:41 mdw Exp $
  *
  * Prime fields with Montgomery arithmetic
  *
@@ -30,6 +30,11 @@
 /*----- Revision history --------------------------------------------------*
  *
  * $Log: f-prime.c,v $
+ * Revision 1.9  2004/04/01 21:28:41  mdw
+ * Normal basis support (translates to poly basis internally).  Rewrite
+ * EC and prime group table generators in awk, so that they can reuse data
+ * for repeated constants.
+ *
  * Revision 1.8  2004/04/01 12:50:09  mdw
  * Add cyclic group abstraction, with test code.  Separate off exponentation
  * functions for better static linking.  Fix a buttload of bugs on the way.
@@ -77,154 +82,94 @@
 #include "mpmont.h"
 #include "mprand.h"
 
-/*----- Data structures ---------------------------------------------------*/
+/*----- Main code ---------------------------------------------------------*/
 
 typedef struct fctx {
   field f;
   mpmont mm;
 } fctx;
 
-/*----- Main code ---------------------------------------------------------*/
-
 /* --- Field operations --- */
 
 static void fdestroy(field *ff)
-{
-  fctx *f = (fctx *)ff;
-  mpmont_destroy(&f->mm);
-  DESTROY(f);
-}
+  { fctx *f = (fctx *)ff; mpmont_destroy(&f->mm); DESTROY(f); }
 
 static mp *frand(field *ff, mp *d, grand *r)
-{
-  fctx *f = (fctx *)ff;
-  return (mprand_range(d, f->mm.m, r, 0));
-}
+  { fctx *f = (fctx *)ff; return (mprand_range(d, f->mm.m, r, 0)); }
 
-static mp *fin(field *ff, mp *d, mp *x)
-{
+static mp *fin(field *ff, mp *d, mp *x) {
   fctx *f = (fctx *)ff;
   mp_div(0, &d, x, f->mm.m);
   return (mpmont_mul(&f->mm, d, d, f->mm.r2));
 }
 
 static mp *fout(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  return (mpmont_reduce(&f->mm, d, x));
-}
+  { fctx *f = (fctx *)ff; return (mpmont_reduce(&f->mm, d, x)); }
 
-static int fzerop(field *ff, mp *x)
-{
-  return (!MP_LEN(x));
-}
+static int fzerop(field *ff, mp *x) { return (!MP_LEN(x)); }
 
 static mp *fneg(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  return (mp_sub(d, f->mm.m, x));
-}
+  { fctx *f = (fctx *)ff; return (mp_sub(d, f->mm.m, x)); }
 
-static mp *fadd(field *ff, mp *d, mp *x, mp *y)
-{
-  fctx *f = (fctx *)ff;
-  d = mp_add(d, x, y);
-  if (d->f & MP_NEG)
-    d = mp_add(d, d, f->mm.m);
-  else if (MP_CMP(d, >, f->mm.m))
-    d = mp_sub(d, d, f->mm.m);
+static mp *fadd(field *ff, mp *d, mp *x, mp *y) {
+  fctx *f = (fctx *)ff; d = mp_add(d, x, y);
+  if (d->f & MP_NEG) d = mp_add(d, d, f->mm.m);
+  else if (MP_CMP(d, >, f->mm.m)) d = mp_sub(d, d, f->mm.m);
   return (d);
 }
 
-static mp *fsub(field *ff, mp *d, mp *x, mp *y)
-{
-  fctx *f = (fctx *)ff;
-  d = mp_sub(d, x, y);
-  if (d->f & MP_NEG)
-    d = mp_add(d, d, f->mm.m);
-  else if (MP_CMP(d, >, f->mm.m))
-    d = mp_sub(d, d, f->mm.m);
+static mp *fsub(field *ff, mp *d, mp *x, mp *y) {
+  fctx *f = (fctx *)ff; d = mp_sub(d, x, y);
+  if (d->f & MP_NEG) d = mp_add(d, d, f->mm.m);
+  else if (MP_CMP(d, >, f->mm.m)) d = mp_sub(d, d, f->mm.m);
   return (d);
 }
 
 static mp *fmul(field *ff, mp *d, mp *x, mp *y)
-{
-  fctx *f = (fctx *)ff;
-  return (mpmont_mul(&f->mm, d, x, y));
-}
+  { fctx *f = (fctx *)ff; return (mpmont_mul(&f->mm, d, x, y)); }
 
-static mp *fsqr(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  d = mp_sqr(d, x);
+static mp *fsqr(field *ff, mp *d, mp *x) {
+  fctx *f = (fctx *)ff; d = mp_sqr(d, x);
   return (mpmont_reduce(&f->mm, d, d));
 }
 
-static mp *finv(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  d = mpmont_reduce(&f->mm, d, x);
-  mp_gcd(0, 0, &d, f->mm.m, d);
-  return (mpmont_mul(&f->mm, d, d, f->mm.r2));
+static mp *finv(field *ff, mp *d, mp *x) {
+  fctx *f = (fctx *)ff; d = mpmont_reduce(&f->mm, d, x);
+  mp_gcd(0, 0, &d, f->mm.m, d); return (mpmont_mul(&f->mm, d, d, f->mm.r2));
 }
 
 static mp *freduce(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  mp_div(0, &d, x, f->mm.m);
-  return (d);
-}
+  { fctx *f = (fctx *)ff; mp_div(0, &d, x, f->mm.m); return (d); }
 
-static mp *fsqrt(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  d = mpmont_reduce(&f->mm, d, x);
-  d = mp_modsqrt(d, d, f->mm.m);
-  if (!d)
-    return (d);
+static mp *fsqrt(field *ff, mp *d, mp *x) {
+  fctx *f = (fctx *)ff; d = mpmont_reduce(&f->mm, d, x);
+  d = mp_modsqrt(d, d, f->mm.m); if (!d) return (d);
   return (mpmont_mul(&f->mm, d, d, f->mm.r2));
 }
 
-static mp *fdbl(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  d = mp_lsl(d, x, 1);
-  if (MP_CMP(d, >, f->mm.m))
-    d = mp_sub(d, d, f->mm.m);
+static mp *fdbl(field *ff, mp *d, mp *x) {
+  fctx *f = (fctx *)ff; d = mp_lsl(d, x, 1);
+  if (MP_CMP(d, >, f->mm.m)) d = mp_sub(d, d, f->mm.m);
   return (d);
 }
 
-static mp *ftpl(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  MP_DEST(d, MP_LEN(x) + 1, x->f);
+static mp *ftpl(field *ff, mp *d, mp *x) {
+  fctx *f = (fctx *)ff; MP_DEST(d, MP_LEN(x) + 1, x->f);
   MPX_UMULN(d->v, d->vl, x->v, x->vl, 3);
-  while (MP_CMP(d, >, f->mm.m))
-    d = mp_sub(d, d, f->mm.m);
+  while (MP_CMP(d, >, f->mm.m)) d = mp_sub(d, d, f->mm.m);
   return (d);
 }
 
-static mp *fqdl(field *ff, mp *d, mp *x)
-{
-  fctx *f = (fctx *)ff;
-  d = mp_lsl(d, x, 2);
-  while (MP_CMP(d, >, f->mm.m))
-    d = mp_sub(d, d, f->mm.m);
+static mp *fqdl(field *ff, mp *d, mp *x) {
+  fctx *f = (fctx *)ff; d = mp_lsl(d, x, 2);
+  while (MP_CMP(d, >, f->mm.m)) d = mp_sub(d, d, f->mm.m);
   return (d);
 }
 
-static mp *fhlv(field *ff, mp *d, mp *x)
-{
+static mp *fhlv(field *ff, mp *d, mp *x) {
   fctx *f = (fctx *)ff;
-  if (!MP_LEN(x)) {
-    MP_COPY(x);
-    MP_DROP(d);
-    return (x);
-  }
-  if (x->v[0] & 1) {
-    d = mp_add(d, x, f->mm.m);
-    x = d;
-  }
+  if (!MP_LEN(x)) { MP_COPY(x); MP_DROP(d); return (x); }
+  if (x->v[0] & 1) { d = mp_add(d, x, f->mm.m); x = d; }
   return (mp_lsr(d, x, 1));
 }
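
For context, the functions in this diff keep field elements in Montgomery form: fin() multiplies by mm.r2 (R^2 mod m) to enter the representation, fmul() uses mpmont_mul() so products stay in it, and fout() calls mpmont_reduce() to leave it.  The self-contained single-word C toy below sketches the same convention with R = 2^32; the names mont32, minv32 and so on are invented for illustration and are not Catacomb's mpmont API.

/* Toy single-word Montgomery arithmetic: elements are held as x*R mod m
 * with R = 2^32.  Illustration only -- not Catacomb's mpmont code. */

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t m, mi, r2; } mont32;  /* m, -m^{-1} mod R, R^2 mod m */

/* Newton iteration for m^{-1} mod 2^32; each step doubles the precision. */
static uint32_t minv32(uint32_t m)
{
  uint32_t i = m;               /* correct mod 2^3 for odd m */
  i *= 2 - m*i; i *= 2 - m*i;   /* correct mod 2^6, then 2^12 */
  i *= 2 - m*i; i *= 2 - m*i;   /* correct mod 2^24, then 2^48 >= 2^32 */
  return (-i);                  /* -m^{-1} mod 2^32 */
}

static void mont32_init(mont32 *mm, uint32_t m)
{
  uint64_t r = (1ull << 32) % m;                /* R mod m */
  mm->m = m; mm->mi = minv32(m); mm->r2 = (uint32_t)((r * r) % m);
}

/* Return t/R mod m for t < R*m (m kept below 2^31, so no 64-bit overflow). */
static uint32_t mont32_reduce(const mont32 *mm, uint64_t t)
{
  uint32_t u = (uint32_t)t * mm->mi;            /* t + u*m == 0 (mod R) */
  uint64_t s = (t + (uint64_t)u * mm->m) >> 32;
  return ((uint32_t)(s >= mm->m ? s - mm->m : s));
}

/* xR * yR / R = (x y) R: multiplication stays in Montgomery form. */
static uint32_t mont32_mul(const mont32 *mm, uint32_t x, uint32_t y)
  { return (mont32_reduce(mm, (uint64_t)x * y)); }

int main(void)
{
  mont32 mm; mont32_init(&mm, 2147483647u);     /* p = 2^31 - 1 */
  uint32_t a = 123456789u, b = 987654321u;      /* both already < p */
  uint32_t am = mont32_mul(&mm, a, mm.r2);      /* enter Montgomery form, like fin() */
  uint32_t bm = mont32_mul(&mm, b, mm.r2);
  uint32_t ab = mont32_reduce(&mm, mont32_mul(&mm, am, bm));  /* leave, like fout() */
  printf("%u\n", ab);                           /* a*b mod p */
  return (0);
}

The same shape scales to multi-precision moduli, which is what mpmont provides and what the field operations above build on.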