3 * $Id: mpmont.c,v 1.13 2001/02/03 12:00:29 mdw Exp $
7 * (c) 1999 Straylight/Edgeware
10 /*----- Licensing notice --------------------------------------------------*
12 * This file is part of Catacomb.
14 * Catacomb is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU Library General Public License as
16 * published by the Free Software Foundation; either version 2 of the
17 * License, or (at your option) any later version.
19 * Catacomb is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU Library General Public License for more details.
24 * You should have received a copy of the GNU Library General Public
25 * License along with Catacomb; if not, write to the Free
26 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
30 /*----- Revision history --------------------------------------------------*
33 * Revision 1.13 2001/02/03 12:00:29 mdw
34 * Now @mp_drop@ checks its argument is non-NULL before attempting to free
35 * it. Note that the macro version @MP_DROP@ doesn't do this.
37 * Revision 1.12 2000/10/08 15:48:35 mdw
38 * Rename Karatsuba constants now that we have @gfx_kmul@ too.
40 * Revision 1.11 2000/10/08 12:04:27 mdw
41 * (mpmont_reduce, mpmont_mul): Cope with negative numbers.
43 * Revision 1.10 2000/07/29 17:05:43 mdw
44 * (mpmont_expr): Use sliding window exponentiation, with a drop-through
45 * for small exponents to use a simple left-to-right bitwise routine. This
46 * can reduce modexp times by up to a quarter.
48 * Revision 1.9 2000/06/17 11:45:09 mdw
49 * Major memory management overhaul. Added arena support. Use the secure
50 * arena for secret integers. Replace and improve the MP management macros
51 * (e.g., replace MP_MODIFY by MP_DEST).
53 * Revision 1.8 1999/12/22 15:55:00 mdw
54 * Adjust Karatsuba parameters.
56 * Revision 1.7 1999/12/11 01:51:14 mdw
57 * Use a Karatsuba-based reduction for large moduli.
59 * Revision 1.6 1999/12/10 23:18:39 mdw
60 * Change interface for suggested destinations.
62 * Revision 1.5 1999/11/22 13:58:40 mdw
63 * Add an option to disable Montgomery reduction, so that performance
64 * comparisons can be done.
66 * Revision 1.4 1999/11/21 12:27:06 mdw
67 * Remove a division from the Montgomery setup by calculating
68 * %$R^2 \bmod m$% first and then %$R \bmod m$% by Montgomery reduction of
71 * Revision 1.3 1999/11/21 11:35:10 mdw
72 * Performance improvement: use @mp_sqr@ and @mpmont_reduce@ instead of
73 * @mpmont_mul@ for squaring in exponentiation.
75 * Revision 1.2 1999/11/19 13:17:26 mdw
76 * Add extra interface to exponentiation which returns a Montgomerized
79 * Revision 1.1 1999/11/17 18:02:16 mdw
80 * New multiprecision integer arithmetic suite.
84 /*----- Header files ------------------------------------------------------*/
89 /*----- Tweakables --------------------------------------------------------*/
91 /* --- @MPMONT_DISABLE@ --- *
93 * Replace all the clever Montgomery reduction with good old-fashioned long
97 /* #define MPMONT_DISABLE */
99 /*----- Main code ---------------------------------------------------------*/
101 /* --- @mpmont_create@ --- *
103 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
104 * @mp *m@ = modulus to use
108 * Use: Initializes a Montgomery reduction context ready for use.
109 * The argument @m@ must be a positive odd integer.
#ifdef MPMONT_DISABLE
/* NOTE(review): this extract is a mangled paste -- the embedded original
 * line numbers (114, 125, 127, ...) jump, so statements, braces and whole
 * lines are missing.  The surviving fragments are preserved byte-for-byte
 * below; only comments have been added.  Restore from upstream Catacomb
 * mpmont.c before attempting to build. */
/* Division-only variant: when @MPMONT_DISABLE@ is defined the context is
 * set up for plain long division instead of Montgomery reduction (see the
 * tweakable comment near the top of the file). */
114 void mpmont_create(mpmont
*mm
, mp
*m
)
/* Real Montgomery setup (presumably the #else branch -- the directive
 * itself was lost).  Per the doc comment above: @m@ must be a positive odd
 * integer; both asserts below enforce exactly that. */
125 void mpmont_create(mpmont
*mm
, mp
*m
)
127 size_t n
= MP_LEN(m
);
/* Scratch integer of 2n + 1 words, used to form %$R^2$% below. */
128 mp
*r2
= mp_new(2 * n
+ 1, 0);
131 /* --- Validate the arguments --- */
133 assert(((void)"Montgomery modulus must be positive",
134 (m
->f
& MP_NEG
) == 0));
135 assert(((void)"Montgomery modulus must be odd", m
->v
[0] & 1));
137 /* --- Take a copy of the modulus --- */
142 /* --- Determine %$R^2$% --- */
145 MPX_ZERO(r2
->v
, r2
->vl
- 1);
148 /* --- Find the magic value @mi@ --- */
/* NOTE(review): @mi@ comes out of @mp_gcd@ and is then negated modulo @r@
 * by the @mp_sub@ below -- presumably @mi = -m^{-1} \bmod R@, the usual
 * Montgomery constant; confirm against the upstream source. */
150 mp_build(&r
, r2
->v
+ n
, r2
->vl
);
152 mp_gcd(0, 0, &mm
->mi
, &r
, m
);
153 mm
->mi
= mp_sub(mm
->mi
, &r
, mm
->mi
);
155 /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */
/* One division yields @mm->r2@; @mm->r@ is then recovered cheaply by a
 * Montgomery reduction of @mm->r2@ (see revision 1.4 note above). */
158 mp_div(0, &mm
->r2
, r2
, m
);
159 mm
->r
= mpmont_reduce(mm
, MP_NEW
, mm
->r2
);
165 /* --- @mpmont_destroy@ --- *
167 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
171 * Use: Disposes of a context when it's no longer of any use to
/* Dispose of a Montgomery context (per the doc comment above).
 * NOTE(review): only the signature fragment survived the extraction; the
 * body -- presumably dropping @mm->m@, @mm->mi@, @mm->r@ and @mm->r2@ --
 * was lost and must be restored from upstream. */
175 void mpmont_destroy(mpmont
*mm
)
183 /* --- @mpmont_reduce@ --- *
185 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
186 * @mp *d@ = destination
187 * @mp *a@ = source, assumed positive
189 * Returns: Result, %$a R^{-1} \bmod m$%.
#ifdef MPMONT_DISABLE
/* Division-only variant: reduction is just @a mod m@ by long division.
 * NOTE(review): fragments only -- lines are missing throughout this block;
 * code preserved byte-for-byte, comments added. */
194 mp
*mpmont_reduce(mpmont
*mm
, mp
*d
, mp
*a
)
196 mp_div(0, &d
, a
, mm
->m
);
/* Real Montgomery reduction: returns %$a R^{-1} \bmod m$% (doc comment
 * above); @a@ is assumed positive here, negative handling is elsewhere. */
202 mp
*mpmont_reduce(mpmont
*mm
, mp
*d
, mp
*a
)
206 /* --- Check for serious Karatsuba reduction --- */
/* For very large moduli (> 3 * MPK_THRESH words) use a multiply-based
 * reduction that can exploit Karatsuba multiplication (revision 1.7). */
208 if (n
> MPK_THRESH
* 3) {
217 mp_build(&al
, a
->v
, vl
);
218 u
= mp_mul(MP_NEW
, &al
, mm
->mi
);
221 u
= mp_mul(u
, u
, mm
->m
);
226 /* --- Otherwise do it the hard way --- */
234 /* --- Initial conditioning of the arguments --- */
/* Destination needs 2n + 1 words for the word-by-word reduction loop. */
240 MP_DEST(d
, 2 * n
+ 1, a
->f
);
242 dv
= d
->v
; dvl
= d
->vl
;
243 mv
= mm
->m
->v
; mvl
= mm
->m
->vl
;
245 /* --- Let's go to work --- */
/* Classic Montgomery step: u = d[0] * mi (mod word base) chosen so that
 * adding u*m zeroes the low word; the loop structure around this was lost. */
249 mpw u
= MPW(*dv
* mi
);
250 MPX_UMLAN(dv
, dvl
, mv
, mvl
, u
);
255 /* --- Wrap everything up --- */
/* Shift the result down n words (discard the zeroed low part), then do a
 * single conditional subtraction to bring it below m. */
257 memmove(d
->v
, d
->v
+ n
, MPWS(MP_LEN(d
) - n
));
259 if (MPX_UCMP(d
->v
, d
->vl
, >=, mm
->m
->v
, mm
->m
->vl
))
260 mpx_usub(d
->v
, d
->vl
, d
->v
, d
->vl
, mm
->m
->v
, mm
->m
->vl
);
/* Negative-input fixup (revision 1.11): replace d by m - d.
 * NOTE(review): the guarding condition for this branch was lost. */
262 mpx_usub(d
->v
, d
->vl
, mm
->m
->v
, mm
->m
->vl
, d
->v
, d
->vl
);
271 /* --- @mpmont_mul@ --- *
273 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
274 * @mp *d@ = destination
275 * @mp *a, *b@ = sources, assumed positive
277 * Returns: Result, %$a b R^{-1} \bmod m$%.
#ifdef MPMONT_DISABLE
/* Division-only variant: multiply then reduce by long division.
 * NOTE(review): mangled extract -- lines missing throughout; the surviving
 * code is preserved byte-for-byte, only comments added. */
282 mp
*mpmont_mul(mpmont
*mm
, mp
*d
, mp
*a
, mp
*b
)
285 mp_div(0, &d
, d
, mm
->m
);
/* Real interleaved Montgomery multiplication: returns
 * %$a b R^{-1} \bmod m$% (doc comment above). */
291 mp
*mpmont_mul(mpmont
*mm
, mp
*d
, mp
*a
, mp
*b
)
/* Large moduli: fall back to a separate multiply followed by the
 * Karatsuba-capable @mpmont_reduce@. */
293 if (mm
->n
> MPK_THRESH
* 3) {
295 d
= mpmont_reduce(mm
, d
, d
);
305 /* --- Initial conditioning of the arguments --- */
/* Ensure @a@ is the shorter operand: the interleaved loop scans @a@'s
 * words, so swapping minimizes iterations. */
307 if (MP_LEN(a
) > MP_LEN(b
)) {
308 mp
*t
= a
; a
= b
; b
= t
;
314 MP_DEST(d
, 2 * n
+ 1, a
->f
| b
->f
| MP_UNDEF
);
315 dv
= d
->v
; dvl
= d
->vl
;
317 av
= a
->v
; avl
= a
->vl
;
318 bv
= b
->v
; bvl
= b
->vl
;
319 mv
= mm
->m
->v
; mvl
= mm
->m
->vl
;
322 /* --- Montgomery multiplication phase --- */
/* Per word x of a: accumulate x*b, then u*m with u chosen to clear the
 * bottom word (u = (d[0] + x*y) * mi).  Loop body fragments only. */
326 while (i
< n
&& av
< avl
) {
328 mpw u
= MPW((*dv
+ x
* y
) * mi
);
329 MPX_UMLAN(dv
, dvl
, bv
, bvl
, x
);
330 MPX_UMLAN(dv
, dvl
, mv
, mvl
, u
);
335 /* --- Simpler Montgomery reduction phase --- */
/* Once @a@'s words are exhausted, finish the remaining iterations with
 * reduction-only steps (no multiplicand word to add in). */
338 mpw u
= MPW(*dv
* mi
);
339 MPX_UMLAN(dv
, dvl
, mv
, mvl
, u
);
/* Wrap up: shift down, conditional subtract to get below m. */
346 memmove(d
->v
, dv
, MPWS(dvl
- dv
));
348 if (MPX_UCMP(d
->v
, d
->vl
, >=, mm
->m
->v
, mm
->m
->vl
))
349 mpx_usub(d
->v
, d
->vl
, d
->v
, d
->vl
, mm
->m
->v
, mm
->m
->vl
);
/* Signs differ => true product is negative: replace d by m - d
 * (negative-number support, revision 1.11). */
350 if ((a
->f
^ b
->f
) & MP_NEG
)
351 mpx_usub(d
->v
, d
->vl
, mm
->m
->v
, mm
->m
->vl
, d
->v
, d
->vl
);
/* Result is always non-negative; propagate only the BURN (secret) flag. */
353 d
->f
= (a
->f
| b
->f
) & MP_BURN
;
363 /* --- @mpmont_expr@ --- *
365 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
366 * @mp *d@ = fake destination
370 * Returns: Result, %$a^e R \bmod m$%.
/* Sliding-window table size (odd powers only, hence WINSZ - 1 bits) and
 * the exponent-length threshold below which the simple routine is used. */
374 #define TABSZ (1 << (WINSZ - 1))
376 #define THRESH (((MPW_BITS / WINSZ) << 2) + 1)
/* Simple left-to-right square-and-multiply fallback for small exponents
 * (see revision 1.10 note above).  Operates on Montgomerized values;
 * result stays in Montgomery form.
 * NOTE(review): mangled extract -- many lines missing; code fragments
 * preserved byte-for-byte, only comments added. */
378 static mp
*exp_simple(mpmont
*mm
, mp
*d
, mp
*a
, mp
*e
)
/* Accumulator starts at R mod m, i.e. Montgomerized 1. */
382 mp
*x
= MP_COPY(mm
->r
);
/* Secret exponents (MP_BURN) get their scratch from the secure arena. */
383 mp
*spare
= (e
->f
& MP_BURN
) ? MP_NEWSEC
: MP_NEW
;
/* Skip leading zero bits of the exponent scan. */
389 while (!MP_RBIT(&sc
))
392 /* --- Do the main body of the work --- */
/* Montgomerize the base: ar = a * R mod m. */
394 ar
= mpmont_mul(mm
, MP_NEW
, a
, mm
->r2
);
/* Square via mp_sqr + mpmont_reduce -- cheaper than mpmont_mul for
 * squaring (revision 1.3). */
399 y
= mp_sqr(spare
, x
);
400 y
= mpmont_reduce(mm
, y
, y
);
/* Multiply step when the scanned bit is set; swap x/spare to recycle
 * the old accumulator as scratch. */
404 { mp
*y
= mpmont_mul(mm
, spare
, x
, ar
); spare
= x
; x
= y
; }
415 /* --- Do a final round of squaring --- */
420 y
= mp_sqr(spare
, x
);
421 y
= mpmont_reduce(mm
, y
, y
);
/* Sliding-window modular exponentiation returning the Montgomerized
 * result %$a^e R \bmod m$% (doc comment above; callers un-Montgomerize
 * via @mpmont_reduce@, as @mpmont_exp@ does).
 * NOTE(review): mangled extract -- loop headers, braces and many
 * statements are missing; fragments preserved byte-for-byte. */
437 mp
*mpmont_expr(mpmont
*mm
, mp
*d
, mp
*a
, mp
*e
)
441 mp
*spare
= (e
->f
& MP_BURN
) ? MP_NEWSEC
: MP_NEW
;
/* Accumulator starts at R mod m (Montgomerized 1). */
442 mp
*x
= MP_COPY(mm
->r
);
446 /* --- Do we bother? --- */
/* Short exponents: the windowing setup costs more than it saves, so
 * drop through to the simple square-and-multiply routine. */
451 if (MP_LEN(e
) < THRESH
) {
453 return (exp_simple(mm
, d
, a
, e
));
456 /* --- Do the precomputation --- */
/* Table of odd powers: tab[i] holds (presumably) ar^(2i+1) in Montgomery
 * form, built by repeated multiplication by a2 = ar^2. */
458 ar
= mpmont_mul(mm
, MP_NEW
, a
, mm
->r2
);
459 a2
= mp_sqr(MP_NEW
, ar
);
460 a2
= mpmont_reduce(mm
, a2
, a2
);
461 tab
= xmalloc(TABSZ
* sizeof(mp
*));
463 for (i
= 1; i
< TABSZ
; i
++)
464 tab
[i
] = mpmont_mul(mm
, MP_NEW
, tab
[i
- 1], a2
);
468 /* --- Skip top-end zero bits --- *
470 * If the initial step worked, there must be a set bit somewhere, so keep
471 * stepping until I find it.
475 while (!MP_RBIT(&sc
)) {
479 /* --- Now for the main work --- */
485 /* --- The next bit is set, so read a window index --- *
487 * Reset @i@ to zero and increment @sq@. Then, until either I read
488 * @WINSZ@ bits or I run out of bits, scan in a bit: if it's clear, bump
489 * the @z@ counter; if it's set, push a set bit into @i@, shift it over
490 * by @z@ bits, bump @sq@ by @z + 1@ and clear @z@. By the end of this
491 * palaver, @i@ is an index to the precomputed value in @tab@.
498 if (l
>= WINSZ
|| !MP_RSTEP(&sc
))
503 i
= ((i
<< 1) | 1) << z
;
509 /* --- Do the squaring --- *
511 * Remember that @sq@ carries over from the zero-skipping stuff below.
516 y
= mp_sqr(spare
, x
);
517 y
= mpmont_reduce(mm
, y
, y
);
522 /* --- Do the multiply --- */
524 { mp
*y
= mpmont_mul(mm
, spare
, x
, tab
[i
]); spare
= x
; x
= y
; }
526 /* --- Now grind along through the rest of the bits --- */
538 /* --- Do a final round of squaring --- */
543 y
= mp_sqr(spare
, x
);
544 y
= mpmont_reduce(mm
, y
, y
);
/* Cleanup: drop every precomputed table entry (body of loop lost). */
551 for (i
= 0; i
< TABSZ
; i
++)
560 /* --- @mpmont_exp@ --- *
562 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
563 * @mp *d@ = fake destination
567 * Returns: Result, %$a^e \bmod m$%.
/* Plain modular exponentiation: %$a^e \bmod m$% (doc comment above).
 * Computes the Montgomerized power with @mpmont_expr@, then strips the
 * factor of R with one extra @mpmont_reduce@.
 * NOTE(review): braces and the final return were lost in extraction;
 * the two surviving statements are preserved byte-for-byte. */
570 mp
*mpmont_exp(mpmont
*mm
, mp
*d
, mp
*a
, mp
*e
)
572 d
= mpmont_expr(mm
, d
, a
, e
);
573 d
= mpmont_reduce(mm
, d
, d
);
577 /*----- Test rig ----------------------------------------------------------*/
/* Test-rig case for @mpmont_create@: vector fields are m, expected mi,
 * expected r = R mod m, expected r2 = R^2 mod m.  Builds a context and
 * compares each computed field, dumping diagnostics to stderr on
 * mismatch.
 * NOTE(review): mangled extract -- the ok-flag handling, frees and
 * return were lost; fragments preserved byte-for-byte. */
581 static int tcreate(dstr
*v
)
583 mp
*m
= *(mp
**)v
[0].buf
;
584 mp
*mi
= *(mp
**)v
[1].buf
;
585 mp
*r
= *(mp
**)v
[2].buf
;
586 mp
*r2
= *(mp
**)v
[3].buf
;
591 mpmont_create(&mm
, m
);
/* Only the low word of mi is compared -- presumably mi is held
 * single-word; confirm against upstream. */
593 if (mm
.mi
->v
[0] != mi
->v
[0]) {
594 fprintf(stderr
, "\n*** bad mi: found %lu, expected %lu",
595 (unsigned long)mm
.mi
->v
[0], (unsigned long)mi
->v
[0]);
596 fputs("\nm = ", stderr
); mp_writefile(m
, stderr
, 10);
601 if (!MP_EQ(mm
.r
, r
)) {
602 fputs("\n*** bad r", stderr
);
603 fputs("\nm = ", stderr
); mp_writefile(m
, stderr
, 10);
604 fputs("\nexpected ", stderr
); mp_writefile(r
, stderr
, 10);
605 fputs("\n found ", stderr
); mp_writefile(mm
.r
, stderr
, 10);
610 if (!MP_EQ(mm
.r2
, r2
)) {
611 fputs("\n*** bad r2", stderr
);
612 fputs("\nm = ", stderr
); mp_writefile(m
, stderr
, 10);
613 fputs("\nexpected ", stderr
); mp_writefile(r2
, stderr
, 10);
614 fputs("\n found ", stderr
); mp_writefile(mm
.r2
, stderr
, 10);
/* Leak check: the global arena must be empty after cleanup. */
624 assert(mparena_count(MPARENA_GLOBAL
) == 0);
/* Test-rig case for @mpmont_mul@: vector fields are m, a, b, expected
 * r = a*b mod m.  Checks both the classical mp_mul + mp_div route and the
 * Montgomerize / mpmont_mul / mpmont_reduce route against r.
 * NOTE(review): mangled extract -- comparison conditions, frees and
 * return were lost; fragments preserved byte-for-byte. */
628 static int tmul(dstr
*v
)
630 mp
*m
= *(mp
**)v
[0].buf
;
631 mp
*a
= *(mp
**)v
[1].buf
;
632 mp
*b
= *(mp
**)v
[2].buf
;
633 mp
*r
= *(mp
**)v
[3].buf
;
637 mpmont_create(&mm
, m
);
/* Classical check: qr = (a * b) mod m by plain division. */
640 mp
*qr
= mp_mul(MP_NEW
, a
, b
);
641 mp_div(0, &qr
, qr
, m
);
644 fputs("\n*** classical modmul failed", stderr
);
645 fputs("\n m = ", stderr
); mp_writefile(m
, stderr
, 10);
646 fputs("\n a = ", stderr
); mp_writefile(a
, stderr
, 10);
647 fputs("\n b = ", stderr
); mp_writefile(b
, stderr
, 10);
648 fputs("\n r = ", stderr
); mp_writefile(r
, stderr
, 10);
649 fputs("\nqr = ", stderr
); mp_writefile(qr
, stderr
, 10);
/* Montgomery check: Montgomerize both operands, multiply (one factor of
 * R cancels), then reduce away the remaining factor of R. */
658 mp
*ar
= mpmont_mul(&mm
, MP_NEW
, a
, mm
.r2
);
659 mp
*br
= mpmont_mul(&mm
, MP_NEW
, b
, mm
.r2
);
660 mp
*mr
= mpmont_mul(&mm
, MP_NEW
, ar
, br
);
661 mr
= mpmont_reduce(&mm
, mr
, mr
);
663 fputs("\n*** montgomery modmul failed", stderr
);
664 fputs("\n m = ", stderr
); mp_writefile(m
, stderr
, 10);
665 fputs("\n a = ", stderr
); mp_writefile(a
, stderr
, 10);
666 fputs("\n b = ", stderr
); mp_writefile(b
, stderr
, 10);
667 fputs("\n r = ", stderr
); mp_writefile(r
, stderr
, 10);
668 fputs("\nmr = ", stderr
); mp_writefile(mr
, stderr
, 10);
672 MP_DROP(ar
); MP_DROP(br
);
/* Leak check: the global arena must be empty after cleanup. */
682 assert(mparena_count(MPARENA_GLOBAL
) == 0);
/* Test-rig case for @mpmont_exp@: vector fields are m, a, exponent b,
 * expected r = a^b mod m.  Computes mr = mpmont_exp(...) and dumps
 * diagnostics on mismatch (the comparison itself was lost in
 * extraction).
 * NOTE(review): mangled extract -- fragments preserved byte-for-byte,
 * only comments added. */
686 static int texp(dstr
*v
)
688 mp
*m
= *(mp
**)v
[0].buf
;
689 mp
*a
= *(mp
**)v
[1].buf
;
690 mp
*b
= *(mp
**)v
[2].buf
;
691 mp
*r
= *(mp
**)v
[3].buf
;
696 mpmont_create(&mm
, m
);
698 mr
= mpmont_exp(&mm
, MP_NEW
, a
, b
);
701 fputs("\n*** montgomery modexp failed", stderr
);
702 fputs("\n m = ", stderr
); mp_writefile(m
, stderr
, 10);
703 fputs("\n a = ", stderr
); mp_writefile(a
, stderr
, 10);
/* Note: vector slot [2] is the exponent, printed with label "e". */
704 fputs("\n e = ", stderr
); mp_writefile(b
, stderr
, 10);
705 fputs("\n r = ", stderr
); mp_writefile(r
, stderr
, 10);
706 fputs("\nmr = ", stderr
); mp_writefile(mr
, stderr
, 10);
/* Leak check: the global arena must be empty after cleanup. */
717 assert(mparena_count(MPARENA_GLOBAL
) == 0);
/* Test dispatch table: each entry names a vector section in the test
 * file, its handler, and the four mp-typed fields each vector carries.
 * NOTE(review): the terminating sentinel entry and closing brace were
 * lost in extraction. */
722 static test_chunk tests
[] = {
723 { "create", tcreate
, { &type_mp
, &type_mp
, &type_mp
, &type_mp
, 0 } },
724 { "mul", tmul
, { &type_mp
, &type_mp
, &type_mp
, &type_mp
, 0 } },
725 { "exp", texp
, { &type_mp
, &type_mp
, &type_mp
, &type_mp
, 0 } },
/* Test-rig entry point: hand the table above to the shared test runner,
 * reading vectors from SRCDIR "/tests/mpmont".
 * NOTE(review): braces and the return were lost in extraction. */
729 int main(int argc
, char *argv
[])
732 test_run(argc
, argv
, tests
, SRCDIR
"/tests/mpmont");
738 /*----- That's all, folks -------------------------------------------------*/