d3409d5e |
1 | /* -*-c-*- |
2 | * |
032099d1 |
3 | * $Id: mpmont.c,v 1.11 2000/10/08 12:04:27 mdw Exp $ |
d3409d5e |
4 | * |
5 | * Montgomery reduction |
6 | * |
7 | * (c) 1999 Straylight/Edgeware |
8 | */ |
9 | |
10 | /*----- Licensing notice --------------------------------------------------* |
11 | * |
12 | * This file is part of Catacomb. |
13 | * |
14 | * Catacomb is free software; you can redistribute it and/or modify |
15 | * it under the terms of the GNU Library General Public License as |
16 | * published by the Free Software Foundation; either version 2 of the |
17 | * License, or (at your option) any later version. |
18 | * |
19 | * Catacomb is distributed in the hope that it will be useful, |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
22 | * GNU Library General Public License for more details. |
23 | * |
24 | * You should have received a copy of the GNU Library General Public |
25 | * License along with Catacomb; if not, write to the Free |
26 | * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, |
27 | * MA 02111-1307, USA. |
28 | */ |
29 | |
30 | /*----- Revision history --------------------------------------------------* |
31 | * |
32 | * $Log: mpmont.c,v $ |
032099d1 |
33 | * Revision 1.11 2000/10/08 12:04:27 mdw |
34 | * (mpmont_reduce, mpmont_mul): Cope with negative numbers. |
35 | * |
c9d4c30b |
36 | * Revision 1.10 2000/07/29 17:05:43 mdw |
37 | * (mpmont_expr): Use sliding window exponentiation, with a drop-through |
38 | * for small exponents to use a simple left-to-right bitwise routine. This |
39 | * can reduce modexp times by up to a quarter. |
40 | * |
d34decd2 |
41 | * Revision 1.9 2000/06/17 11:45:09 mdw |
42 | * Major memory management overhaul. Added arena support. Use the secure |
43 | * arena for secret integers. Replace and improve the MP management macros |
44 | * (e.g., replace MP_MODIFY by MP_DEST). |
45 | * |
01f6ed1a |
46 | * Revision 1.8 1999/12/22 15:55:00 mdw |
47 | * Adjust Karatsuba parameters. |
48 | * |
f5f35081 |
49 | * Revision 1.7 1999/12/11 01:51:14 mdw |
50 | * Use a Karatsuba-based reduction for large moduli. |
51 | * |
ef5f4810 |
52 | * Revision 1.6 1999/12/10 23:18:39 mdw |
53 | * Change interface for suggested destinations. |
54 | * |
52e4b041 |
55 | * Revision 1.5 1999/11/22 13:58:40 mdw |
56 | * Add an option to disable Montgomery reduction, so that performance |
57 | * comparisons can be done. |
58 | * |
93feaa6e |
59 | * Revision 1.4 1999/11/21 12:27:06 mdw |
60 | * Remove a division from the Montgomery setup by calculating |
61 | * %$R^2 \bmod m$% first and then %$R \bmod m$% by Montgomery reduction of |
62 | * %$R^2$%. |
63 | * |
79a34029 |
64 | * Revision 1.3 1999/11/21 11:35:10 mdw |
65 | * Performance improvement: use @mp_sqr@ and @mpmont_reduce@ instead of |
66 | * @mpmont_mul@ for squaring in exponentiation. |
67 | * |
17ad212e |
68 | * Revision 1.2 1999/11/19 13:17:26 mdw |
69 | * Add extra interface to exponentiation which returns a Montgomerized |
70 | * result. |
71 | * |
d3409d5e |
72 | * Revision 1.1 1999/11/17 18:02:16 mdw |
73 | * New multiprecision integer arithmetic suite. |
74 | * |
75 | */ |
76 | |
77 | /*----- Header files ------------------------------------------------------*/ |
78 | |
79 | #include "mp.h" |
80 | #include "mpmont.h" |
81 | |
52e4b041 |
82 | /*----- Tweakables --------------------------------------------------------*/ |
83 | |
84 | /* --- @MPMONT_DISABLE@ --- * |
85 | * |
86 | * Replace all the clever Montgomery reduction with good old-fashioned long |
87 | * division. |
88 | */ |
89 | |
90 | /* #define MPMONT_DISABLE */ |
91 | |
d3409d5e |
92 | /*----- Main code ---------------------------------------------------------*/ |
93 | |
94 | /* --- @mpmont_create@ --- * |
95 | * |
96 | * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context |
97 | * @mp *m@ = modulus to use |
98 | * |
99 | * Returns: --- |
100 | * |
101 | * Use: Initializes a Montgomery reduction context ready for use. |
ef5f4810 |
102 | * The argument @m@ must be a positive odd integer. |
d3409d5e |
103 | */ |
104 | |
52e4b041 |
105 | #ifdef MPMONT_DISABLE |
106 | |
107 | void mpmont_create(mpmont *mm, mp *m) |
108 | { |
109 | mp_shrink(m); |
110 | mm->m = MP_COPY(m); |
111 | mm->r = MP_ONE; |
112 | mm->r2 = MP_ONE; |
f5f35081 |
113 | mm->mi = MP_ONE; |
52e4b041 |
114 | } |
115 | |
116 | #else |
117 | |
d3409d5e |
void mpmont_create(mpmont *mm, mp *m)
{
  size_t n = MP_LEN(m);
  mp *r2 = mp_new(2 * n + 1, 0);	/* will hold %$R^2 = B^{2n}$%, %$B = 2^{MPW\_BITS}$% */
  mp r;

  /* --- Validate the arguments --- */

  assert(((void)"Montgomery modulus must be positive",
          (m->f & MP_NEG) == 0));
  assert(((void)"Montgomery modulus must be odd", m->v[0] & 1));

  /* --- Take a copy of the modulus --- */

  mp_shrink(m);
  mm->m = MP_COPY(m);

  /* --- Determine %$R^2$% --- *
   *
   * Set @r2@ to %$B^{2n}$%: all words zero except a leading 1.
   */

  mm->n = n;
  MPX_ZERO(r2->v, r2->vl - 1);
  r2->vl[-1] = 1;

  /* --- Find the magic value @mi@ --- *
   *
   * @r@ aliases the top %$n + 1$% words of @r2@'s buffer, so it has the
   * value %$B^n = R$% without a separate allocation.  The extended GCD
   * yields %$m^{-1} \bmod R$% (which exists, since @m@ is odd); negating
   * it modulo %$R$% gives the factor @mi@ with %$m \cdot mi \equiv -1
   * \pmod{R}$% used by the reduction loops below.
   */

  mp_build(&r, r2->v + n, r2->vl);
  mm->mi = MP_NEW;
  mp_gcd(0, 0, &mm->mi, &r, m);
  mm->mi = mp_sub(mm->mi, &r, mm->mi);

  /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- *
   *
   * Divide once to get %$R^2 \bmod m$%; then %$R \bmod m$% falls out by
   * Montgomery-reducing it, avoiding a second division.
   */

  mm->r2 = MP_NEW;
  mp_div(0, &mm->r2, r2, m);
  mm->r = mpmont_reduce(mm, MP_NEW, mm->r2);
  MP_DROP(r2);
}
155 | |
52e4b041 |
156 | #endif |
157 | |
d3409d5e |
158 | /* --- @mpmont_destroy@ --- * |
159 | * |
160 | * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context |
161 | * |
162 | * Returns: --- |
163 | * |
164 | * Use: Disposes of a context when it's no longer of any use to |
165 | * anyone. |
166 | */ |
167 | |
168 | void mpmont_destroy(mpmont *mm) |
169 | { |
170 | MP_DROP(mm->m); |
171 | MP_DROP(mm->r); |
172 | MP_DROP(mm->r2); |
f5f35081 |
173 | MP_DROP(mm->mi); |
d3409d5e |
174 | } |
175 | |
176 | /* --- @mpmont_reduce@ --- * |
177 | * |
178 | * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context |
179 | * @mp *d@ = destination |
ef5f4810 |
180 | * @mp *a@ = source, assumed positive |
d3409d5e |
181 | * |
182 | * Returns: Result, %$a R^{-1} \bmod m$%. |
183 | */ |
184 | |
52e4b041 |
185 | #ifdef MPMONT_DISABLE |
186 | |
ef5f4810 |
187 | mp *mpmont_reduce(mpmont *mm, mp *d, mp *a) |
52e4b041 |
188 | { |
189 | mp_div(0, &d, a, mm->m); |
190 | return (d); |
191 | } |
192 | |
193 | #else |
194 | |
ef5f4810 |
mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  size_t n = mm->n;

  /* --- Check for serious Karatsuba reduction --- *
   *
   * For large moduli, compute %$u = ((a \bmod B^n) \cdot mi) \bmod B^n$%
   * with fast multiplication, and then %$d = a + u m$%, whose bottom
   * %$n$% words are zero by construction of @mi@.
   */

  if (n > KARATSUBA_CUTOFF * 3) {
    mp al;
    mpw *vl;
    mp *u;

    /* @al@ aliases the low n words of @a@ (or all of it, if shorter). */
    if (MP_LEN(a) >= n)
      vl = a->v + n;
    else
      vl = a->vl;
    mp_build(&al, a->v, vl);
    u = mp_mul(MP_NEW, &al, mm->mi);
    if (MP_LEN(u) > n)
      u->vl = u->v + n;		/* truncate the product mod %$B^n$% */
    u = mp_mul(u, u, mm->m);
    d = mp_add(d, a, u);
    mp_drop(u);
  }

  /* --- Otherwise do it the hard way --- *
   *
   * Classic word-at-a-time Montgomery reduction: each pass picks the
   * multiplier @u@ that zeroes the current bottom word, and accumulates
   * %$u m$% into the running value.
   */

  else {
    mpw *dv, *dvl;
    mpw *mv, *mvl;
    mpw mi;
    size_t k = n;

    /* --- Initial conditioning of the arguments --- *
     *
     * Work in place on (a copy of) @a@, ensuring enough room for the
     * carries from the accumulation.
     */

    a = MP_COPY(a);
    if (d)
      MP_DROP(d);
    d = a;
    MP_DEST(d, 2 * n + 1, a->f);

    dv = d->v; dvl = d->vl;
    mv = mm->m->v; mvl = mm->m->vl;

    /* --- Let's go to work --- */

    mi = mm->mi->v[0];		/* only the low word of @mi@ is needed here */
    while (k--) {
      mpw u = MPW(*dv * mi);
      MPX_UMLAN(dv, dvl, mv, mvl, u);
      dv++;
    }
  }

  /* --- Wrap everything up --- *
   *
   * The bottom n words are now zero: dividing by %$R = B^n$% is just a
   * word-level shift.  One conditional subtract brings the result below
   * @m@; a negative input is then folded into range as %$m - d$%.
   */

  memmove(d->v, d->v + n, MPWS(MP_LEN(d) - n));
  d->vl -= n;
  if (MPX_UCMP(d->v, d->vl, >=, mm->m->v, mm->m->vl))
    mpx_usub(d->v, d->vl, d->v, d->vl, mm->m->v, mm->m->vl);
  if (d->f & MP_NEG) {
    mpx_usub(d->v, d->vl, mm->m->v, mm->m->vl, d->v, d->vl);
    d->f &= ~MP_NEG;
  }
  MP_SHRINK(d);
  return (d);
}
261 | |
52e4b041 |
262 | #endif |
263 | |
d3409d5e |
264 | /* --- @mpmont_mul@ --- * |
265 | * |
266 | * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context |
267 | * @mp *d@ = destination |
ef5f4810 |
268 | * @mp *a, *b@ = sources, assumed positive |
d3409d5e |
269 | * |
270 | * Returns: Result, %$a b R^{-1} \bmod m$%. |
271 | */ |
272 | |
52e4b041 |
273 | #ifdef MPMONT_DISABLE |
274 | |
ef5f4810 |
275 | mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b) |
52e4b041 |
276 | { |
277 | d = mp_mul(d, a, b); |
278 | mp_div(0, &d, d, mm->m); |
279 | return (d); |
280 | } |
281 | |
282 | #else |
283 | |
ef5f4810 |
mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  /* For big moduli, a Karatsuba multiply plus the Karatsuba-aware
   * @mpmont_reduce@ beats the interleaved loop below.
   */
  if (mm->n > KARATSUBA_CUTOFF * 3) {
    d = mp_mul(d, a, b);
    d = mpmont_reduce(mm, d, d);
  } else {
    mpw *dv, *dvl;
    mpw *av, *avl;
    mpw *bv, *bvl;
    mpw *mv, *mvl;
    mpw y;
    size_t n, i;
    mpw mi;

    /* --- Initial conditioning of the arguments --- *
     *
     * Make @a@ the shorter operand: the main loop scans @a@'s words.
     */

    if (MP_LEN(a) > MP_LEN(b)) {
      mp *t = a; a = b; b = t;
    }
    n = MP_LEN(mm->m);

    a = MP_COPY(a);
    b = MP_COPY(b);
    MP_DEST(d, 2 * n + 1, a->f | b->f | MP_UNDEF);
    dv = d->v; dvl = d->vl;
    MPX_ZERO(dv, dvl);		/* accumulator starts at zero */
    av = a->v; avl = a->vl;
    bv = b->v; bvl = b->vl;
    mv = mm->m->v; mvl = mm->m->vl;
    y = *bv;			/* low word of @b@, used to predict @d@'s low word */

    /* --- Montgomery multiplication phase --- *
     *
     * Interleave multiply and reduce: for each word @x@ of @a@, add
     * %$x b$% and %$u m$%, where %$u = (d_0 + x y) \cdot mi$% is chosen
     * so the bottom word of the accumulator becomes zero.
     */

    i = 0;
    mi = mm->mi->v[0];
    while (i < n && av < avl) {
      mpw x = *av++;
      mpw u = MPW((*dv + x * y) * mi);
      MPX_UMLAN(dv, dvl, bv, bvl, x);
      MPX_UMLAN(dv, dvl, mv, mvl, u);
      dv++;
      i++;
    }

    /* --- Simpler Montgomery reduction phase --- *
     *
     * @a@ has run out of words; finish the remaining reduction steps.
     */

    while (i < n) {
      mpw u = MPW(*dv * mi);
      MPX_UMLAN(dv, dvl, mv, mvl, u);
      dv++;
      i++;
    }

    /* --- Done --- *
     *
     * Shift down by %$B^n$% (the low words are zero), conditionally
     * subtract @m@, and fold a negative result (exactly one negative
     * input) into range as %$m - d$%.
     */

    memmove(d->v, dv, MPWS(dvl - dv));
    d->vl -= dv - d->v;
    if (MPX_UCMP(d->v, d->vl, >=, mm->m->v, mm->m->vl))
      mpx_usub(d->v, d->vl, d->v, d->vl, mm->m->v, mm->m->vl);
    if ((a->f ^ b->f) & MP_NEG)
      mpx_usub(d->v, d->vl, mm->m->v, mm->m->vl, d->v, d->vl);
    MP_SHRINK(d);
    d->f = (a->f | b->f) & MP_BURN;	/* propagate burn-after-use */
    MP_DROP(a);
    MP_DROP(b);
  }

  return (d);
}
353 | |
52e4b041 |
354 | #endif |
355 | |
17ad212e |
356 | /* --- @mpmont_expr@ --- * |
d3409d5e |
357 | * |
358 | * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context |
ef5f4810 |
359 | * @mp *d@ = fake destination |
360 | * @mp *a@ = base |
361 | * @mp *e@ = exponent |
d3409d5e |
362 | * |
17ad212e |
363 | * Returns: Result, %$a^e R \bmod m$%. |
d3409d5e |
364 | */ |
365 | |
c9d4c30b |
#define WINSZ 5			/* window width (bits) for sliding-window exponentiation */
#define TABSZ (1 << (WINSZ - 1))	/* number of precomputed odd powers */

#define THRESH (((MPW_BITS / WINSZ) << 2) + 1)	/* exponent length (words) below
						 * which the simple method is used */

/* Simple left-to-right square-and-multiply, %$a^e R \bmod m$%.  Used as a
 * drop-through for small exponents where the window setup cost isn't
 * worth it.
 */
static mp *exp_simple(mpmont *mm, mp *d, mp *a, mp *e)
{
  mpscan sc;
  mp *ar;
  mp *x = MP_COPY(mm->r);	/* %$R \bmod m$%: the Montgomery form of 1 */
  mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;	/* scratch; secure arena
							 * for sensitive exponents */
  unsigned sq = 0;		/* count of squarings owed */

  /* Scan @e@ from the most significant end; skip leading zero bits.  An
   * all-zero (or empty) exponent bails out with %$x = R$%, i.e. 1 in
   * Montgomery form.
   */
  mp_rscan(&sc, e);
  if (!MP_RSTEP(&sc))
    goto exit;
  while (!MP_RBIT(&sc))
    MP_RSTEP(&sc);

  /* --- Do the main body of the work --- */

  ar = mpmont_mul(mm, MP_NEW, a, mm->r2);	/* Montgomerize the base */
  for (;;) {
    sq++;
    while (sq) {
      mp *y;
      y = mp_sqr(spare, x);
      y = mpmont_reduce(mm, y, y);
      spare = x; x = y;		/* recycle the old @x@ as scratch */
      sq--;
    }
    { mp *y = mpmont_mul(mm, spare, x, ar); spare = x; x = y; }
    sq = 0;
    /* Count the following zero bits; each costs one deferred squaring. */
    for (;;) {
      if (!MP_RSTEP(&sc))
	goto done;
      if (MP_RBIT(&sc))
	break;
      sq++;
    }
  }

  /* --- Do a final round of squaring --- */

done:
  while (sq) {
    mp *y;
    y = mp_sqr(spare, x);
    y = mpmont_reduce(mm, y, y);
    spare = x; x = y;
    sq--;
  }

  /* --- Done --- */

  MP_DROP(ar);
exit:
  if (spare != MP_NEW)
    MP_DROP(spare);
  if (d != MP_NEW)
    MP_DROP(d);
  return (x);
}
429 | |
c9d4c30b |
mp *mpmont_expr(mpmont *mm, mp *d, mp *a, mp *e)
{
  mp **tab;			/* table of precomputed odd powers */
  mp *ar, *a2;
  mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;	/* scratch; secure arena
							 * for sensitive exponents */
  mp *x = MP_COPY(mm->r);	/* start at 1 in Montgomery form */
  unsigned i, sq = 0;
  mpscan sc;

  /* --- Do we bother? --- */

  MP_SHRINK(e);
  if (MP_LEN(e) == 0)
    goto exit;			/* %$a^0 = 1$%, i.e. %$R \bmod m$% */
  if (MP_LEN(e) < THRESH) {
    x->ref--;			/* hand back the reference taken above;
				 * @mm->r@ still holds one, so this can't free */
    return (exp_simple(mm, d, a, e));
  }

  /* --- Do the precomputation --- *
   *
   * @tab[i]@ is the Montgomery form of %$a^{2i+1}$% -- the odd powers,
   * built by repeatedly multiplying by %$a^2$%.
   */

  ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
  a2 = mp_sqr(MP_NEW, ar);
  a2 = mpmont_reduce(mm, a2, a2);
  tab = xmalloc(TABSZ * sizeof(mp *));
  tab[0] = ar;
  for (i = 1; i < TABSZ; i++)
    tab[i] = mpmont_mul(mm, MP_NEW, tab[i - 1], a2);
  mp_drop(a2);
  mp_rscan(&sc, e);

  /* --- Skip top-end zero bits --- *
   *
   * If the initial step worked, there must be a set bit somewhere, so keep
   * stepping until I find it.
   */

  MP_RSTEP(&sc);
  while (!MP_RBIT(&sc)) {
    MP_RSTEP(&sc);
  }

  /* --- Now for the main work --- */

  for (;;) {
    unsigned l = 0;		/* bits consumed from this window */
    unsigned z = 0;		/* trailing zero bits not yet committed */

    /* --- The next bit is set, so read a window index --- *
     *
     * Reset @i@ to zero and increment @sq@.  Then, until either I read
     * @WINSZ@ bits or I run out of bits, scan in a bit: if it's clear, bump
     * the @z@ counter; if it's set, push a set bit into @i@, shift it over
     * by @z@ bits, bump @sq@ by @z + 1@ and clear @z@.  By the end of this
     * palaver, @i@ is an index to the precomputed value in @tab@.
     */

    i = 0;
    sq++;
    for (;;) {
      l++;
      if (l >= WINSZ || !MP_RSTEP(&sc))
	break;
      if (!MP_RBIT(&sc))
	z++;
      else {
	i = ((i << 1) | 1) << z;
	sq += z + 1;
	z = 0;
      }
    }

    /* --- Do the squaring --- *
     *
     * Remember that @sq@ carries over from the zero-skipping stuff below.
     */

    while (sq) {
      mp *y;
      y = mp_sqr(spare, x);
      y = mpmont_reduce(mm, y, y);
      spare = x; x = y;		/* recycle the old @x@ as scratch */
      sq--;
    }

    /* --- Do the multiply --- */

    { mp *y = mpmont_mul(mm, spare, x, tab[i]); spare = x; x = y; }

    /* --- Now grind along through the rest of the bits --- */

    sq = z;			/* zeros left over from the window read */
    for (;;) {
      if (!MP_RSTEP(&sc))
	goto done;
      if (MP_RBIT(&sc))
	break;
      sq++;
    }
  }

  /* --- Do a final round of squaring --- */

done:
  while (sq) {
    mp *y;
    y = mp_sqr(spare, x);
    y = mpmont_reduce(mm, y, y);
    spare = x; x = y;
    sq--;
  }

  /* --- Done --- */

  for (i = 0; i < TABSZ; i++)
    mp_drop(tab[i]);
  xfree(tab);
exit:
  if (d != MP_NEW)
    mp_drop(d);
  if (spare)
    mp_drop(spare);
  return (x);
}
554 | |
17ad212e |
555 | /* --- @mpmont_exp@ --- * |
556 | * |
557 | * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context |
ef5f4810 |
558 | * @mp *d@ = fake destination |
559 | * @mp *a@ = base |
560 | * @mp *e@ = exponent |
17ad212e |
561 | * |
562 | * Returns: Result, %$a^e \bmod m$%. |
563 | */ |
564 | |
ef5f4810 |
565 | mp *mpmont_exp(mpmont *mm, mp *d, mp *a, mp *e) |
17ad212e |
566 | { |
ef5f4810 |
567 | d = mpmont_expr(mm, d, a, e); |
17ad212e |
568 | d = mpmont_reduce(mm, d, d); |
569 | return (d); |
d3409d5e |
570 | } |
571 | |
572 | /*----- Test rig ----------------------------------------------------------*/ |
573 | |
574 | #ifdef TEST_RIG |
575 | |
/* Test driver: check that @mpmont_create@ derives the expected @mi@, @r@
 * and @r2@ constants from a modulus @m@.  Columns: m, mi, r, r2.
 */
static int tcreate(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *mi = *(mp **)v[1].buf;
  mp *r = *(mp **)v[2].buf;
  mp *r2 = *(mp **)v[3].buf;

  mpmont mm;
  int ok = 1;

  mpmont_create(&mm, m);

  /* Only the low word of @mi@ matters to the reduction loops. */
  if (mm.mi->v[0] != mi->v[0]) {
    fprintf(stderr, "\n*** bad mi: found %lu, expected %lu",
            (unsigned long)mm.mi->v[0], (unsigned long)mi->v[0]);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r, r)) {
    fputs("\n*** bad r", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r2, r2)) {
    fputs("\n*** bad r2", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r2, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r2, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  MP_DROP(m);
  MP_DROP(mi);
  MP_DROP(r);
  MP_DROP(r2);
  mpmont_destroy(&mm);
  /* Everything dropped: the global arena must be empty (leak check). */
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return (ok);
}
622 | |
/* Test driver: check %$a b \bmod m$% both by classical division and by the
 * Montgomerize/multiply/reduce route.  Columns: m, a, b, r.
 */
static int tmul(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *a = *(mp **)v[1].buf;
  mp *b = *(mp **)v[2].buf;
  mp *r = *(mp **)v[3].buf;
  int ok = 1;

  mpmont mm;
  mpmont_create(&mm, m);

  /* --- Reference result: multiply then divide --- */

  {
    mp *qr = mp_mul(MP_NEW, a, b);
    mp_div(0, &qr, qr, m);

    if (!MP_EQ(qr, r)) {
      fputs("\n*** classical modmul failed", stderr);
      fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
      fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
      fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
      fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
      fputs("\nqr = ", stderr); mp_writefile(qr, stderr, 10);
      fputc('\n', stderr);
      ok = 0;
    }

    mp_drop(qr);
  }

  /* --- Montgomery route: convert both operands, multiply, reduce back --- */

  {
    mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
    mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
    mp *mr = mpmont_mul(&mm, MP_NEW, ar, br);
    mr = mpmont_reduce(&mm, mr, mr);
    if (!MP_EQ(mr, r)) {
      fputs("\n*** montgomery modmul failed", stderr);
      fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
      fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
      fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
      fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
      fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
      fputc('\n', stderr);
      ok = 0;
    }
    MP_DROP(ar); MP_DROP(br);
    mp_drop(mr);
  }


  MP_DROP(m);
  MP_DROP(a);
  MP_DROP(b);
  MP_DROP(r);
  mpmont_destroy(&mm);
  /* Leak check: the global arena must be empty after the drops. */
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return ok;
}
680 | |
/* Test driver: check %$a^b \bmod m$% via @mpmont_exp@.
 * Columns: m, a, b (exponent), r.
 */
static int texp(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *a = *(mp **)v[1].buf;
  mp *b = *(mp **)v[2].buf;
  mp *r = *(mp **)v[3].buf;
  mp *mr;
  int ok = 1;

  mpmont mm;
  mpmont_create(&mm, m);

  mr = mpmont_exp(&mm, MP_NEW, a, b);

  if (!MP_EQ(mr, r)) {
    fputs("\n*** montgomery modexp failed", stderr);
    fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
    fputs("\n e = ", stderr); mp_writefile(b, stderr, 10);
    fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
    fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  MP_DROP(m);
  MP_DROP(a);
  MP_DROP(b);
  MP_DROP(r);
  MP_DROP(mr);
  mpmont_destroy(&mm);
  /* Leak check: the global arena must be empty after the drops. */
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return ok;
}
715 | |
716 | |
/* Test vector table: each entry names a test section, its driver function,
 * and the types of the four input columns; terminated by a null entry.
 */
static test_chunk tests[] = {
  { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "exp", texp, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { 0, 0, { 0 } },
};
723 | |
724 | int main(int argc, char *argv[]) |
725 | { |
726 | sub_init(); |
727 | test_run(argc, argv, tests, SRCDIR "/tests/mpmont"); |
728 | return (0); |
729 | } |
730 | |
731 | #endif |
732 | |
733 | /*----- That's all, folks -------------------------------------------------*/ |