1 /* -*-c-*-
2 *
3 * $Id: mpmont.c,v 1.13 2001/02/03 12:00:29 mdw Exp $
4 *
5 * Montgomery reduction
6 *
7 * (c) 1999 Straylight/Edgeware
8 */
9
10 /*----- Licensing notice --------------------------------------------------*
11 *
12 * This file is part of Catacomb.
13 *
14 * Catacomb is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU Library General Public License as
16 * published by the Free Software Foundation; either version 2 of the
17 * License, or (at your option) any later version.
18 *
19 * Catacomb is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU Library General Public License for more details.
23 *
24 * You should have received a copy of the GNU Library General Public
25 * License along with Catacomb; if not, write to the Free
26 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
27 * MA 02111-1307, USA.
28 */
29
30 /*----- Revision history --------------------------------------------------*
31 *
32 * $Log: mpmont.c,v $
33 * Revision 1.13 2001/02/03 12:00:29 mdw
34 * Now @mp_drop@ checks its argument is non-NULL before attempting to free
35 * it. Note that the macro version @MP_DROP@ doesn't do this.
36 *
37 * Revision 1.12 2000/10/08 15:48:35 mdw
38 * Rename Karatsuba constants now that we have @gfx_kmul@ too.
39 *
40 * Revision 1.11 2000/10/08 12:04:27 mdw
41 * (mpmont_reduce, mpmont_mul): Cope with negative numbers.
42 *
43 * Revision 1.10 2000/07/29 17:05:43 mdw
44 * (mpmont_expr): Use sliding window exponentiation, with a drop-through
45 * for small exponents to use a simple left-to-right bitwise routine. This
46 * can reduce modexp times by up to a quarter.
47 *
48 * Revision 1.9 2000/06/17 11:45:09 mdw
49 * Major memory management overhaul. Added arena support. Use the secure
50 * arena for secret integers. Replace and improve the MP management macros
51 * (e.g., replace MP_MODIFY by MP_DEST).
52 *
53 * Revision 1.8 1999/12/22 15:55:00 mdw
54 * Adjust Karatsuba parameters.
55 *
56 * Revision 1.7 1999/12/11 01:51:14 mdw
57 * Use a Karatsuba-based reduction for large moduli.
58 *
59 * Revision 1.6 1999/12/10 23:18:39 mdw
60 * Change interface for suggested destinations.
61 *
62 * Revision 1.5 1999/11/22 13:58:40 mdw
63 * Add an option to disable Montgomery reduction, so that performance
64 * comparisons can be done.
65 *
66 * Revision 1.4 1999/11/21 12:27:06 mdw
67 * Remove a division from the Montgomery setup by calculating
68 * %$R^2 \bmod m$% first and then %$R \bmod m$% by Montgomery reduction of
69 * %$R^2$%.
70 *
71 * Revision 1.3 1999/11/21 11:35:10 mdw
72 * Performance improvement: use @mp_sqr@ and @mpmont_reduce@ instead of
73 * @mpmont_mul@ for squaring in exponentiation.
74 *
75 * Revision 1.2 1999/11/19 13:17:26 mdw
76 * Add extra interface to exponentiation which returns a Montgomerized
77 * result.
78 *
79 * Revision 1.1 1999/11/17 18:02:16 mdw
80 * New multiprecision integer arithmetic suite.
81 *
82 */
83
84 /*----- Header files ------------------------------------------------------*/
85
86 #include "mp.h"
87 #include "mpmont.h"
88
89 /*----- Tweakables --------------------------------------------------------*/
90
91 /* --- @MPMONT_DISABLE@ --- *
92 *
93 * Replace all the clever Montgomery reduction with good old-fashioned long
94 * division.
95 */
96
97 /* #define MPMONT_DISABLE */
98
99 /*----- Main code ---------------------------------------------------------*/
100
101 /* --- @mpmont_create@ --- *
102 *
103 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
104 * @mp *m@ = modulus to use
105 *
106 * Returns: ---
107 *
108 * Use: Initializes a Montgomery reduction context ready for use.
109 * The argument @m@ must be a positive odd integer.
110 */
111
112 #ifdef MPMONT_DISABLE
113
114 void mpmont_create(mpmont *mm, mp *m)
115 {
116 mp_shrink(m);
117 mm->m = MP_COPY(m);
118 mm->r = MP_ONE;
119 mm->r2 = MP_ONE;
120 mm->mi = MP_ONE;
121 }
122
123 #else
124
125 void mpmont_create(mpmont *mm, mp *m)
126 {
127 size_t n = MP_LEN(m);
128 mp *r2 = mp_new(2 * n + 1, 0);
129 mp r;
130
131 /* --- Validate the arguments --- */
132
133 assert(((void)"Montgomery modulus must be positive",
134 (m->f & MP_NEG) == 0));
135 assert(((void)"Montgomery modulus must be odd", m->v[0] & 1));
136
137 /* --- Take a copy of the modulus --- */
138
139 mp_shrink(m);
140 mm->m = MP_COPY(m);
141
142 /* --- Determine %$R^2$% --- */
143
144 mm->n = n;
145 MPX_ZERO(r2->v, r2->vl - 1);
146 r2->vl[-1] = 1;
147
148 /* --- Find the magic value @mi@ --- *
 *
 * This is %$-m^{-1} \bmod R$%: the extended Euclidean algorithm
 * (@mp_gcd@) applied to %$R$% and %$m$% yields %$m^{-1} \bmod R$%, which
 * is then negated by subtracting it from %$R$%.
 */
149
150 mp_build(&r, r2->v + n, r2->vl);
151 mm->mi = MP_NEW;
152 mp_gcd(0, 0, &mm->mi, &r, m);
153 mm->mi = mp_sub(mm->mi, &r, mm->mi);
154
155 /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */
156
157 mm->r2 = MP_NEW;
158 mp_div(0, &mm->r2, r2, m);
159 mm->r = mpmont_reduce(mm, MP_NEW, mm->r2);
160 MP_DROP(r2);
161 }
162
163 #endif
164
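/* --- Example: converting to and from Montgomery form --- *
 *
 * An illustrative sketch, not part of the library: @m@ and @x@ are assumed
 * to be existing @mp@ objects, with @m@ positive and odd.  Multiplying by
 * @r2@ converts an integer to Montgomery form; @mpmont_reduce@ converts it
 * back:
 *
 *   mpmont mm;
 *   mp *xr;
 *
 *   mpmont_create(&mm, m);
 *   xr = mpmont_mul(&mm, MP_NEW, x, mm.r2);     (now %$x R \bmod m$%)
 *   xr = mpmont_reduce(&mm, xr, xr);            (back to %$x \bmod m$%)
 *   MP_DROP(xr);
 *   mpmont_destroy(&mm);
 */
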
165 /* --- @mpmont_destroy@ --- *
166 *
167 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
168 *
169 * Returns: ---
170 *
171 * Use: Disposes of a context when it's no longer of any use to
172 * anyone.
173 */
174
175 void mpmont_destroy(mpmont *mm)
176 {
177 MP_DROP(mm->m);
178 MP_DROP(mm->r);
179 MP_DROP(mm->r2);
180 MP_DROP(mm->mi);
181 }
182
183 /* --- @mpmont_reduce@ --- *
184 *
185 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
186 * @mp *d@ = destination
187 * @mp *a@ = source, assumed positive
188 *
189 * Returns: Result, %$a R^{-1} \bmod m$%.
190 */
191
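/* --- A tiny worked example --- *
 *
 * For illustration only, with numbers small enough to do by hand: pretend
 * the word base is %$2^5$%, so %$R = 32$%, and let %$m = 23$%.  Then
 * %$m^{-1} \equiv 7 \pmod{32}$%, so the magic value is %$m' \equiv -m^{-1}
 * \equiv 25 \pmod{32}$%.  To reduce %$a = 100$%: %$u = a m' \bmod R = 4$%;
 * %$a + u m = 192 = 6 R$%; dividing by %$R$% leaves 6, which is indeed
 * %$100 R^{-1} \equiv 100 \cdot 18 \equiv 6 \pmod{23}$%.  The word-by-word
 * loop below does the same thing one word at a time, using the low word of
 * @mi@ to clear the bottom word of @d@ on each pass.
 */
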
192 #ifdef MPMONT_DISABLE
193
194 mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
195 {
196 mp_div(0, &d, a, mm->m);
197 return (d);
198 }
199
200 #else
201
202 mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
203 {
204 size_t n = mm->n;
205
206 /* --- Check for serious Karatsuba reduction --- *
 *
 * For large moduli it's quicker to compute %$u = a m' \bmod R$% and then
 * %$(a + u m)/R$% using a couple of full-width multiplications, which can
 * take advantage of Karatsuba's algorithm, than to run the word-by-word
 * loop below.
 */
207
208 if (n > MPK_THRESH * 3) {
209 mp al;
210 mpw *vl;
211 mp *u;
212
213 if (MP_LEN(a) >= n)
214 vl = a->v + n;
215 else
216 vl = a->vl;
217 mp_build(&al, a->v, vl);
218 u = mp_mul(MP_NEW, &al, mm->mi);
219 if (MP_LEN(u) > n)
220 u->vl = u->v + n;
221 u = mp_mul(u, u, mm->m);
222 d = mp_add(d, a, u);
223 mp_drop(u);
224 }
225
226 /* --- Otherwise do it the hard way --- */
227
228 else {
229 mpw *dv, *dvl;
230 mpw *mv, *mvl;
231 mpw mi;
232 size_t k = n;
233
234 /* --- Initial conditioning of the arguments --- */
235
236 a = MP_COPY(a);
237 if (d)
238 MP_DROP(d);
239 d = a;
240 MP_DEST(d, 2 * n + 1, a->f);
241
242 dv = d->v; dvl = d->vl;
243 mv = mm->m->v; mvl = mm->m->vl;
244
245 /* --- Let's go to work --- */
246
247 mi = mm->mi->v[0];
248 while (k--) {
249 mpw u = MPW(*dv * mi);
250 MPX_UMLAN(dv, dvl, mv, mvl, u);
251 dv++;
252 }
253 }
254
255 /* --- Wrap everything up --- */
256
257 memmove(d->v, d->v + n, MPWS(MP_LEN(d) - n));
258 d->vl -= n;
259 if (MPX_UCMP(d->v, d->vl, >=, mm->m->v, mm->m->vl))
260 mpx_usub(d->v, d->vl, d->v, d->vl, mm->m->v, mm->m->vl);
261 if (d->f & MP_NEG) {
262 mpx_usub(d->v, d->vl, mm->m->v, mm->m->vl, d->v, d->vl);
263 d->f &= ~MP_NEG;
264 }
265 MP_SHRINK(d);
266 return (d);
267 }
268
269 #endif
270
271 /* --- @mpmont_mul@ --- *
272 *
273 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
274 * @mp *d@ = destination
275 * @mp *a, *b@ = sources, assumed positive
276 *
277 * Returns: Result, %$a b R^{-1} \bmod m$%.
278 */
279
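/* --- Example: multiplying two ordinary integers --- *
 *
 * An illustrative sketch, mirroring @tmul@ in the test rig below: convert
 * both operands to Montgomery form, multiply, then reduce, so that the
 * factors of %$R$% introduced by the conversions cancel against the
 * %$R^{-1}$% factors from @mpmont_mul@ and @mpmont_reduce@.  Here @mm@ is
 * an initialized context and @a@, @b@ are ordinary @mp@ values:
 *
 *   mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
 *   mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
 *   mp *d = mpmont_mul(&mm, MP_NEW, ar, br);
 *   d = mpmont_reduce(&mm, d, d);               (finally %$a b \bmod m$%)
 *   MP_DROP(ar); MP_DROP(br);
 */
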
280 #ifdef MPMONT_DISABLE
281
282 mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
283 {
284 d = mp_mul(d, a, b);
285 mp_div(0, &d, d, mm->m);
286 return (d);
287 }
288
289 #else
290
291 mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
292 {
293 if (mm->n > MPK_THRESH * 3) {
294 d = mp_mul(d, a, b);
295 d = mpmont_reduce(mm, d, d);
296 } else {
297 mpw *dv, *dvl;
298 mpw *av, *avl;
299 mpw *bv, *bvl;
300 mpw *mv, *mvl;
301 mpw y;
302 size_t n, i;
303 mpw mi;
304
305 /* --- Initial conditioning of the arguments --- */
306
307 if (MP_LEN(a) > MP_LEN(b)) {
308 mp *t = a; a = b; b = t;
309 }
310 n = MP_LEN(mm->m);
311
312 a = MP_COPY(a);
313 b = MP_COPY(b);
314 MP_DEST(d, 2 * n + 1, a->f | b->f | MP_UNDEF);
315 dv = d->v; dvl = d->vl;
316 MPX_ZERO(dv, dvl);
317 av = a->v; avl = a->vl;
318 bv = b->v; bvl = b->vl;
319 mv = mm->m->v; mvl = mm->m->vl;
320 y = *bv;
321
322 /* --- Montgomery multiplication phase --- */
323
324 i = 0;
325 mi = mm->mi->v[0];
326 while (i < n && av < avl) {
327 mpw x = *av++;
328 mpw u = MPW((*dv + x * y) * mi);
329 MPX_UMLAN(dv, dvl, bv, bvl, x);
330 MPX_UMLAN(dv, dvl, mv, mvl, u);
331 dv++;
332 i++;
333 }
334
335 /* --- Simpler Montgomery reduction phase --- */
336
337 while (i < n) {
338 mpw u = MPW(*dv * mi);
339 MPX_UMLAN(dv, dvl, mv, mvl, u);
340 dv++;
341 i++;
342 }
343
344 /* --- Done --- */
345
346 memmove(d->v, dv, MPWS(dvl - dv));
347 d->vl -= dv - d->v;
348 if (MPX_UCMP(d->v, d->vl, >=, mm->m->v, mm->m->vl))
349 mpx_usub(d->v, d->vl, d->v, d->vl, mm->m->v, mm->m->vl);
350 if ((a->f ^ b->f) & MP_NEG)
351 mpx_usub(d->v, d->vl, mm->m->v, mm->m->vl, d->v, d->vl);
352 MP_SHRINK(d);
353 d->f = (a->f | b->f) & MP_BURN;
354 MP_DROP(a);
355 MP_DROP(b);
356 }
357
358 return (d);
359 }
360
361 #endif
362
363 /* --- @mpmont_expr@ --- *
364 *
365 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
366 * @mp *d@ = fake destination
367 * @mp *a@ = base
368 * @mp *e@ = exponent
369 *
370 * Returns: Result, %$a^e R \bmod m$%.
371 */
372
373 #define WINSZ 5
374 #define TABSZ (1 << (WINSZ - 1))
375
376 #define THRESH (((MPW_BITS / WINSZ) << 2) + 1)
377
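/* --- Notes on the sliding window --- *
 *
 * @exp_simple@ below is a plain left-to-right square-and-multiply loop,
 * used when the exponent is shorter than @THRESH@ words.  The windowed
 * routine precomputes the @TABSZ@ Montgomerized odd powers of the base,
 * so that @tab[i]@ holds %$a^{2i+1} R \bmod m$%; a window of up to
 * @WINSZ@ bits then costs only a single multiply, with the window's
 * trailing zero bits deferred to the subsequent squarings.
 */
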
378 static mp *exp_simple(mpmont *mm, mp *d, mp *a, mp *e)
379 {
380 mpscan sc;
381 mp *ar;
382 mp *x = MP_COPY(mm->r);
383 mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;
384 unsigned sq = 0;
385
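  /* --- Skip top-end zero bits --- *
   *
   * If the exponent turns out to be zero, the answer is just @mm->r@,
   * i.e., 1 in Montgomery form.
   */
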
386 mp_rscan(&sc, e);
387 if (!MP_RSTEP(&sc))
388 goto exit;
389 while (!MP_RBIT(&sc))
390 MP_RSTEP(&sc);
391
392 /* --- Do the main body of the work --- */
393
394 ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
395 for (;;) {
396 sq++;
397 while (sq) {
398 mp *y;
399 y = mp_sqr(spare, x);
400 y = mpmont_reduce(mm, y, y);
401 spare = x; x = y;
402 sq--;
403 }
404 { mp *y = mpmont_mul(mm, spare, x, ar); spare = x; x = y; }
405 sq = 0;
406 for (;;) {
407 if (!MP_RSTEP(&sc))
408 goto done;
409 if (MP_RBIT(&sc))
410 break;
411 sq++;
412 }
413 }
414
415 /* --- Do a final round of squaring --- */
416
417 done:
418 while (sq) {
419 mp *y;
420 y = mp_sqr(spare, x);
421 y = mpmont_reduce(mm, y, y);
422 spare = x; x = y;
423 sq--;
424 }
425
426 /* --- Done --- */
427
428 MP_DROP(ar);
429 exit:
430 if (spare != MP_NEW)
431 MP_DROP(spare);
432 if (d != MP_NEW)
433 MP_DROP(d);
434 return (x);
435 }
436
437 mp *mpmont_expr(mpmont *mm, mp *d, mp *a, mp *e)
438 {
439 mp **tab;
440 mp *ar, *a2;
441 mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;
442 mp *x = MP_COPY(mm->r);
443 unsigned i, sq = 0;
444 mpscan sc;
445
446 /* --- Do we bother? --- */
447
448 MP_SHRINK(e);
449 if (MP_LEN(e) == 0)
450 goto exit;
451 if (MP_LEN(e) < THRESH) {
452 x->ref--;
453 return (exp_simple(mm, d, a, e));
454 }
455
456 /* --- Do the precomputation --- */
457
458 ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
459 a2 = mp_sqr(MP_NEW, ar);
460 a2 = mpmont_reduce(mm, a2, a2);
461 tab = xmalloc(TABSZ * sizeof(mp *));
462 tab[0] = ar;
463 for (i = 1; i < TABSZ; i++)
464 tab[i] = mpmont_mul(mm, MP_NEW, tab[i - 1], a2);
465 mp_drop(a2);
466 mp_rscan(&sc, e);
467
468 /* --- Skip top-end zero bits --- *
469 *
470 * The exponent is already known to be nonzero, so there must be a set
471 * bit somewhere; keep stepping until I find it.
472 */
473
474 MP_RSTEP(&sc);
475 while (!MP_RBIT(&sc)) {
476 MP_RSTEP(&sc);
477 }
478
479 /* --- Now for the main work --- */
480
481 for (;;) {
482 unsigned l = 0;
483 unsigned z = 0;
484
485 /* --- The next bit is set, so read a window index --- *
486 *
487 * Reset @i@ to zero and increment @sq@. Then, until either I read
488 * @WINSZ@ bits or I run out of bits, scan in a bit: if it's clear, bump
489 * the @z@ counter; if it's set, push a set bit into @i@, shift it over
490 * by @z@ bits, bump @sq@ by @z + 1@ and clear @z@. By the end of this
491 * palaver, @i@ is an index to the precomputed value in @tab@.
492 */
493
494 i = 0;
495 sq++;
496 for (;;) {
497 l++;
498 if (l >= WINSZ || !MP_RSTEP(&sc))
499 break;
500 if (!MP_RBIT(&sc))
501 z++;
502 else {
503 i = ((i << 1) | 1) << z;
504 sq += z + 1;
505 z = 0;
506 }
507 }
508
509 /* --- Do the squaring --- *
510 *
511 * Remember that @sq@ carries over from the zero-skipping stuff below.
512 */
513
514 while (sq) {
515 mp *y;
516 y = mp_sqr(spare, x);
517 y = mpmont_reduce(mm, y, y);
518 spare = x; x = y;
519 sq--;
520 }
521
522 /* --- Do the multiply --- */
523
524 { mp *y = mpmont_mul(mm, spare, x, tab[i]); spare = x; x = y; }
525
526 /* --- Now grind along through the rest of the bits --- */
527
528 sq = z;
529 for (;;) {
530 if (!MP_RSTEP(&sc))
531 goto done;
532 if (MP_RBIT(&sc))
533 break;
534 sq++;
535 }
536 }
537
538 /* --- Do a final round of squaring --- */
539
540 done:
541 while (sq) {
542 mp *y;
543 y = mp_sqr(spare, x);
544 y = mpmont_reduce(mm, y, y);
545 spare = x; x = y;
546 sq--;
547 }
548
549 /* --- Done --- */
550
551 for (i = 0; i < TABSZ; i++)
552 mp_drop(tab[i]);
553 xfree(tab);
554 exit:
555 mp_drop(d);
556 mp_drop(spare);
557 return (x);
558 }
559
560 /* --- @mpmont_exp@ --- *
561 *
562 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
563 * @mp *d@ = fake destination
564 * @mp *a@ = base
565 * @mp *e@ = exponent
566 *
567 * Returns: Result, %$a^e \bmod m$%.
568 */
569
570 mp *mpmont_exp(mpmont *mm, mp *d, mp *a, mp *e)
571 {
572 d = mpmont_expr(mm, d, a, e);
573 d = mpmont_reduce(mm, d, d);
574 return (d);
575 }
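
/* --- Example: straightforward modular exponentiation --- *
 *
 * An illustrative sketch, mirroring @texp@ in the test rig below; @m@, @a@
 * and @e@ are assumed to be existing @mp@ objects, with @m@ positive and
 * odd:
 *
 *   mpmont mm;
 *   mp *x;
 *
 *   mpmont_create(&mm, m);
 *   x = mpmont_exp(&mm, MP_NEW, a, e);
 *   mp_writefile(x, stdout, 10);
 *   MP_DROP(x);
 *   mpmont_destroy(&mm);
 *
 * Use @mpmont_expr@ instead when the Montgomerized result %$a^e R \bmod m$%
 * is wanted, e.g., as input to further @mpmont_mul@ calls.
 */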
576
577 /*----- Test rig ----------------------------------------------------------*/
578
579 #ifdef TEST_RIG
580
581 static int tcreate(dstr *v)
582 {
583 mp *m = *(mp **)v[0].buf;
584 mp *mi = *(mp **)v[1].buf;
585 mp *r = *(mp **)v[2].buf;
586 mp *r2 = *(mp **)v[3].buf;
587
588 mpmont mm;
589 int ok = 1;
590
591 mpmont_create(&mm, m);
592
593 if (mm.mi->v[0] != mi->v[0]) {
594 fprintf(stderr, "\n*** bad mi: found %lu, expected %lu",
595 (unsigned long)mm.mi->v[0], (unsigned long)mi->v[0]);
596 fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
597 fputc('\n', stderr);
598 ok = 0;
599 }
600
601 if (!MP_EQ(mm.r, r)) {
602 fputs("\n*** bad r", stderr);
603 fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
604 fputs("\nexpected ", stderr); mp_writefile(r, stderr, 10);
605 fputs("\n found ", stderr); mp_writefile(mm.r, stderr, 10);
606 fputc('\n', stderr);
607 ok = 0;
608 }
609
610 if (!MP_EQ(mm.r2, r2)) {
611 fputs("\n*** bad r2", stderr);
612 fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
613 fputs("\nexpected ", stderr); mp_writefile(r2, stderr, 10);
614 fputs("\n found ", stderr); mp_writefile(mm.r2, stderr, 10);
615 fputc('\n', stderr);
616 ok = 0;
617 }
618
619 MP_DROP(m);
620 MP_DROP(mi);
621 MP_DROP(r);
622 MP_DROP(r2);
623 mpmont_destroy(&mm);
624 assert(mparena_count(MPARENA_GLOBAL) == 0);
625 return (ok);
626 }
627
628 static int tmul(dstr *v)
629 {
630 mp *m = *(mp **)v[0].buf;
631 mp *a = *(mp **)v[1].buf;
632 mp *b = *(mp **)v[2].buf;
633 mp *r = *(mp **)v[3].buf;
634 int ok = 1;
635
636 mpmont mm;
637 mpmont_create(&mm, m);
638
639 {
640 mp *qr = mp_mul(MP_NEW, a, b);
641 mp_div(0, &qr, qr, m);
642
643 if (!MP_EQ(qr, r)) {
644 fputs("\n*** classical modmul failed", stderr);
645 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
646 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
647 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
648 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
649 fputs("\nqr = ", stderr); mp_writefile(qr, stderr, 10);
650 fputc('\n', stderr);
651 ok = 0;
652 }
653
654 mp_drop(qr);
655 }
656
657 {
658 mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
659 mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
660 mp *mr = mpmont_mul(&mm, MP_NEW, ar, br);
661 mr = mpmont_reduce(&mm, mr, mr);
662 if (!MP_EQ(mr, r)) {
663 fputs("\n*** montgomery modmul failed", stderr);
664 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
665 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
666 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
667 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
668 fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
669 fputc('\n', stderr);
670 ok = 0;
671 }
672 MP_DROP(ar); MP_DROP(br);
673 mp_drop(mr);
674 }
675
676
677 MP_DROP(m);
678 MP_DROP(a);
679 MP_DROP(b);
680 MP_DROP(r);
681 mpmont_destroy(&mm);
682 assert(mparena_count(MPARENA_GLOBAL) == 0);
683 return ok;
684 }
685
686 static int texp(dstr *v)
687 {
688 mp *m = *(mp **)v[0].buf;
689 mp *a = *(mp **)v[1].buf;
690 mp *b = *(mp **)v[2].buf;
691 mp *r = *(mp **)v[3].buf;
692 mp *mr;
693 int ok = 1;
694
695 mpmont mm;
696 mpmont_create(&mm, m);
697
698 mr = mpmont_exp(&mm, MP_NEW, a, b);
699
700 if (!MP_EQ(mr, r)) {
701 fputs("\n*** montgomery modexp failed", stderr);
702 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
703 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
704 fputs("\n e = ", stderr); mp_writefile(b, stderr, 10);
705 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
706 fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
707 fputc('\n', stderr);
708 ok = 0;
709 }
710
711 MP_DROP(m);
712 MP_DROP(a);
713 MP_DROP(b);
714 MP_DROP(r);
715 MP_DROP(mr);
716 mpmont_destroy(&mm);
717 assert(mparena_count(MPARENA_GLOBAL) == 0);
718 return ok;
719 }
720
721
722 static test_chunk tests[] = {
723 { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
724 { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
725 { "exp", texp, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
726 { 0, 0, { 0 } },
727 };
728
729 int main(int argc, char *argv[])
730 {
731 sub_init();
732 test_run(argc, argv, tests, SRCDIR "/tests/mpmont");
733 return (0);
734 }
735
736 #endif
737
738 /*----- That's all, folks -------------------------------------------------*/