Add an internal-representation no-op function.
[u/mdw/catacomb] / mpmont.c
/* -*-c-*-
 *
 * $Id: mpmont.c,v 1.14 2001/02/22 09:04:26 mdw Exp $
 *
 * Montgomery reduction
 *
 * (c) 1999 Straylight/Edgeware
 */

/*----- Licensing notice --------------------------------------------------*
 *
 * This file is part of Catacomb.
 *
 * Catacomb is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * Catacomb is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with Catacomb; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */

/*----- Revision history --------------------------------------------------*
 *
 * $Log: mpmont.c,v $
 * Revision 1.14  2001/02/22 09:04:26  mdw
 * Cosmetic fix.
 *
 * Revision 1.13  2001/02/03 12:00:29  mdw
 * Now @mp_drop@ checks its argument is non-NULL before attempting to free
 * it. Note that the macro version @MP_DROP@ doesn't do this.
 *
 * Revision 1.12  2000/10/08 15:48:35  mdw
 * Rename Karatsuba constants now that we have @gfx_kmul@ too.
 *
 * Revision 1.11  2000/10/08 12:04:27  mdw
 * (mpmont_reduce, mpmont_mul): Cope with negative numbers.
 *
 * Revision 1.10  2000/07/29 17:05:43  mdw
 * (mpmont_expr): Use sliding window exponentiation, with a drop-through
 * for small exponents to use a simple left-to-right bitwise routine. This
 * can reduce modexp times by up to a quarter.
 *
 * Revision 1.9  2000/06/17 11:45:09  mdw
 * Major memory management overhaul. Added arena support. Use the secure
 * arena for secret integers. Replace and improve the MP management macros
 * (e.g., replace MP_MODIFY by MP_DEST).
 *
 * Revision 1.8  1999/12/22 15:55:00  mdw
 * Adjust Karatsuba parameters.
 *
 * Revision 1.7  1999/12/11 01:51:14  mdw
 * Use a Karatsuba-based reduction for large moduli.
 *
 * Revision 1.6  1999/12/10 23:18:39  mdw
 * Change interface for suggested destinations.
 *
 * Revision 1.5  1999/11/22 13:58:40  mdw
 * Add an option to disable Montgomery reduction, so that performance
 * comparisons can be done.
 *
 * Revision 1.4  1999/11/21 12:27:06  mdw
 * Remove a division from the Montgomery setup by calculating
 * %$R^2 \bmod m$% first and then %$R \bmod m$% by Montgomery reduction of
 * %$R^2$%.
 *
 * Revision 1.3  1999/11/21 11:35:10  mdw
 * Performance improvement: use @mp_sqr@ and @mpmont_reduce@ instead of
 * @mpmont_mul@ for squaring in exponentiation.
 *
 * Revision 1.2  1999/11/19 13:17:26  mdw
 * Add extra interface to exponentiation which returns a Montgomerized
 * result.
 *
 * Revision 1.1  1999/11/17 18:02:16  mdw
 * New multiprecision integer arithmetic suite.
 *
 */

/*----- Header files ------------------------------------------------------*/

#include "mp.h"
#include "mpmont.h"

/*----- Tweakables --------------------------------------------------------*/

/* --- @MPMONT_DISABLE@ --- *
 *
 * Replace all the clever Montgomery reduction with good old-fashioned long
 * division.
 */

/* #define MPMONT_DISABLE */

/*----- Main code ---------------------------------------------------------*/

/* --- @mpmont_create@ --- *
 *
 * Arguments:  @mpmont *mm@ = pointer to Montgomery reduction context
 *             @mp *m@ = modulus to use
 *
 * Returns:    ---
 *
 * Use:        Initializes a Montgomery reduction context ready for use.
 *             The argument @m@ must be a positive odd integer.
 */
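
/* A typical lifecycle, as a sketch (the variable names here are
 * hypothetical, not part of the interface):
 *
 *      @mpmont mm;@
 *      @mpmont_create(&mm, m);@
 *      ... do modular arithmetic with @mm@ ...
 *      @mpmont_destroy(&mm);@
 *
 * In the normal (non-@MPMONT_DISABLE@) build, @mm.r@ ends up holding
 * %$R \bmod m$%, @mm.r2@ holds %$R^2 \bmod m$%, and @mm.mi@ is the
 * negation of %$m^{-1} \bmod R$%, where %$R = b^n$% for the word base
 * %$b$% and an %$n$%-word modulus.
 */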

#ifdef MPMONT_DISABLE

void mpmont_create(mpmont *mm, mp *m)
{
  mp_shrink(m);
  mm->m = MP_COPY(m);
  mm->r = MP_ONE;
  mm->r2 = MP_ONE;
  mm->mi = MP_ONE;
}

#else

void mpmont_create(mpmont *mm, mp *m)
{
  size_t n = MP_LEN(m);
  mp *r2 = mp_new(2 * n + 1, 0);
  mp r;

  /* --- Validate the arguments --- */

  assert(((void)"Montgomery modulus must be positive",
          (m->f & MP_NEG) == 0));
  assert(((void)"Montgomery modulus must be odd", m->v[0] & 1));

  /* --- Take a copy of the modulus --- */

  mp_shrink(m);
  mm->m = MP_COPY(m);

  /* --- Determine %$R^2$% --- */

  mm->n = n;
  MPX_ZERO(r2->v, r2->vl - 1);
  r2->vl[-1] = 1;

  /* --- Find the magic value @mi@ --- */
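  /* (The aim, assuming the usual Montgomery setup, is a value @mi@ with
   * %$m \cdot mi \equiv -1 \pmod R$%: the extended GCD finds the inverse
   * of @m@ modulo %$R$%, and subtracting it from %$R$% negates it.)
   */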

  mp_build(&r, r2->v + n, r2->vl);
  mm->mi = MP_NEW;
  mp_gcd(0, 0, &mm->mi, &r, m);
  mm->mi = mp_sub(mm->mi, &r, mm->mi);

  /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */

  mm->r2 = MP_NEW;
  mp_div(0, &mm->r2, r2, m);
  mm->r = mpmont_reduce(mm, MP_NEW, mm->r2);
  MP_DROP(r2);
}

#endif

/* --- @mpmont_destroy@ --- *
 *
 * Arguments:  @mpmont *mm@ = pointer to a Montgomery reduction context
 *
 * Returns:    ---
 *
 * Use:        Disposes of a context when it's no longer of any use to
 *             anyone.
 */

void mpmont_destroy(mpmont *mm)
{
  MP_DROP(mm->m);
  MP_DROP(mm->r);
  MP_DROP(mm->r2);
  MP_DROP(mm->mi);
}

/* --- @mpmont_reduce@ --- *
 *
 * Arguments:  @mpmont *mm@ = pointer to Montgomery reduction context
 *             @mp *d@ = destination
 *             @mp *a@ = source, assumed positive
 *
 * Returns:    Result, %$a R^{-1} \bmod m$%.
 */

#ifdef MPMONT_DISABLE

mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  mp_div(0, &d, a, mm->m);
  return (d);
}

#else

mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  size_t n = mm->n;

  /* --- Check for serious Karatsuba reduction --- */
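  /* (For very large moduli it's cheaper to reduce with two big
   * multiplications rather than a quadratic word-by-word pass: compute
   * %$u = a \cdot mi \bmod R$%, and then %$a + u m$% has its low @n@
   * words zero, just as the word-at-a-time loop below arranges, but the
   * multiplications can use the Karatsuba code.)
   */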

  if (n > MPK_THRESH * 3) {
    mp al;
    mpw *vl;
    mp *u;

    if (MP_LEN(a) >= n)
      vl = a->v + n;
    else
      vl = a->vl;
    mp_build(&al, a->v, vl);
    u = mp_mul(MP_NEW, &al, mm->mi);
    if (MP_LEN(u) > n)
      u->vl = u->v + n;
    u = mp_mul(u, u, mm->m);
    d = mp_add(d, a, u);
    mp_drop(u);
  }

  /* --- Otherwise do it the hard way --- */
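  /* (This is the classical word-by-word REDC step: for each of the @n@
   * low-order words, add on a multiple %$u m$% of the modulus, with @u@
   * chosen from the bottom word of @mi@ so that the bottom word of the
   * accumulator becomes zero.  After @n@ steps the low @n@ words are all
   * zero, and shifting them off divides exactly by %$R$%.)
   */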

  else {
    mpw *dv, *dvl;
    mpw *mv, *mvl;
    mpw mi;
    size_t k = n;

    /* --- Initial conditioning of the arguments --- */

    a = MP_COPY(a);
    if (d)
      MP_DROP(d);
    d = a;
    MP_DEST(d, 2 * n + 1, a->f);

    dv = d->v; dvl = d->vl;
    mv = mm->m->v; mvl = mm->m->vl;

    /* --- Let's go to work --- */

    mi = mm->mi->v[0];
    while (k--) {
      mpw u = MPW(*dv * mi);
      MPX_UMLAN(dv, dvl, mv, mvl, u);
      dv++;
    }
  }

  /* --- Wrap everything up --- */
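  /* (Shifting off the bottom @n@ words is the division by %$R$%; for
   * operands of the expected size the result is less than %$2m$%, so at
   * most one subtraction of @m@ is needed.  A negative input is patched
   * up at the end by replacing the result @d@ with %$m - d$%.)
   */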

  memmove(d->v, d->v + n, MPWS(MP_LEN(d) - n));
  d->vl -= n;
  if (MPX_UCMP(d->v, d->vl, >=, mm->m->v, mm->m->vl))
    mpx_usub(d->v, d->vl, d->v, d->vl, mm->m->v, mm->m->vl);
  if (d->f & MP_NEG) {
    mpx_usub(d->v, d->vl, mm->m->v, mm->m->vl, d->v, d->vl);
    d->f &= ~MP_NEG;
  }
  MP_SHRINK(d);
  return (d);
}

#endif

/* --- @mpmont_mul@ --- *
 *
 * Arguments:  @mpmont *mm@ = pointer to Montgomery reduction context
 *             @mp *d@ = destination
 *             @mp *a, *b@ = sources, assumed positive
 *
 * Returns:    Result, %$a b R^{-1} \bmod m$%.
 */

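/* To get an ordinary product %$a b \bmod m$%, convert to Montgomery form
 * first and reduce at the end, as the test rig below does (a sketch:
 * @ar@, @br@ and @d@ are illustrative names only):
 *
 *      @ar = mpmont_mul(mm, MP_NEW, a, mm->r2);@      (%$a R \bmod m$%)
 *      @br = mpmont_mul(mm, MP_NEW, b, mm->r2);@      (%$b R \bmod m$%)
 *      @d = mpmont_mul(mm, MP_NEW, ar, br);@          (%$a b R \bmod m$%)
 *      @d = mpmont_reduce(mm, d, d);@                 (%$a b \bmod m$%)
 */
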
#ifdef MPMONT_DISABLE

mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  d = mp_mul(d, a, b);
  mp_div(0, &d, d, mm->m);
  return (d);
}

#else

mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  if (mm->n > MPK_THRESH * 3) {
    d = mp_mul(d, a, b);
    d = mpmont_reduce(mm, d, d);
  } else {
    mpw *dv, *dvl;
    mpw *av, *avl;
    mpw *bv, *bvl;
    mpw *mv, *mvl;
    mpw y;
    size_t n, i;
    mpw mi;

    /* --- Initial conditioning of the arguments --- */

    if (MP_LEN(a) > MP_LEN(b)) {
      mp *t = a; a = b; b = t;
    }
    n = MP_LEN(mm->m);

    a = MP_COPY(a);
    b = MP_COPY(b);
    MP_DEST(d, 2 * n + 1, a->f | b->f | MP_UNDEF);
    dv = d->v; dvl = d->vl;
    MPX_ZERO(dv, dvl);
    av = a->v; avl = a->vl;
    bv = b->v; bvl = b->vl;
    mv = mm->m->v; mvl = mm->m->vl;
    y = *bv;

    /* --- Montgomery multiplication phase --- */
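    /* (Each pass folds in one word @x@ of @a@ and cancels the bottom word
     * of the accumulator at the same time: @u@ is chosen, using the bottom
     * word of @mi@, so that adding @x b@ and then @u m@ leaves the low
     * word of @d@ zero; stepping @dv@ on then divides by the word base.)
     */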

    i = 0;
    mi = mm->mi->v[0];
    while (i < n && av < avl) {
      mpw x = *av++;
      mpw u = MPW((*dv + x * y) * mi);
      MPX_UMLAN(dv, dvl, bv, bvl, x);
      MPX_UMLAN(dv, dvl, mv, mvl, u);
      dv++;
      i++;
    }

    /* --- Simpler Montgomery reduction phase --- */

    while (i < n) {
      mpw u = MPW(*dv * mi);
      MPX_UMLAN(dv, dvl, mv, mvl, u);
      dv++;
      i++;
    }

    /* --- Done --- */

    memmove(d->v, dv, MPWS(dvl - dv));
    d->vl -= dv - d->v;
    if (MPX_UCMP(d->v, d->vl, >=, mm->m->v, mm->m->vl))
      mpx_usub(d->v, d->vl, d->v, d->vl, mm->m->v, mm->m->vl);
    if ((a->f ^ b->f) & MP_NEG)
      mpx_usub(d->v, d->vl, mm->m->v, mm->m->vl, d->v, d->vl);
    MP_SHRINK(d);
    d->f = (a->f | b->f) & MP_BURN;
    MP_DROP(a);
    MP_DROP(b);
  }

  return (d);
}

#endif

/* --- @mpmont_expr@ --- *
 *
 * Arguments:  @mpmont *mm@ = pointer to Montgomery reduction context
 *             @mp *d@ = fake destination
 *             @mp *a@ = base
 *             @mp *e@ = exponent
 *
 * Returns:    Result, %$a^e R \bmod m$%.
 */

#define WINSZ 5
#define TABSZ (1 << (WINSZ - 1))

#define THRESH (((MPW_BITS / WINSZ) << 2) + 1)
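
/* The table built by @mpmont_expr@ holds the Montgomery forms of the odd
 * powers of the base: @tab[i]@ is %$a^{2i+1} R \bmod m$%.  The @THRESH@
 * cutoff hands short exponents to @exp_simple@, a plain left-to-right
 * square-and-multiply loop; for example, with 32-bit words and the window
 * size of 5 above, the sliding-window code is only used for exponents of
 * at least 25 words.
 */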

static mp *exp_simple(mpmont *mm, mp *d, mp *a, mp *e)
{
  mpscan sc;
  mp *ar;
  mp *x = MP_COPY(mm->r);
  mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;
  unsigned sq = 0;

  mp_rscan(&sc, e);
  if (!MP_RSTEP(&sc))
    goto exit;
  while (!MP_RBIT(&sc))
    MP_RSTEP(&sc);

  /* --- Do the main body of the work --- */

  ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
  for (;;) {
    sq++;
    while (sq) {
      mp *y;
      y = mp_sqr(spare, x);
      y = mpmont_reduce(mm, y, y);
      spare = x; x = y;
      sq--;
    }
    { mp *y = mpmont_mul(mm, spare, x, ar); spare = x; x = y; }
    sq = 0;
    for (;;) {
      if (!MP_RSTEP(&sc))
        goto done;
      if (MP_RBIT(&sc))
        break;
      sq++;
    }
  }

  /* --- Do a final round of squaring --- */

done:
  while (sq) {
    mp *y;
    y = mp_sqr(spare, x);
    y = mpmont_reduce(mm, y, y);
    spare = x; x = y;
    sq--;
  }

  /* --- Done --- */

  MP_DROP(ar);
exit:
  if (spare != MP_NEW)
    MP_DROP(spare);
  if (d != MP_NEW)
    MP_DROP(d);
  return (x);
}

mp *mpmont_expr(mpmont *mm, mp *d, mp *a, mp *e)
{
  mp **tab;
  mp *ar, *a2;
  mp *spare = (e->f & MP_BURN) ? MP_NEWSEC : MP_NEW;
  mp *x = MP_COPY(mm->r);
  unsigned i, sq = 0;
  mpscan sc;

  /* --- Do we bother? --- */

  MP_SHRINK(e);
  if (MP_LEN(e) == 0)
    goto exit;
  if (MP_LEN(e) < THRESH) {
    x->ref--;
    return (exp_simple(mm, d, a, e));
  }

  /* --- Do the precomputation --- */

  ar = mpmont_mul(mm, MP_NEW, a, mm->r2);
  a2 = mp_sqr(MP_NEW, ar);
  a2 = mpmont_reduce(mm, a2, a2);
  tab = xmalloc(TABSZ * sizeof(mp *));
  tab[0] = ar;
  for (i = 1; i < TABSZ; i++)
    tab[i] = mpmont_mul(mm, MP_NEW, tab[i - 1], a2);
  mp_drop(a2);
  mp_rscan(&sc, e);

  /* --- Skip top-end zero bits --- *
   *
   * If the initial step worked, there must be a set bit somewhere, so keep
   * stepping until I find it.
   */

  MP_RSTEP(&sc);
  while (!MP_RBIT(&sc))
    MP_RSTEP(&sc);

  /* --- Now for the main work --- */

  for (;;) {
    unsigned l = 0;
    unsigned z = 0;

    /* --- The next bit is set, so read a window index --- *
     *
     * Reset @i@ to zero and increment @sq@. Then, until either I read
     * @WINSZ@ bits or I run out of bits, scan in a bit: if it's clear, bump
     * the @z@ counter; if it's set, push a set bit into @i@, shift it over
     * by @z@ bits, bump @sq@ by @z + 1@ and clear @z@. By the end of this
     * palaver, @i@ is an index to the precomputed value in @tab@.
     */
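    /* (For example, if the window bits scanned are %$1011$% followed by a
     * zero, this leaves @i@ = 5, so @tab[i]@ holds the Montgomery form of
     * %$a^{11}$%; @sq@ is bumped by 4 for the four squarings, and @z@ = 1
     * so that the trailing zero bit is squared for later.)
     */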

    i = 0;
    sq++;
    for (;;) {
      l++;
      if (l >= WINSZ || !MP_RSTEP(&sc))
        break;
      if (!MP_RBIT(&sc))
        z++;
      else {
        i = ((i << 1) | 1) << z;
        sq += z + 1;
        z = 0;
      }
    }

    /* --- Do the squaring --- *
     *
     * Remember that @sq@ carries over from the zero-skipping stuff below.
     */

    while (sq) {
      mp *y;
      y = mp_sqr(spare, x);
      y = mpmont_reduce(mm, y, y);
      spare = x; x = y;
      sq--;
    }

    /* --- Do the multiply --- */

    { mp *y = mpmont_mul(mm, spare, x, tab[i]); spare = x; x = y; }

    /* --- Now grind along through the rest of the bits --- */

    sq = z;
    for (;;) {
      if (!MP_RSTEP(&sc))
        goto done;
      if (MP_RBIT(&sc))
        break;
      sq++;
    }
  }

  /* --- Do a final round of squaring --- */

done:
  while (sq) {
    mp *y;
    y = mp_sqr(spare, x);
    y = mpmont_reduce(mm, y, y);
    spare = x; x = y;
    sq--;
  }

  /* --- Done --- */

  for (i = 0; i < TABSZ; i++)
    mp_drop(tab[i]);
  xfree(tab);
exit:
  mp_drop(d);
  mp_drop(spare);
  return (x);
}

/* --- @mpmont_exp@ --- *
 *
 * Arguments:  @mpmont *mm@ = pointer to Montgomery reduction context
 *             @mp *d@ = fake destination
 *             @mp *a@ = base
 *             @mp *e@ = exponent
 *
 * Returns:    Result, %$a^e \bmod m$%.
 */

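/* A sketch of typical use (the names @p@, @g@, @x@ and @modexp@ are
 * hypothetical, not part of Catacomb):
 *
 *      @mp *modexp(mp *p, mp *g, mp *x)
 *      {
 *        mpmont mm;
 *        mp *y;
 *
 *        mpmont_create(&mm, p);
 *        y = mpmont_exp(&mm, MP_NEW, g, x);
 *        mpmont_destroy(&mm);
 *        return (y);
 *      }@
 *
 * computes %$g^x \bmod p$% for an odd positive modulus @p@.
 */
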
mp *mpmont_exp(mpmont *mm, mp *d, mp *a, mp *e)
{
  d = mpmont_expr(mm, d, a, e);
  d = mpmont_reduce(mm, d, d);
  return (d);
}

/*----- Test rig ----------------------------------------------------------*/

#ifdef TEST_RIG

static int tcreate(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *mi = *(mp **)v[1].buf;
  mp *r = *(mp **)v[2].buf;
  mp *r2 = *(mp **)v[3].buf;

  mpmont mm;
  int ok = 1;

  mpmont_create(&mm, m);

  if (mm.mi->v[0] != mi->v[0]) {
    fprintf(stderr, "\n*** bad mi: found %lu, expected %lu",
            (unsigned long)mm.mi->v[0], (unsigned long)mi->v[0]);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r, r)) {
    fputs("\n*** bad r", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r2, r2)) {
    fputs("\n*** bad r2", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r2, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r2, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  MP_DROP(m);
  MP_DROP(mi);
  MP_DROP(r);
  MP_DROP(r2);
  mpmont_destroy(&mm);
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return (ok);
}

static int tmul(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *a = *(mp **)v[1].buf;
  mp *b = *(mp **)v[2].buf;
  mp *r = *(mp **)v[3].buf;
  int ok = 1;

  mpmont mm;
  mpmont_create(&mm, m);

  {
    mp *qr = mp_mul(MP_NEW, a, b);
    mp_div(0, &qr, qr, m);

    if (!MP_EQ(qr, r)) {
      fputs("\n*** classical modmul failed", stderr);
      fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
      fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
      fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
      fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
      fputs("\nqr = ", stderr); mp_writefile(qr, stderr, 10);
      fputc('\n', stderr);
      ok = 0;
    }

    mp_drop(qr);
  }

  {
    mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
    mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
    mp *mr = mpmont_mul(&mm, MP_NEW, ar, br);
    mr = mpmont_reduce(&mm, mr, mr);
    if (!MP_EQ(mr, r)) {
      fputs("\n*** montgomery modmul failed", stderr);
      fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
      fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
      fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
      fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
      fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
      fputc('\n', stderr);
      ok = 0;
    }
    MP_DROP(ar); MP_DROP(br);
    mp_drop(mr);
  }

  MP_DROP(m);
  MP_DROP(a);
  MP_DROP(b);
  MP_DROP(r);
  mpmont_destroy(&mm);
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return ok;
}

static int texp(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *a = *(mp **)v[1].buf;
  mp *b = *(mp **)v[2].buf;
  mp *r = *(mp **)v[3].buf;
  mp *mr;
  int ok = 1;

  mpmont mm;
  mpmont_create(&mm, m);

  mr = mpmont_exp(&mm, MP_NEW, a, b);

  if (!MP_EQ(mr, r)) {
    fputs("\n*** montgomery modexp failed", stderr);
    fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
    fputs("\n e = ", stderr); mp_writefile(b, stderr, 10);
    fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
    fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  MP_DROP(m);
  MP_DROP(a);
  MP_DROP(b);
  MP_DROP(r);
  MP_DROP(mr);
  mpmont_destroy(&mm);
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return ok;
}

static test_chunk tests[] = {
  { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "exp", texp, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { 0, 0, { 0 } },
};

int main(int argc, char *argv[])
{
  sub_init();
  test_run(argc, argv, tests, SRCDIR "/tests/mpmont");
  return (0);
}

#endif

/*----- That's all, folks -------------------------------------------------*/