math/mpx-mul4-{arm-neon,arm64-simd}.S, etc.: Add ARM versions of `mul4'.
[catacomb] / math / mpmont.c
CommitLineData
d3409d5e 1/* -*-c-*-
2 *
d3409d5e 3 * Montgomery reduction
4 *
5 * (c) 1999 Straylight/Edgeware
6 */
7
45c0fd36 8/*----- Licensing notice --------------------------------------------------*
d3409d5e 9 *
10 * This file is part of Catacomb.
11 *
12 * Catacomb is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU Library General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
45c0fd36 16 *
d3409d5e 17 * Catacomb is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Library General Public License for more details.
45c0fd36 21 *
d3409d5e 22 * You should have received a copy of the GNU Library General Public
23 * License along with Catacomb; if not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 * MA 02111-1307, USA.
26 */
27
d3409d5e 28/*----- Header files ------------------------------------------------------*/
29
444083ae
MW
30#include "config.h"
31#include "dispatch.h"
d3409d5e 32#include "mp.h"
33#include "mpmont.h"
34
52e4b041 35/*----- Tweakables --------------------------------------------------------*/
36
37/* --- @MPMONT_DISABLE@ --- *
38 *
39 * Replace all the clever Montgomery reduction with good old-fashioned long
40 * division.
41 */
42
43/* #define MPMONT_DISABLE */
44
d6b9dc04
MW
45#define MPMONT_KTHRESH (16*MPK_THRESH)
46
0e70bd46
MW
47/*----- Low-level implementation ------------------------------------------*/
48
49#ifndef MPMONT_DISABLE
50
51/* --- @redccore@ --- *
52 *
53 * Arguments: @mpw *dv, *dvl@ = base and limit of source/destination
54 * @const mpw *mv@ = base of modulus %$m$%
55 * @size_t n@ = length of modulus
56 * @const mpw *mi@ = base of REDC coefficient %$m'$%
57 *
58 * Returns: ---
59 *
60 * Use: Let %$a$% be the input operand. Store in %$d$% the value
61 * %$a + (m' a \bmod R) m$%. The destination has space for at
62 * least %$2 n + 1$% words of result.
63 */
64
444083ae
MW
65CPU_DISPATCH(static, (void), void, redccore,
66 (mpw *dv, mpw *dvl, const mpw *mv, size_t n, const mpw *mi),
67 (dv, dvl, mv, n, mi), pick_redccore, simple_redccore);
68
69static void simple_redccore(mpw *dv, mpw *dvl, const mpw *mv,
70 size_t n, const mpw *mi)
0e70bd46
MW
71{
72 mpw mi0 = *mi;
73 size_t i;
74
75 for (i = 0; i < n; i++) {
76 MPX_UMLAN(dv, dvl, mv, mv + n, MPW(*dv*mi0));
77 dv++;
78 }
79}
80
444083ae
MW
/* Glue for the assembler implementations: they handle only moduli whose
 * length is a multiple of four words, so wrap each in a checking function
 * which falls back to the portable code otherwise.
 */
#define MAYBE_REDC4(impl)						\
  extern void mpxmont_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				   size_t n, const mpw *mi);		\
  static void maybe_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				 size_t n, const mpw *mi)		\
  {									\
    if (n%4) simple_redccore(dv, dvl, mv, n, mi);			\
    else mpxmont_redc4_##impl(dv, dvl, mv, n, mi);			\
  }
90
91#if CPUFAM_X86
92 MAYBE_REDC4(x86_sse2)
b9b279b4 93 MAYBE_REDC4(x86_avx)
444083ae
MW
94#endif
95
3119b3ae
MW
96#if CPUFAM_AMD64
97 MAYBE_REDC4(amd64_sse2)
b9b279b4 98 MAYBE_REDC4(amd64_avx)
3119b3ae
MW
99#endif
100
ea1b3cec
MW
101#if CPUFAM_ARMEL
102 MAYBE_REDC4(arm_neon)
103#endif
104
105#if CPUFAM_ARM64
106 MAYBE_REDC4(arm64_simd)
107#endif
108
444083ae
MW
/* Choose the best available REDC implementation for the running CPU.
 * Within each family, preference order matters: AVX is tried before SSE2
 * on x86/amd64; 32-bit ARM needs a runtime NEON check; the ARM64 SIMD
 * code is baseline and is picked unconditionally.  The portable code is
 * the final fallback.
 */
static redccore__functype *pick_redccore(void)
{
#if CPUFAM_X86
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_x86_avx,
		     cpu_feature_p(CPUFEAT_X86_AVX));
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_x86_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_AMD64
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_amd64_avx,
		     cpu_feature_p(CPUFEAT_X86_AVX));
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_amd64_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_ARMEL
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_arm_neon,
		     cpu_feature_p(CPUFEAT_ARM_NEON));
#endif
#if CPUFAM_ARM64
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_arm64_simd, 1);
#endif
  DISPATCH_PICK_FALLBACK(mpmont_reduce, simple_redccore);
}
132
c618228a 133/* --- @mulcore@ --- *
0e70bd46
MW
134 *
135 * Arguments: @mpw *dv, *dvl@ = base and limit of source/destination
136 * @const mpw *av, *avl@ = base and limit of first multiplicand
137 * @const mpw *bv, *bvl@ = base and limit of second multiplicand
138 * @const mpw *mv@ = base of modulus %$m$%
139 * @size_t n@ = length of modulus
140 * @const mpw *mi@ = base of REDC coefficient %$m'$%
141 *
142 * Returns: ---
143 *
144 * Use: Let %$a$% and %$b$% be the multiplicands. Let %$w = a b$%.
 145 * 		Store in %$d$% the value %$w + (m' w \bmod R) m$%.
146 */
147
444083ae
MW
148CPU_DISPATCH(static, (void), void, mulcore,
149 (mpw *dv, mpw *dvl, const mpw *av, const mpw *avl,
150 const mpw *bv, const mpw *bvl, const mpw *mv,
151 size_t n, const mpw *mi),
152 (dv, dvl, av, avl, bv, bvl, mv, n, mi),
153 pick_mulcore, simple_mulcore);
154
155static void simple_mulcore(mpw *dv, mpw *dvl,
156 const mpw *av, const mpw *avl,
157 const mpw *bv, const mpw *bvl,
158 const mpw *mv, size_t n, const mpw *mi)
0e70bd46
MW
159{
160 mpw ai, b0, y, mi0 = *mi;
161 const mpw *tv, *tvl;
162 const mpw *mvl = mv + n;
163 size_t i = 0;
164
165 /* --- Initial setup --- */
166
167 MPX_ZERO(dv, dvl);
168 if (avl - av > bvl - bv) {
169 tv = av; av = bv; bv = tv;
170 tvl = avl; avl = bvl; bvl = tvl;
171 }
172 b0 = *bv;
173
174 /* --- Multiply, until we run out of multiplicand --- */
175
176 while (i < n && av < avl) {
177 ai = *av++;
178 y = MPW((*dv + ai*b0)*mi0);
179 MPX_UMLAN(dv, dvl, bv, bvl, ai);
180 MPX_UMLAN(dv, dvl, mv, mvl, y);
181 dv++; i++;
182 }
183
184 /* --- Continue reducing until we run out of modulus --- */
185
186 while (i < n) {
187 y = MPW(*dv*mi0);
188 MPX_UMLAN(dv, dvl, mv, mvl, y);
189 dv++; i++;
190 }
191}
192
444083ae
MW
/* Glue for the assembler multipliers: they handle only the case where
 * both multiplicands are exactly @n@ words long and @n@ is a multiple of
 * four; otherwise fall back to the portable code.  The assembler writes
 * exactly %$2 n + 1$% words of output, so clear any remaining space.
 */
#define MAYBE_MUL4(impl)						\
  extern void mpxmont_mul4_##impl(mpw *dv,				\
				  const mpw *av, const mpw *bv,		\
				  const mpw *mv,			\
				  size_t n, const mpw *mi);		\
  static void maybe_mul4_##impl(mpw *dv, mpw *dvl,			\
				const mpw *av, const mpw *avl,		\
				const mpw *bv, const mpw *bvl,		\
				const mpw *mv, size_t n, const mpw *mi) \
  {									\
    size_t an = avl - av, bn = bvl - bv;				\
    if (n%4 || an != n || bn != n)					\
      simple_mulcore(dv, dvl, av, avl, bv, bvl, mv, n, mi);		\
    else {								\
      mpxmont_mul4_##impl(dv, av, bv, mv, n, mi);			\
      MPX_ZERO(dv + 2*n + 1, dvl);					\
    }									\
  }
211
212#if CPUFAM_X86
213 MAYBE_MUL4(x86_sse2)
b9b279b4 214 MAYBE_MUL4(x86_avx)
444083ae
MW
215#endif
216
3119b3ae
MW
217#if CPUFAM_AMD64
218 MAYBE_MUL4(amd64_sse2)
b9b279b4 219 MAYBE_MUL4(amd64_avx)
3119b3ae
MW
220#endif
221
ea1b3cec
MW
222#if CPUFAM_ARMEL
223 MAYBE_MUL4(arm_neon)
224#endif
225
226#if CPUFAM_ARM64
227 MAYBE_MUL4(arm64_simd)
228#endif
229
444083ae
MW
/* Choose the best available fused multiply/reduce implementation for the
 * running CPU; preference order mirrors @pick_redccore@ above (AVX before
 * SSE2; runtime NEON check on 32-bit ARM; ARM64 SIMD unconditional), with
 * the portable code as the final fallback.
 */
static mulcore__functype *pick_mulcore(void)
{
#if CPUFAM_X86
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_x86_avx,
		     cpu_feature_p(CPUFEAT_X86_AVX));
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_x86_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_AMD64
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_amd64_avx,
		     cpu_feature_p(CPUFEAT_X86_AVX));
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_amd64_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_ARMEL
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_arm_neon,
		     cpu_feature_p(CPUFEAT_ARM_NEON));
#endif
#if CPUFAM_ARM64
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_arm64_simd, 1);
#endif
  DISPATCH_PICK_FALLBACK(mpmont_mul, simple_mulcore);
}
253
0e70bd46
MW
254/* --- @finish@ --- *
255 *
295f4f90
MW
256 * Arguments: @const mpmont *mm@ = pointer to a Montgomery reduction
257 * context
0e70bd46
MW
 258 * 		@mp *d@ = pointer to mostly-reduced operand
259 *
260 * Returns: ---
261 *
262 * Use: Applies the finishing touches to Montgomery reduction. The
 263 * 		operand @d@ is a multiple of %$R$% at this point, so it needs
264 * to be shifted down; the result might need a further
265 * subtraction to get it into the right interval; and we may
266 * need to do an additional subtraction if %$d$% is negative.
267 */
268
295f4f90 269static void finish(const mpmont *mm, mp *d)
0e70bd46
MW
270{
271 mpw *dv = d->v, *dvl = d->vl;
272 size_t n = mm->n;
273
274 memmove(dv, dv + n, MPWS(dvl - (dv + n)));
275 dvl -= n;
276
277 if (MPX_UCMP(dv, dvl, >=, mm->m->v, mm->m->vl))
278 mpx_usub(dv, dvl, dv, dvl, mm->m->v, mm->m->vl);
279
280 if (d->f & MP_NEG) {
281 mpx_usub(dv, dvl, mm->m->v, mm->m->vl, dv, dvl);
282 d->f &= ~MP_NEG;
283 }
284
285 d->vl = dvl;
286 MP_SHRINK(d);
287}
288
289#endif
290
4640a0dd 291/*----- Reduction and multiplication --------------------------------------*/
d3409d5e 292
293/* --- @mpmont_create@ --- *
294 *
295 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
296 * @mp *m@ = modulus to use
297 *
f4535c64 298 * Returns: Zero on success, nonzero on error.
d3409d5e 299 *
300 * Use: Initializes a Montgomery reduction context ready for use.
ef5f4810 301 * The argument @m@ must be a positive odd integer.
d3409d5e 302 */
303
52e4b041 304#ifdef MPMONT_DISABLE
305
f4535c64 306int mpmont_create(mpmont *mm, mp *m)
52e4b041 307{
308 mp_shrink(m);
309 mm->m = MP_COPY(m);
310 mm->r = MP_ONE;
311 mm->r2 = MP_ONE;
f5f35081 312 mm->mi = MP_ONE;
f4535c64 313 return (0);
52e4b041 314}
315
316#else
317
f4535c64 318int mpmont_create(mpmont *mm, mp *m)
d3409d5e 319{
f5f35081 320 size_t n = MP_LEN(m);
d34decd2 321 mp *r2 = mp_new(2 * n + 1, 0);
f5f35081 322 mp r;
323
d3409d5e 324 /* --- Take a copy of the modulus --- */
325
f4535c64 326 if (!MP_POSP(m) || !MP_ODDP(m))
327 return (-1);
d3409d5e 328 mm->m = MP_COPY(m);
329
f5f35081 330 /* --- Determine %$R^2$% --- */
d3409d5e 331
f5f35081 332 mm->n = n;
333 MPX_ZERO(r2->v, r2->vl - 1);
334 r2->vl[-1] = 1;
d3409d5e 335
f5f35081 336 /* --- Find the magic value @mi@ --- */
337
338 mp_build(&r, r2->v + n, r2->vl);
b817bfc6 339 mm->mi = mp_modinv(MP_NEW, m, &r);
f5f35081 340 mm->mi = mp_sub(mm->mi, &r, mm->mi);
362c3d18 341 MP_ENSURE(mm->mi, n);
d3409d5e 342
343 /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */
344
f5f35081 345 mm->r2 = MP_NEW;
346 mp_div(0, &mm->r2, r2, m);
347 mm->r = mpmont_reduce(mm, MP_NEW, mm->r2);
348 MP_DROP(r2);
f4535c64 349 return (0);
d3409d5e 350}
351
52e4b041 352#endif
353
d3409d5e 354/* --- @mpmont_destroy@ --- *
355 *
356 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
357 *
358 * Returns: ---
359 *
360 * Use: Disposes of a context when it's no longer of any use to
361 * anyone.
362 */
363
void mpmont_destroy(mpmont *mm)
{
  /* Release our references to the cached values; the context structure
   * itself belongs to the caller.
   */
  MP_DROP(mm->m);
  MP_DROP(mm->r);
  MP_DROP(mm->r2);
  MP_DROP(mm->mi);
}
371
372/* --- @mpmont_reduce@ --- *
373 *
295f4f90 374 * Arguments: @const mpmont *mm@ = pointer to Montgomery reduction context
d3409d5e 375 * @mp *d@ = destination
ef5f4810 376 * @mp *a@ = source, assumed positive
d3409d5e 377 *
378 * Returns: Result, %$a R^{-1} \bmod m$%.
379 */
380
52e4b041 381#ifdef MPMONT_DISABLE
382
mp *mpmont_reduce(const mpmont *mm, mp *d, mp *a)
{
  /* With Montgomery arithmetic disabled, %$R = 1$%, so reduction is just
   * the remainder on long division by @m@.
   */
  mp_div(0, &d, a, mm->m);
  return (d);
}
388
389#else
390
mp *mpmont_reduce(const mpmont *mm, mp *d, mp *a)
{
  size_t n = mm->n;

  /* --- Check for serious Karatsuba reduction --- *
   *
   * For large operands, it's faster to build the REDC quotient with two
   * full-size multiplications (which can use Karatsuba) than a word at a
   * time: compute %$u = m' a \bmod R$%, and then %$a + u m$% is a
   * multiple of %$R$%.
   */

  if (n > MPMONT_KTHRESH) {
    mp al;
    mpw *vl;
    mp *u;

    /* Make @al@ a borrowed view of %$a \bmod R$% (the low @n@ words). */
    if (MP_LEN(a) >= n) vl = a->v + n;
    else vl = a->vl;
    mp_build(&al, a->v, vl);
    u = mp_mul(MP_NEW, &al, mm->mi);
    if (MP_LEN(u) > n) u->vl = u->v + n;	/* truncate @u@ mod %$R$% */
    u = mp_mul(u, u, mm->m);
    d = mp_add(d, a, u);
    MP_ENSURE(d, n);
    mp_drop(u);
  }

  /* --- Otherwise do it the hard way --- */

  else {
    /* Take over @a@ as the working destination (dropping any previous
     * @d@), widen it to hold the full intermediate, and run the
     * word-at-a-time core.
     */
    a = MP_COPY(a);
    if (d) MP_DROP(d);
    d = a;
    MP_DEST(d, 2*mm->n + 1, a->f);
    redccore(d->v, d->vl, mm->m->v, mm->n, mm->mi->v);
  }

  /* --- Wrap everything up --- */

  finish(mm, d);
  return (d);
}
428
52e4b041 429#endif
430
d3409d5e 431/* --- @mpmont_mul@ --- *
432 *
295f4f90 433 * Arguments: @const mpmont *mm@ = pointer to Montgomery reduction context
d3409d5e 434 * @mp *d@ = destination
ef5f4810 435 * @mp *a, *b@ = sources, assumed positive
d3409d5e 436 *
437 * Returns: Result, %$a b R^{-1} \bmod m$%.
438 */
439
52e4b041 440#ifdef MPMONT_DISABLE
441
mp *mpmont_mul(const mpmont *mm, mp *d, mp *a, mp *b)
{
  /* %$R = 1$% in the disabled build: plain multiply, then divide. */
  d = mp_mul(d, a, b);
  mp_div(0, &d, d, mm->m);
  return (d);
}
448
449#else
450
mp *mpmont_mul(const mpmont *mm, mp *d, mp *a, mp *b)
{
  size_t n = mm->n;

  if (n > MPMONT_KTHRESH) {
    /* Large operands: multiply in full (may use Karatsuba), then apply
     * the large-operand reduction above.
     */
    d = mp_mul(d, a, b);
    d = mpmont_reduce(mm, d, d);
  } else {
    /* Small operands: use the fused multiply-and-reduce core.  Take
     * references to @a@ and @b@ first in case @d@ aliases one of them.
     */
    a = MP_COPY(a); b = MP_COPY(b);
    MP_DEST(d, 2*n + 1, a->f | b->f | MP_UNDEF);
    mulcore(d->v, d->vl, a->v, a->vl, b->v, b->vl,
	    mm->m->v, mm->n, mm->mi->v);
    /* Result is sensitive if either input was; negative iff exactly one
     * input was negative (finish() applies the sign fixup).
     */
    d->f = ((a->f | b->f) & MP_BURN) | ((a->f ^ b->f) & MP_NEG);
    finish(mm, d);
    MP_DROP(a); MP_DROP(b);
  }

  return (d);
}
470
52e4b041 471#endif
472
d3409d5e 473/*----- Test rig ----------------------------------------------------------*/
474
475#ifdef TEST_RIG
476
416b8869
MW
477#ifdef ENABLE_ASM_DEBUG
478# include "regdump.h"
479#endif
480
d3409d5e 481static int tcreate(dstr *v)
482{
483 mp *m = *(mp **)v[0].buf;
484 mp *mi = *(mp **)v[1].buf;
485 mp *r = *(mp **)v[2].buf;
486 mp *r2 = *(mp **)v[3].buf;
487
488 mpmont mm;
489 int ok = 1;
490
491 mpmont_create(&mm, m);
492
f5f35081 493 if (mm.mi->v[0] != mi->v[0]) {
d3409d5e 494 fprintf(stderr, "\n*** bad mi: found %lu, expected %lu",
f5f35081 495 (unsigned long)mm.mi->v[0], (unsigned long)mi->v[0]);
d3409d5e 496 fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
497 fputc('\n', stderr);
498 ok = 0;
499 }
500
032099d1 501 if (!MP_EQ(mm.r, r)) {
d3409d5e 502 fputs("\n*** bad r", stderr);
503 fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
504 fputs("\nexpected ", stderr); mp_writefile(r, stderr, 10);
45c0fd36 505 fputs("\n found ", stderr); mp_writefile(mm.r, stderr, 10);
d3409d5e 506 fputc('\n', stderr);
507 ok = 0;
508 }
509
032099d1 510 if (!MP_EQ(mm.r2, r2)) {
d3409d5e 511 fputs("\n*** bad r2", stderr);
512 fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
513 fputs("\nexpected ", stderr); mp_writefile(r2, stderr, 10);
45c0fd36 514 fputs("\n found ", stderr); mp_writefile(mm.r2, stderr, 10);
d3409d5e 515 fputc('\n', stderr);
516 ok = 0;
517 }
518
519 MP_DROP(m);
520 MP_DROP(mi);
521 MP_DROP(r);
522 MP_DROP(r2);
523 mpmont_destroy(&mm);
ef5f4810 524 assert(mparena_count(MPARENA_GLOBAL) == 0);
d3409d5e 525 return (ok);
526}
527
528static int tmul(dstr *v)
529{
530 mp *m = *(mp **)v[0].buf;
531 mp *a = *(mp **)v[1].buf;
532 mp *b = *(mp **)v[2].buf;
533 mp *r = *(mp **)v[3].buf;
d3409d5e 534 int ok = 1;
535
536 mpmont mm;
537 mpmont_create(&mm, m);
538
539 {
79a34029 540 mp *qr = mp_mul(MP_NEW, a, b);
541 mp_div(0, &qr, qr, m);
542
032099d1 543 if (!MP_EQ(qr, r)) {
79a34029 544 fputs("\n*** classical modmul failed", stderr);
545 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
546 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
547 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
548 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
549 fputs("\nqr = ", stderr); mp_writefile(qr, stderr, 10);
550 fputc('\n', stderr);
551 ok = 0;
552 }
553
554 mp_drop(qr);
555 }
556
557 {
d3409d5e 558 mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
559 mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
79a34029 560 mp *mr = mpmont_mul(&mm, MP_NEW, ar, br);
d3409d5e 561 mr = mpmont_reduce(&mm, mr, mr);
032099d1 562 if (!MP_EQ(mr, r)) {
79a34029 563 fputs("\n*** montgomery modmul failed", stderr);
564 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
565 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
566 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
567 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
568 fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
569 fputc('\n', stderr);
570 ok = 0;
571 }
d3409d5e 572 MP_DROP(ar); MP_DROP(br);
79a34029 573 mp_drop(mr);
d3409d5e 574 }
575
d3409d5e 576 MP_DROP(m);
577 MP_DROP(a);
578 MP_DROP(b);
579 MP_DROP(r);
d3409d5e 580 mpmont_destroy(&mm);
ef5f4810 581 assert(mparena_count(MPARENA_GLOBAL) == 0);
d3409d5e 582 return ok;
583}
584
/* Test dispatch table: maps each chunk name in the vector file to its
 * handler and the types of its arguments.
 */
static test_chunk tests[] = {
  { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { 0, 0, { 0 } },
};
590
int main(int argc, char *argv[])
{
  sub_init();
#ifdef ENABLE_ASM_DEBUG
  regdump_init();	/* register dumps for debugging the assembler cores */
#endif
  test_run(argc, argv, tests, SRCDIR "/t/mpmont");
  return (0);
}
600
601#endif
602
603/*----- That's all, folks -------------------------------------------------*/