math/mpmont.c: Montgomery reduction and multiplication.
[catacomb] / math / mpmont.c
1 /* -*-c-*-
2 *
3 * Montgomery reduction
4 *
5 * (c) 1999 Straylight/Edgeware
6 */
7
8 /*----- Licensing notice --------------------------------------------------*
9 *
10 * This file is part of Catacomb.
11 *
12 * Catacomb is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU Library General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
16 *
17 * Catacomb is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Library General Public License for more details.
21 *
22 * You should have received a copy of the GNU Library General Public
23 * License along with Catacomb; if not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 * MA 02111-1307, USA.
26 */
27
28 /*----- Header files ------------------------------------------------------*/
29
30 #include "config.h"
31 #include "dispatch.h"
32 #include "mp.h"
33 #include "mpmont.h"
34
35 /*----- Tweakables --------------------------------------------------------*/
36
37 /* --- @MPMONT_DISABLE@ --- *
38 *
39 * Replace all the clever Montgomery reduction with good old-fashioned long
40 * division.
41 */
42
43 /* #define MPMONT_DISABLE */
44
45 #define MPMONT_KTHRESH (16*MPK_THRESH)
46
47 /*----- Low-level implementation ------------------------------------------*/
48
49 #ifndef MPMONT_DISABLE
50
51 /* --- @redccore@ --- *
52 *
53 * Arguments: @mpw *dv, *dvl@ = base and limit of source/destination
54 * @const mpw *mv@ = base of modulus %$m$%
55 * @size_t n@ = length of modulus
56 * @const mpw *mi@ = base of REDC coefficient %$m'$%
57 *
58 * Returns: ---
59 *
60 * Use: Let %$a$% be the input operand. Store in %$d$% the value
61 * %$a + (m' a \bmod R) m$%. The destination has space for at
62 * least %$2 n + 1$% words of result.
63 */
64
/* Runtime dispatch for the REDC core: @pick_redccore@ chooses an
 * implementation once, falling back to the portable @simple_redccore@.
 */
CPU_DISPATCH(static, (void), void, redccore,
	     (mpw *dv, mpw *dvl, const mpw *mv, size_t n, const mpw *mi),
	     (dv, dvl, mv, n, mi), pick_redccore, simple_redccore);
68
69 static void simple_redccore(mpw *dv, mpw *dvl, const mpw *mv,
70 size_t n, const mpw *mi)
71 {
72 mpw mi0 = *mi;
73 size_t i;
74
75 for (i = 0; i < n; i++) {
76 MPX_UMLAN(dv, dvl, mv, mv + n, MPW(*dv*mi0));
77 dv++;
78 }
79 }
80
/* --- @MAYBE_REDC4@ --- *
 *
 * Expand to a wrapper @maybe_redc4_IMPL@ around the external assembler
 * kernel @mpxmont_redc4_IMPL@.  The 4-way unrolled kernel only handles
 * modulus lengths which are a multiple of four; other lengths fall back
 * to @simple_redccore@.
 */

#define MAYBE_REDC4(impl)						\
  extern void mpxmont_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				   size_t n, const mpw *mi);		\
  static void maybe_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				 size_t n, const mpw *mi)		\
  {									\
    if (n%4) simple_redccore(dv, dvl, mv, n, mi);			\
    else mpxmont_redc4_##impl(dv, dvl, mv, n, mi);			\
  }

#if CPUFAM_X86
  MAYBE_REDC4(x86_sse2)
#endif

#if CPUFAM_AMD64
  MAYBE_REDC4(amd64_sse2)
#endif
98
/* Select the best available REDC core for this CPU: prefer the SSE2
 * kernels where the feature test passes, else the portable loop.
 */
static redccore__functype *pick_redccore(void)
{
#if CPUFAM_X86
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_x86_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_AMD64
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_amd64_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
  DISPATCH_PICK_FALLBACK(mpmont_reduce, simple_redccore);
}
111
/* --- @mulcore@ --- *
 *
 * Arguments:	@mpw *dv, *dvl@ = base and limit of source/destination
 *		@const mpw *av, *avl@ = base and limit of first multiplicand
 *		@const mpw *bv, *bvl@ = base and limit of second multiplicand
 *		@const mpw *mv@ = base of modulus %$m$%
 *		@size_t n@ = length of modulus
 *		@const mpw *mi@ = base of REDC coefficient %$m'$%
 *
 * Returns:	---
 *
 * Use:		Let %$a$% and %$b$% be the multiplicands.  Let %$w = a b$%.
 *		Store in %$d$% the value %$a b + (m' a b \bmod R) m$%.
 */

/* Runtime dispatch for the fused multiply-and-reduce core. */
CPU_DISPATCH(static, (void), void, mulcore,
	     (mpw *dv, mpw *dvl, const mpw *av, const mpw *avl,
	      const mpw *bv, const mpw *bvl, const mpw *mv,
	      size_t n, const mpw *mi),
	     (dv, dvl, av, avl, bv, bvl, mv, n, mi),
	     pick_mulcore, simple_mulcore);
133
134 static void simple_mulcore(mpw *dv, mpw *dvl,
135 const mpw *av, const mpw *avl,
136 const mpw *bv, const mpw *bvl,
137 const mpw *mv, size_t n, const mpw *mi)
138 {
139 mpw ai, b0, y, mi0 = *mi;
140 const mpw *tv, *tvl;
141 const mpw *mvl = mv + n;
142 size_t i = 0;
143
144 /* --- Initial setup --- */
145
146 MPX_ZERO(dv, dvl);
147 if (avl - av > bvl - bv) {
148 tv = av; av = bv; bv = tv;
149 tvl = avl; avl = bvl; bvl = tvl;
150 }
151 b0 = *bv;
152
153 /* --- Multiply, until we run out of multiplicand --- */
154
155 while (i < n && av < avl) {
156 ai = *av++;
157 y = MPW((*dv + ai*b0)*mi0);
158 MPX_UMLAN(dv, dvl, bv, bvl, ai);
159 MPX_UMLAN(dv, dvl, mv, mvl, y);
160 dv++; i++;
161 }
162
163 /* --- Continue reducing until we run out of modulus --- */
164
165 while (i < n) {
166 y = MPW(*dv*mi0);
167 MPX_UMLAN(dv, dvl, mv, mvl, y);
168 dv++; i++;
169 }
170 }
171
/* --- @MAYBE_MUL4@ --- *
 *
 * Expand to a wrapper @maybe_mul4_IMPL@ around the external assembler
 * kernel @mpxmont_mul4_IMPL@.  The kernel needs the modulus length to
 * be a multiple of four and both multiplicands to be exactly the
 * modulus length; anything else falls back to @simple_mulcore@.  The
 * kernel only fills the low %$2 n + 1$% words of the destination, so
 * the remainder is cleared here.
 */

#define MAYBE_MUL4(impl)						\
  extern void mpxmont_mul4_##impl(mpw *dv,				\
				  const mpw *av, const mpw *bv,		\
				  const mpw *mv,			\
				  size_t n, const mpw *mi);		\
  static void maybe_mul4_##impl(mpw *dv, mpw *dvl,			\
				const mpw *av, const mpw *avl,		\
				const mpw *bv, const mpw *bvl,		\
				const mpw *mv, size_t n, const mpw *mi) \
  {									\
    size_t an = avl - av, bn = bvl - bv;				\
    if (n%4 || an != n || bn != n)					\
      simple_mulcore(dv, dvl, av, avl, bv, bvl, mv, n, mi);		\
    else {								\
      mpxmont_mul4_##impl(dv, av, bv, mv, n, mi);			\
      MPX_ZERO(dv + 2*n + 1, dvl);					\
    }									\
  }

#if CPUFAM_X86
  MAYBE_MUL4(x86_sse2)
#endif

#if CPUFAM_AMD64
  MAYBE_MUL4(amd64_sse2)
#endif
198
/* Select the best available multiply core for this CPU: prefer the
 * SSE2 kernels where the feature test passes, else the portable loop.
 */
static mulcore__functype *pick_mulcore(void)
{
#if CPUFAM_X86
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_x86_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_AMD64
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_amd64_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
  DISPATCH_PICK_FALLBACK(mpmont_mul, simple_mulcore);
}
211
212 /* --- @finish@ --- *
213 *
214 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
 *		@mp *d@ = pointer to mostly-reduced operand
216 *
217 * Returns: ---
218 *
219 * Use: Applies the finishing touches to Montgomery reduction. The
 *		operand @d@ is a multiple of %$R$% at this point, so it needs
221 * to be shifted down; the result might need a further
222 * subtraction to get it into the right interval; and we may
223 * need to do an additional subtraction if %$d$% is negative.
224 */
225
226 static void finish(mpmont *mm, mp *d)
227 {
228 mpw *dv = d->v, *dvl = d->vl;
229 size_t n = mm->n;
230
231 memmove(dv, dv + n, MPWS(dvl - (dv + n)));
232 dvl -= n;
233
234 if (MPX_UCMP(dv, dvl, >=, mm->m->v, mm->m->vl))
235 mpx_usub(dv, dvl, dv, dvl, mm->m->v, mm->m->vl);
236
237 if (d->f & MP_NEG) {
238 mpx_usub(dv, dvl, mm->m->v, mm->m->vl, dv, dvl);
239 d->f &= ~MP_NEG;
240 }
241
242 d->vl = dvl;
243 MP_SHRINK(d);
244 }
245
246 #endif
247
248 /*----- Reduction and multiplication --------------------------------------*/
249
250 /* --- @mpmont_create@ --- *
251 *
252 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
253 * @mp *m@ = modulus to use
254 *
255 * Returns: Zero on success, nonzero on error.
256 *
257 * Use: Initializes a Montgomery reduction context ready for use.
258 * The argument @m@ must be a positive odd integer.
259 */
260
261 #ifdef MPMONT_DISABLE
262
263 int mpmont_create(mpmont *mm, mp *m)
264 {
265 mp_shrink(m);
266 mm->m = MP_COPY(m);
267 mm->r = MP_ONE;
268 mm->r2 = MP_ONE;
269 mm->mi = MP_ONE;
270 return (0);
271 }
272
273 #else
274
275 int mpmont_create(mpmont *mm, mp *m)
276 {
277 size_t n = MP_LEN(m);
278 mp *r2 = mp_new(2 * n + 1, 0);
279 mp r;
280
281 /* --- Take a copy of the modulus --- */
282
283 if (!MP_POSP(m) || !MP_ODDP(m))
284 return (-1);
285 mm->m = MP_COPY(m);
286
287 /* --- Determine %$R^2$% --- */
288
289 mm->n = n;
290 MPX_ZERO(r2->v, r2->vl - 1);
291 r2->vl[-1] = 1;
292
293 /* --- Find the magic value @mi@ --- */
294
295 mp_build(&r, r2->v + n, r2->vl);
296 mm->mi = mp_modinv(MP_NEW, m, &r);
297 mm->mi = mp_sub(mm->mi, &r, mm->mi);
298 MP_ENSURE(mm->mi, n);
299
300 /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */
301
302 mm->r2 = MP_NEW;
303 mp_div(0, &mm->r2, r2, m);
304 mm->r = mpmont_reduce(mm, MP_NEW, mm->r2);
305 MP_DROP(r2);
306 return (0);
307 }
308
309 #endif
310
311 /* --- @mpmont_destroy@ --- *
312 *
313 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
314 *
315 * Returns: ---
316 *
317 * Use: Disposes of a context when it's no longer of any use to
318 * anyone.
319 */
320
321 void mpmont_destroy(mpmont *mm)
322 {
323 MP_DROP(mm->m);
324 MP_DROP(mm->r);
325 MP_DROP(mm->r2);
326 MP_DROP(mm->mi);
327 }
328
329 /* --- @mpmont_reduce@ --- *
330 *
331 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
332 * @mp *d@ = destination
333 * @mp *a@ = source, assumed positive
334 *
335 * Returns: Result, %$a R^{-1} \bmod m$%.
336 */
337
338 #ifdef MPMONT_DISABLE
339
/* With Montgomery arithmetic disabled, "reduction" is just taking the
 * remainder on division by the modulus.
 */
mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  mp_div(0, &d, a, mm->m);
  return (d);
}
345
346 #else
347
mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  size_t n = mm->n;

  /* --- Check for serious Karatsuba reduction --- *
   *
   * For large moduli, compute %$u = m' a \bmod R$% and then
   * %$d = a + u m$% using the subquadratic multiplier; the result is
   * divisible by %$R$%, and @finish@ does the shift.
   */

  if (n > MPMONT_KTHRESH) {
    mp al;
    mpw *vl;
    mp *u;

    /* View the low n words of a (i.e., a mod R) without copying. */
    if (MP_LEN(a) >= n) vl = a->v + n;
    else vl = a->vl;
    mp_build(&al, a->v, vl);
    u = mp_mul(MP_NEW, &al, mm->mi);
    /* Truncate the product back mod R before multiplying by m. */
    if (MP_LEN(u) > n) u->vl = u->v + n;
    u = mp_mul(u, u, mm->m);
    d = mp_add(d, a, u);
    MP_ENSURE(d, n);
    mp_drop(u);
  }

  /* --- Otherwise do it the hard way --- *
   *
   * Work in place on a copy of @a@: take a reference first so that a
   * caller-supplied @d@ aliasing @a@ stays valid when it's dropped.
   */

  else {
    a = MP_COPY(a);
    if (d) MP_DROP(d);
    d = a;
    MP_DEST(d, 2*mm->n + 1, a->f);
    redccore(d->v, d->vl, mm->m->v, mm->n, mm->mi->v);
  }

  /* --- Wrap everything up --- *
   *
   * Shift down by %$R$% and bring the result into the right interval.
   */

  finish(mm, d);
  return (d);
}
385
386 #endif
387
388 /* --- @mpmont_mul@ --- *
389 *
390 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
391 * @mp *d@ = destination
392 * @mp *a, *b@ = sources, assumed positive
393 *
394 * Returns: Result, %$a b R^{-1} \bmod m$%.
395 */
396
397 #ifdef MPMONT_DISABLE
398
/* With Montgomery arithmetic disabled, multiply and then reduce by
 * classical division.
 */
mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  d = mp_mul(d, a, b);
  mp_div(0, &d, d, mm->m);
  return (d);
}
405
406 #else
407
mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  size_t n = mm->n;

  if (n > MPMONT_KTHRESH) {
    /* Large modulus: separate subquadratic multiply, then REDC. */
    d = mp_mul(d, a, b);
    d = mpmont_reduce(mm, d, d);
  } else {
    /* Take references first: @d@ may alias @a@ or @b@, and @MP_DEST@
     * may reallocate or discard it.
     */
    a = MP_COPY(a); b = MP_COPY(b);
    MP_DEST(d, 2*n + 1, a->f | b->f | MP_UNDEF);
    mulcore(d->v, d->vl, a->v, a->vl, b->v, b->vl,
	    mm->m->v, mm->n, mm->mi->v);
    /* Propagate secrecy; sign is the XOR of the operand signs, and
     * @finish@ folds a negative result back into range.
     */
    d->f = ((a->f | b->f) & MP_BURN) | ((a->f ^ b->f) & MP_NEG);
    finish(mm, d);
    MP_DROP(a); MP_DROP(b);
  }

  return (d);
}
427
428 #endif
429
430 /*----- Test rig ----------------------------------------------------------*/
431
432 #ifdef TEST_RIG
433
/* Test driver: build a context from modulus @v[0]@ and check the
 * computed @mi@ (low word only), @r@ and @r2@ against the expected
 * values in @v[1..3]@.  Returns nonzero on success.
 */
static int tcreate(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *mi = *(mp **)v[1].buf;
  mp *r = *(mp **)v[2].buf;
  mp *r2 = *(mp **)v[3].buf;

  mpmont mm;
  int ok = 1;

  mpmont_create(&mm, m);

  if (mm.mi->v[0] != mi->v[0]) {
    fprintf(stderr, "\n*** bad mi: found %lu, expected %lu",
	    (unsigned long)mm.mi->v[0], (unsigned long)mi->v[0]);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r, r)) {
    fputs("\n*** bad r", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r2, r2)) {
    fputs("\n*** bad r2", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r2, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r2, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  MP_DROP(m);
  MP_DROP(mi);
  MP_DROP(r);
  MP_DROP(r2);
  mpmont_destroy(&mm);
  /* All arena allocations must have been returned by now. */
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return (ok);
}
480
481 static int tmul(dstr *v)
482 {
483 mp *m = *(mp **)v[0].buf;
484 mp *a = *(mp **)v[1].buf;
485 mp *b = *(mp **)v[2].buf;
486 mp *r = *(mp **)v[3].buf;
487 int ok = 1;
488
489 mpmont mm;
490 mpmont_create(&mm, m);
491
492 {
493 mp *qr = mp_mul(MP_NEW, a, b);
494 mp_div(0, &qr, qr, m);
495
496 if (!MP_EQ(qr, r)) {
497 fputs("\n*** classical modmul failed", stderr);
498 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
499 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
500 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
501 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
502 fputs("\nqr = ", stderr); mp_writefile(qr, stderr, 10);
503 fputc('\n', stderr);
504 ok = 0;
505 }
506
507 mp_drop(qr);
508 }
509
510 {
511 mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
512 mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
513 mp *mr = mpmont_mul(&mm, MP_NEW, ar, br);
514 mr = mpmont_reduce(&mm, mr, mr);
515 if (!MP_EQ(mr, r)) {
516 fputs("\n*** montgomery modmul failed", stderr);
517 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
518 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
519 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
520 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
521 fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
522 fputc('\n', stderr);
523 ok = 0;
524 }
525 MP_DROP(ar); MP_DROP(br);
526 mp_drop(mr);
527 }
528
529
530 MP_DROP(m);
531 MP_DROP(a);
532 MP_DROP(b);
533 MP_DROP(r);
534 mpmont_destroy(&mm);
535 assert(mparena_count(MPARENA_GLOBAL) == 0);
536 return ok;
537 }
538
/* Table binding test names to drivers and the argument types each
 * expects from the vector file; null-terminated.
 */
static test_chunk tests[] = {
  { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { 0, 0, { 0 } },
};
544
/* Test-rig entry point: run the table above against the vector file. */
int main(int argc, char *argv[])
{
  sub_init();
  test_run(argc, argv, tests, SRCDIR "/t/mpmont");
  return (0);
}
551
552 #endif
553
554 /*----- That's all, folks -------------------------------------------------*/