math/: SSE2-based high-performance multipliers.
[catacomb] / math / mpmont.c
1 /* -*-c-*-
2 *
3 * Montgomery reduction
4 *
5 * (c) 1999 Straylight/Edgeware
6 */
7
8 /*----- Licensing notice --------------------------------------------------*
9 *
10 * This file is part of Catacomb.
11 *
12 * Catacomb is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU Library General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
16 *
17 * Catacomb is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Library General Public License for more details.
21 *
22 * You should have received a copy of the GNU Library General Public
23 * License along with Catacomb; if not, write to the Free
24 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
25 * MA 02111-1307, USA.
26 */
27
28 /*----- Header files ------------------------------------------------------*/
29
30 #include "config.h"
31 #include "dispatch.h"
32 #include "mp.h"
33 #include "mpmont.h"
34
35 /*----- Tweakables --------------------------------------------------------*/
36
37 /* --- @MPMONT_DISABLE@ --- *
38 *
39 * Replace all the clever Montgomery reduction with good old-fashioned long
40 * division.
41 */
42
43 /* #define MPMONT_DISABLE */
44
45 /*----- Low-level implementation ------------------------------------------*/
46
47 #ifndef MPMONT_DISABLE
48
49 /* --- @redccore@ --- *
50 *
51 * Arguments: @mpw *dv, *dvl@ = base and limit of source/destination
52 * @const mpw *mv@ = base of modulus %$m$%
53 * @size_t n@ = length of modulus
54 * @const mpw *mi@ = base of REDC coefficient %$m'$%
55 *
56 * Returns: ---
57 *
58 * Use: Let %$a$% be the input operand. Store in %$d$% the value
59 * %$a + (m' a \bmod R) m$%. The destination has space for at
60 * least %$2 n + 1$% words of result.
61 */
62
/* Dispatched entry point: selects a CPU-specific REDC core at runtime via
 * @pick_redccore@, falling back to the portable @simple_redccore@ below.
 */
CPU_DISPATCH(static, (void), void, redccore,
	     (mpw *dv, mpw *dvl, const mpw *mv, size_t n, const mpw *mi),
	     (dv, dvl, mv, n, mi), pick_redccore, simple_redccore);
66
67 static void simple_redccore(mpw *dv, mpw *dvl, const mpw *mv,
68 size_t n, const mpw *mi)
69 {
70 mpw mi0 = *mi;
71 size_t i;
72
73 for (i = 0; i < n; i++) {
74 MPX_UMLAN(dv, dvl, mv, mv + n, MPW(*dv*mi0));
75 dv++;
76 }
77 }
78
/* --- @MAYBE_REDC4@ --- *
 *
 * Declare the assembler core @mpxmont_redc4_IMPL@ and define a wrapper
 * @maybe_redc4_IMPL@ around it.  The assembler core handles only moduli
 * whose length is a multiple of four words; other lengths fall back to
 * the portable @simple_redccore@.
 */
#define MAYBE_REDC4(impl)						\
  extern void mpxmont_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				   size_t n, const mpw *mi);		\
  static void maybe_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				 size_t n, const mpw *mi)		\
  {									\
    if (n%4) simple_redccore(dv, dvl, mv, n, mi);			\
    else mpxmont_redc4_##impl(dv, dvl, mv, n, mi);			\
  }

#if CPUFAM_X86
  MAYBE_REDC4(x86_sse2)
#endif
92
/* Choose the best available REDC core for the CPU we find ourselves
 * running on; called once by the @CPU_DISPATCH@ machinery.
 */
static redccore__functype *pick_redccore(void)
{
#if CPUFAM_X86
  DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_x86_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
  DISPATCH_PICK_FALLBACK(mpmont_reduce, simple_redccore);
}
101
/* --- @mulcore@ --- *
103 *
104 * Arguments: @mpw *dv, *dvl@ = base and limit of source/destination
105 * @const mpw *av, *avl@ = base and limit of first multiplicand
106 * @const mpw *bv, *bvl@ = base and limit of second multiplicand
107 * @const mpw *mv@ = base of modulus %$m$%
108 * @size_t n@ = length of modulus
109 * @const mpw *mi@ = base of REDC coefficient %$m'$%
110 *
111 * Returns: ---
112 *
113 * Use: Let %$a$% and %$b$% be the multiplicands. Let %$w = a b$%.
114 * Store in %$d$% the value %$a b + (m' a b \bmod R) m$%.
115 */
116
/* Dispatched entry point: selects a CPU-specific fused multiply/reduce
 * core at runtime via @pick_mulcore@, falling back to the portable
 * @simple_mulcore@ below.
 */
CPU_DISPATCH(static, (void), void, mulcore,
	     (mpw *dv, mpw *dvl, const mpw *av, const mpw *avl,
	      const mpw *bv, const mpw *bvl, const mpw *mv,
	      size_t n, const mpw *mi),
	     (dv, dvl, av, avl, bv, bvl, mv, n, mi),
	     pick_mulcore, simple_mulcore);
123
124 static void simple_mulcore(mpw *dv, mpw *dvl,
125 const mpw *av, const mpw *avl,
126 const mpw *bv, const mpw *bvl,
127 const mpw *mv, size_t n, const mpw *mi)
128 {
129 mpw ai, b0, y, mi0 = *mi;
130 const mpw *tv, *tvl;
131 const mpw *mvl = mv + n;
132 size_t i = 0;
133
134 /* --- Initial setup --- */
135
136 MPX_ZERO(dv, dvl);
137 if (avl - av > bvl - bv) {
138 tv = av; av = bv; bv = tv;
139 tvl = avl; avl = bvl; bvl = tvl;
140 }
141 b0 = *bv;
142
143 /* --- Multiply, until we run out of multiplicand --- */
144
145 while (i < n && av < avl) {
146 ai = *av++;
147 y = MPW((*dv + ai*b0)*mi0);
148 MPX_UMLAN(dv, dvl, bv, bvl, ai);
149 MPX_UMLAN(dv, dvl, mv, mvl, y);
150 dv++; i++;
151 }
152
153 /* --- Continue reducing until we run out of modulus --- */
154
155 while (i < n) {
156 y = MPW(*dv*mi0);
157 MPX_UMLAN(dv, dvl, mv, mvl, y);
158 dv++; i++;
159 }
160 }
161
/* --- @MAYBE_MUL4@ --- *
 *
 * Declare the assembler core @mpxmont_mul4_IMPL@ and define a wrapper
 * @maybe_mul4_IMPL@ around it.  The assembler core requires both
 * multiplicands to be exactly @n@ words long, with @n@ a multiple of
 * four; any other shape falls back to the portable @simple_mulcore@.
 * The wrapper zeroes the tail of the destination beyond the
 * %$2 n + 1$% words the assembler core writes.
 */
#define MAYBE_MUL4(impl)						\
  extern void mpxmont_mul4_##impl(mpw *dv,				\
				  const mpw *av, const mpw *bv,		\
				  const mpw *mv,			\
				  size_t n, const mpw *mi);		\
  static void maybe_mul4_##impl(mpw *dv, mpw *dvl,			\
				const mpw *av, const mpw *avl,		\
				const mpw *bv, const mpw *bvl,		\
				const mpw *mv, size_t n, const mpw *mi)	\
  {									\
    size_t an = avl - av, bn = bvl - bv;				\
    if (n%4 || an != n || bn != n)					\
      simple_mulcore(dv, dvl, av, avl, bv, bvl, mv, n, mi);		\
    else {								\
      mpxmont_mul4_##impl(dv, av, bv, mv, n, mi);			\
      MPX_ZERO(dv + 2*n + 1, dvl);					\
    }									\
  }

#if CPUFAM_X86
  MAYBE_MUL4(x86_sse2)
#endif
184
/* Choose the best available fused multiply/reduce core for this CPU;
 * called once by the @CPU_DISPATCH@ machinery.
 */
static mulcore__functype *pick_mulcore(void)
{
#if CPUFAM_X86
  DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_x86_sse2,
		     cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
  DISPATCH_PICK_FALLBACK(mpmont_mul, simple_mulcore);
}
193
194 /* --- @finish@ --- *
195 *
196 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
 *		@mp *d@ = pointer to mostly-reduced operand
198 *
199 * Returns: ---
200 *
201 * Use: Applies the finishing touches to Montgomery reduction. The
 *		operand @d@ is a multiple of %$R$% at this point, so it needs
203 * to be shifted down; the result might need a further
204 * subtraction to get it into the right interval; and we may
205 * need to do an additional subtraction if %$d$% is negative.
206 */
207
static void finish(mpmont *mm, mp *d)
{
  mpw *dv = d->v, *dvl = d->vl;
  size_t n = mm->n;

  /* The low @n@ words of @d@ have been cleared by the REDC core, so
   * shifting down @n@ words divides exactly by %$R$%.
   */
  memmove(dv, dv + n, MPWS(dvl - (dv + n)));
  dvl -= n;

  /* The shifted value may still be one multiple of %$m$% too large. */
  if (MPX_UCMP(dv, dvl, >=, mm->m->v, mm->m->vl))
    mpx_usub(dv, dvl, dv, dvl, mm->m->v, mm->m->vl);

  /* If the operand was negative, replace %$d$% by %$m - d$% and clear
   * the sign, leaving a canonical nonnegative residue.
   */
  if (d->f & MP_NEG) {
    mpx_usub(dv, dvl, mm->m->v, mm->m->vl, dv, dvl);
    d->f &= ~MP_NEG;
  }

  d->vl = dvl;
  MP_SHRINK(d);
}
227
228 #endif
229
230 /*----- Reduction and multiplication --------------------------------------*/
231
232 /* --- @mpmont_create@ --- *
233 *
234 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
235 * @mp *m@ = modulus to use
236 *
237 * Returns: Zero on success, nonzero on error.
238 *
239 * Use: Initializes a Montgomery reduction context ready for use.
240 * The argument @m@ must be a positive odd integer.
241 */
242
243 #ifdef MPMONT_DISABLE
244
245 int mpmont_create(mpmont *mm, mp *m)
246 {
247 mp_shrink(m);
248 mm->m = MP_COPY(m);
249 mm->r = MP_ONE;
250 mm->r2 = MP_ONE;
251 mm->mi = MP_ONE;
252 return (0);
253 }
254
255 #else
256
257 int mpmont_create(mpmont *mm, mp *m)
258 {
259 size_t n = MP_LEN(m);
260 mp *r2 = mp_new(2 * n + 1, 0);
261 mp r;
262
263 /* --- Take a copy of the modulus --- */
264
265 if (!MP_POSP(m) || !MP_ODDP(m))
266 return (-1);
267 mm->m = MP_COPY(m);
268
269 /* --- Determine %$R^2$% --- */
270
271 mm->n = n;
272 MPX_ZERO(r2->v, r2->vl - 1);
273 r2->vl[-1] = 1;
274
275 /* --- Find the magic value @mi@ --- */
276
277 mp_build(&r, r2->v + n, r2->vl);
278 mm->mi = mp_modinv(MP_NEW, m, &r);
279 mm->mi = mp_sub(mm->mi, &r, mm->mi);
280 MP_ENSURE(mm->mi, n);
281
282 /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */
283
284 mm->r2 = MP_NEW;
285 mp_div(0, &mm->r2, r2, m);
286 mm->r = mpmont_reduce(mm, MP_NEW, mm->r2);
287 MP_DROP(r2);
288 return (0);
289 }
290
291 #endif
292
293 /* --- @mpmont_destroy@ --- *
294 *
295 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
296 *
297 * Returns: ---
298 *
299 * Use: Disposes of a context when it's no longer of any use to
300 * anyone.
301 */
302
void mpmont_destroy(mpmont *mm)
{
  /* Release our references to the modulus and the precomputed
   * constants; the context structure itself belongs to the caller.
   */
  MP_DROP(mm->m);
  MP_DROP(mm->r);
  MP_DROP(mm->r2);
  MP_DROP(mm->mi);
}
310
311 /* --- @mpmont_reduce@ --- *
312 *
313 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
314 * @mp *d@ = destination
315 * @mp *a@ = source, assumed positive
316 *
317 * Returns: Result, %$a R^{-1} \bmod m$%.
318 */
319
320 #ifdef MPMONT_DISABLE
321
/* Fallback: with Montgomery arithmetic disabled, %$R = 1$%, so reduction
 * is plain remaindering by the modulus.
 */
mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  mp_div(0, &d, a, mm->m);
  return (d);
}
327
328 #else
329
mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
{
  size_t n = mm->n;

  /* --- Check for serious Karatsuba reduction --- *
   *
   * For large moduli, compute %$a + (m' a \bmod R) m$% with two explicit
   * multiplications (which can use the subquadratic multiplier) rather
   * than the word-at-a-time REDC loop.
   */

  if (n > MPK_THRESH * 3) {
    mp al;
    mpw *vl;
    mp *u;

    /* Only the low @n@ words of @a@ matter modulo %$R$%. */
    if (MP_LEN(a) >= n) vl = a->v + n;
    else vl = a->vl;
    mp_build(&al, a->v, vl);
    u = mp_mul(MP_NEW, &al, mm->mi);
    /* Truncate the product to @n@ words, i.e., reduce it mod %$R$%. */
    if (MP_LEN(u) > n) u->vl = u->v + n;
    u = mp_mul(u, u, mm->m);
    d = mp_add(d, a, u);
    MP_ENSURE(d, n);
    mp_drop(u);
  }

  /* --- Otherwise do it the hard way --- *
   *
   * Work in place: take a reference to @a@ as the destination (dropping
   * any previous @d@), grow it to hold the full REDC result, and let
   * the dispatched core clear its low words.
   */

  else {
    a = MP_COPY(a);
    if (d) MP_DROP(d);
    d = a;
    MP_DEST(d, 2*mm->n + 1, a->f);
    redccore(d->v, d->vl, mm->m->v, mm->n, mm->mi->v);
  }

  /* --- Wrap everything up --- */

  finish(mm, d);
  return (d);
}
367
368 #endif
369
370 /* --- @mpmont_mul@ --- *
371 *
372 * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
373 * @mp *d@ = destination
374 * @mp *a, *b@ = sources, assumed positive
375 *
376 * Returns: Result, %$a b R^{-1} \bmod m$%.
377 */
378
379 #ifdef MPMONT_DISABLE
380
/* Fallback: with Montgomery arithmetic disabled, %$R = 1$%, so this is an
 * ordinary modular multiplication.
 */
mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  d = mp_mul(d, a, b);
  mp_div(0, &d, d, mm->m);
  return (d);
}
387
388 #else
389
mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
{
  size_t n = mm->n;

  if (n > MPK_THRESH * 3) {
    /* Large modulus: multiply with the subquadratic multiplier, then
     * use the Karatsuba-aware reduction above.
     */
    d = mp_mul(d, a, b);
    d = mpmont_reduce(mm, d, d);
  } else {
    /* Small modulus: one interleaved multiply-and-reduce pass. */
    a = MP_COPY(a); b = MP_COPY(b);
    MP_DEST(d, 2*n + 1, a->f | b->f | MP_UNDEF);
    mulcore(d->v, d->vl, a->v, a->vl, b->v, b->vl,
	    mm->m->v, mm->n, mm->mi->v);
    /* Propagate the burn-after-use flag from either operand; the
     * result is negative iff exactly one operand was.
     */
    d->f = ((a->f | b->f) & MP_BURN) | ((a->f ^ b->f) & MP_NEG);
    finish(mm, d);
    MP_DROP(a); MP_DROP(b);
  }

  return (d);
}
409
410 #endif
411
412 /*----- Test rig ----------------------------------------------------------*/
413
414 #ifdef TEST_RIG
415
/* Test @mpmont_create@: compare the computed @mi@, @r@ and @r2@ values
 * against the expected values from the test vector.  Returns nonzero on
 * success.
 */
static int tcreate(dstr *v)
{
  mp *m = *(mp **)v[0].buf;
  mp *mi = *(mp **)v[1].buf;
  mp *r = *(mp **)v[2].buf;
  mp *r2 = *(mp **)v[3].buf;

  mpmont mm;
  int ok = 1;

  mpmont_create(&mm, m);

  /* Only the low word of @mi@ is compared against the vector. */
  if (mm.mi->v[0] != mi->v[0]) {
    fprintf(stderr, "\n*** bad mi: found %lu, expected %lu",
	    (unsigned long)mm.mi->v[0], (unsigned long)mi->v[0]);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r, r)) {
    fputs("\n*** bad r", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  if (!MP_EQ(mm.r2, r2)) {
    fputs("\n*** bad r2", stderr);
    fputs("\nm = ", stderr); mp_writefile(m, stderr, 10);
    fputs("\nexpected ", stderr); mp_writefile(r2, stderr, 10);
    fputs("\n found ", stderr); mp_writefile(mm.r2, stderr, 10);
    fputc('\n', stderr);
    ok = 0;
  }

  /* Release everything and check for arena leaks. */
  MP_DROP(m);
  MP_DROP(mi);
  MP_DROP(r);
  MP_DROP(r2);
  mpmont_destroy(&mm);
  assert(mparena_count(MPARENA_GLOBAL) == 0);
  return (ok);
}
462
463 static int tmul(dstr *v)
464 {
465 mp *m = *(mp **)v[0].buf;
466 mp *a = *(mp **)v[1].buf;
467 mp *b = *(mp **)v[2].buf;
468 mp *r = *(mp **)v[3].buf;
469 int ok = 1;
470
471 mpmont mm;
472 mpmont_create(&mm, m);
473
474 {
475 mp *qr = mp_mul(MP_NEW, a, b);
476 mp_div(0, &qr, qr, m);
477
478 if (!MP_EQ(qr, r)) {
479 fputs("\n*** classical modmul failed", stderr);
480 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
481 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
482 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
483 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
484 fputs("\nqr = ", stderr); mp_writefile(qr, stderr, 10);
485 fputc('\n', stderr);
486 ok = 0;
487 }
488
489 mp_drop(qr);
490 }
491
492 {
493 mp *ar = mpmont_mul(&mm, MP_NEW, a, mm.r2);
494 mp *br = mpmont_mul(&mm, MP_NEW, b, mm.r2);
495 mp *mr = mpmont_mul(&mm, MP_NEW, ar, br);
496 mr = mpmont_reduce(&mm, mr, mr);
497 if (!MP_EQ(mr, r)) {
498 fputs("\n*** montgomery modmul failed", stderr);
499 fputs("\n m = ", stderr); mp_writefile(m, stderr, 10);
500 fputs("\n a = ", stderr); mp_writefile(a, stderr, 10);
501 fputs("\n b = ", stderr); mp_writefile(b, stderr, 10);
502 fputs("\n r = ", stderr); mp_writefile(r, stderr, 10);
503 fputs("\nmr = ", stderr); mp_writefile(mr, stderr, 10);
504 fputc('\n', stderr);
505 ok = 0;
506 }
507 MP_DROP(ar); MP_DROP(br);
508 mp_drop(mr);
509 }
510
511
512 MP_DROP(m);
513 MP_DROP(a);
514 MP_DROP(b);
515 MP_DROP(r);
516 mpmont_destroy(&mm);
517 assert(mparena_count(MPARENA_GLOBAL) == 0);
518 return ok;
519 }
520
/* Test dispatch table: each entry names a test function and the types of
 * its vector arguments (all multiprecision integers here).
 */
static test_chunk tests[] = {
  { "create", tcreate, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { "mul", tmul, { &type_mp, &type_mp, &type_mp, &type_mp, 0 } },
  { 0, 0, { 0 } },
};
526
/* Test rig entry point: run the vectors from @t/mpmont@ against the
 * table above.
 */
int main(int argc, char *argv[])
{
  sub_init();
  test_run(argc, argv, tests, SRCDIR "/t/mpmont");
  return (0);
}
533
534 #endif
535
536 /*----- That's all, folks -------------------------------------------------*/