/*
 * (c) 1999 Straylight/Edgeware
 */

/*----- Licensing notice --------------------------------------------------*
 *
 * This file is part of Catacomb.
 *
 * Catacomb is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * Catacomb is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with Catacomb; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */
28 /*----- Header files ------------------------------------------------------*/
/*----- Tweakables --------------------------------------------------------*/

/* --- @MPMONT_DISABLE@ --- *
 *
 * Replace all the clever Montgomery reduction with good old-fashioned long
 * division.
 */

/* #define MPMONT_DISABLE */
45 #define MPMONT_KTHRESH (16*MPK_THRESH)
47 /*----- Low-level implementation ------------------------------------------*/
49 #ifndef MPMONT_DISABLE
/* --- @redccore@ --- *
 *
 * Arguments:	@mpw *dv, *dvl@ = base and limit of source/destination
 *		@const mpw *mv@ = base of modulus %$m$%
 *		@size_t n@ = length of modulus
 *		@const mpw *mi@ = base of REDC coefficient %$m'$%
 *
 * Returns:	---
 *
 * Use:		Let %$a$% be the input operand.  Store in %$d$% the value
 *		%$a + (m' a \bmod R) m$%.  The destination has space for at
 *		least %$2 n + 1$% words of result.
 */
65 CPU_DISPATCH(static, (void), void, redccore
,
66 (mpw
*dv
, mpw
*dvl
, const mpw
*mv
, size_t n
, const mpw
*mi
),
67 (dv
, dvl
, mv
, n
, mi
), pick_redccore
, simple_redccore
);
69 static void simple_redccore(mpw
*dv
, mpw
*dvl
, const mpw
*mv
,
70 size_t n
, const mpw
*mi
)
75 for (i
= 0; i
< n
; i
++) {
76 MPX_UMLAN(dv
, dvl
, mv
, mv
+ n
, MPW(*dv
*mi0
));
/* --- @MAYBE_REDC4@ --- *
 *
 * Declare the assembler kernel @mpxmont_redc4_IMPL@ and define a wrapper
 * @maybe_redc4_IMPL@ which uses it when the modulus length is a multiple
 * of four words, falling back to @simple_redccore@ otherwise.
 *
 * NOTE(review): the macro's brace continuation lines were lost in
 * extraction and are restored here.
 */
#define MAYBE_REDC4(impl)						\
  extern void mpxmont_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				   size_t n, const mpw *mi);		\
  static void maybe_redc4_##impl(mpw *dv, mpw *dvl, const mpw *mv,	\
				 size_t n, const mpw *mi)		\
  {									\
    if (n%4) simple_redccore(dv, dvl, mv, n, mi);			\
    else mpxmont_redc4_##impl(dv, dvl, mv, n, mi);			\
  }
97 MAYBE_REDC4(amd64_sse2
)
98 MAYBE_REDC4(amd64_avx
)
101 static redccore__functype
*pick_redccore(void)
104 DISPATCH_PICK_COND(mpmont_reduce
, maybe_redc4_x86_avx
,
105 cpu_feature_p(CPUFEAT_X86_AVX
));
106 DISPATCH_PICK_COND(mpmont_reduce
, maybe_redc4_x86_sse2
,
107 cpu_feature_p(CPUFEAT_X86_SSE2
));
110 DISPATCH_PICK_COND(mpmont_reduce
, maybe_redc4_amd64_avx
,
111 cpu_feature_p(CPUFEAT_X86_AVX
));
112 DISPATCH_PICK_COND(mpmont_reduce
, maybe_redc4_amd64_sse2
,
113 cpu_feature_p(CPUFEAT_X86_SSE2
));
115 DISPATCH_PICK_FALLBACK(mpmont_reduce
, simple_redccore
);
/* --- @mulcore@ --- *
 *
 * Arguments:	@mpw *dv, *dvl@ = base and limit of source/destination
 *		@const mpw *av, *avl@ = base and limit of first multiplicand
 *		@const mpw *bv, *bvl@ = base and limit of second multiplicand
 *		@const mpw *mv@ = base of modulus %$m$%
 *		@size_t n@ = length of modulus
 *		@const mpw *mi@ = base of REDC coefficient %$m'$%
 *
 * Returns:	---
 *
 * Use:		Let %$a$% and %$b$% be the multiplicands.  Let %$w = a b$%.
 *		Store in %$d$% the value %$a b + (m' a b \bmod R) m$%.
 */
133 CPU_DISPATCH(static, (void), void, mulcore
,
134 (mpw
*dv
, mpw
*dvl
, const mpw
*av
, const mpw
*avl
,
135 const mpw
*bv
, const mpw
*bvl
, const mpw
*mv
,
136 size_t n
, const mpw
*mi
),
137 (dv
, dvl
, av
, avl
, bv
, bvl
, mv
, n
, mi
),
138 pick_mulcore
, simple_mulcore
);
140 static void simple_mulcore(mpw
*dv
, mpw
*dvl
,
141 const mpw
*av
, const mpw
*avl
,
142 const mpw
*bv
, const mpw
*bvl
,
143 const mpw
*mv
, size_t n
, const mpw
*mi
)
145 mpw ai
, b0
, y
, mi0
= *mi
;
147 const mpw
*mvl
= mv
+ n
;
150 /* --- Initial setup --- */
153 if (avl
- av
> bvl
- bv
) {
154 tv
= av
; av
= bv
; bv
= tv
;
155 tvl
= avl
; avl
= bvl
; bvl
= tvl
;
159 /* --- Multiply, until we run out of multiplicand --- */
161 while (i
< n
&& av
< avl
) {
163 y
= MPW((*dv
+ ai
*b0
)*mi0
);
164 MPX_UMLAN(dv
, dvl
, bv
, bvl
, ai
);
165 MPX_UMLAN(dv
, dvl
, mv
, mvl
, y
);
169 /* --- Continue reducing until we run out of modulus --- */
173 MPX_UMLAN(dv
, dvl
, mv
, mvl
, y
);
/* --- @MAYBE_MUL4@ --- *
 *
 * Declare the assembler kernel @mpxmont_mul4_IMPL@ and define a wrapper
 * @maybe_mul4_IMPL@ which uses it when the modulus length is a multiple
 * of four words and both operands are exactly modulus-length, falling
 * back to @simple_mulcore@ otherwise.
 *
 * NOTE(review): the extraction dropped the @const mpw *mv@ parameter
 * line of the extern declaration (the call below passes @mv@) and the
 * brace/@else@ continuation lines; restored here.
 */
#define MAYBE_MUL4(impl)						\
  extern void mpxmont_mul4_##impl(mpw *dv,				\
				  const mpw *av, const mpw *bv,		\
				  const mpw *mv,			\
				  size_t n, const mpw *mi);		\
  static void maybe_mul4_##impl(mpw *dv, mpw *dvl,			\
				const mpw *av, const mpw *avl,		\
				const mpw *bv, const mpw *bvl,		\
				const mpw *mv, size_t n, const mpw *mi)	\
  {									\
    size_t an = avl - av, bn = bvl - bv;				\
    if (n%4 || an != n || bn != n)					\
      simple_mulcore(dv, dvl, av, avl, bv, bvl, mv, n, mi);		\
    else {								\
      mpxmont_mul4_##impl(dv, av, bv, mv, n, mi);			\
      MPX_ZERO(dv + 2*n + 1, dvl);					\
    }									\
  }
203 MAYBE_MUL4(amd64_sse2
)
204 MAYBE_MUL4(amd64_avx
)
207 static mulcore__functype
*pick_mulcore(void)
210 DISPATCH_PICK_COND(mpmont_mul
, maybe_mul4_x86_avx
,
211 cpu_feature_p(CPUFEAT_X86_AVX
));
212 DISPATCH_PICK_COND(mpmont_mul
, maybe_mul4_x86_sse2
,
213 cpu_feature_p(CPUFEAT_X86_SSE2
));
216 DISPATCH_PICK_COND(mpmont_mul
, maybe_mul4_amd64_avx
,
217 cpu_feature_p(CPUFEAT_X86_AVX
));
218 DISPATCH_PICK_COND(mpmont_mul
, maybe_mul4_amd64_sse2
,
219 cpu_feature_p(CPUFEAT_X86_SSE2
));
221 DISPATCH_PICK_FALLBACK(mpmont_mul
, simple_mulcore
);
/* --- @finish@ --- *
 *
 * Arguments:	@const mpmont *mm@ = pointer to a Montgomery reduction
 *			context
 *		@mp *d@ = pointer to mostly-reduced operand
 *
 * Returns:	---
 *
 * Use:		Applies the finishing touches to Montgomery reduction.  The
 *		operand @d@ is a multiple of %$R$% at this point, so it needs
 *		to be shifted down; the result might need a further
 *		subtraction to get it into the right interval; and we may
 *		need to do an additional subtraction if %$d$% is negative.
 */
239 static void finish(const mpmont
*mm
, mp
*d
)
241 mpw
*dv
= d
->v
, *dvl
= d
->vl
;
244 memmove(dv
, dv
+ n
, MPWS(dvl
- (dv
+ n
)));
247 if (MPX_UCMP(dv
, dvl
, >=, mm
->m
->v
, mm
->m
->vl
))
248 mpx_usub(dv
, dvl
, dv
, dvl
, mm
->m
->v
, mm
->m
->vl
);
251 mpx_usub(dv
, dvl
, mm
->m
->v
, mm
->m
->vl
, dv
, dvl
);
/*----- Reduction and multiplication --------------------------------------*/

/* --- @mpmont_create@ --- *
 *
 * Arguments:	@mpmont *mm@ = pointer to Montgomery reduction context
 *		@mp *m@ = modulus to use
 *
 * Returns:	Zero on success, nonzero on error.
 *
 * Use:		Initializes a Montgomery reduction context ready for use.
 *		The argument @m@ must be a positive odd integer.
 */
274 #ifdef MPMONT_DISABLE
276 int mpmont_create(mpmont
*mm
, mp
*m
)
288 int mpmont_create(mpmont
*mm
, mp
*m
)
290 size_t n
= MP_LEN(m
);
291 mp
*r2
= mp_new(2 * n
+ 1, 0);
294 /* --- Take a copy of the modulus --- */
296 if (!MP_POSP(m
) || !MP_ODDP(m
))
300 /* --- Determine %$R^2$% --- */
303 MPX_ZERO(r2
->v
, r2
->vl
- 1);
306 /* --- Find the magic value @mi@ --- */
308 mp_build(&r
, r2
->v
+ n
, r2
->vl
);
309 mm
->mi
= mp_modinv(MP_NEW
, m
, &r
);
310 mm
->mi
= mp_sub(mm
->mi
, &r
, mm
->mi
);
311 MP_ENSURE(mm
->mi
, n
);
313 /* --- Discover the values %$R \bmod m$% and %$R^2 \bmod m$% --- */
316 mp_div(0, &mm
->r2
, r2
, m
);
317 mm
->r
= mpmont_reduce(mm
, MP_NEW
, mm
->r2
);
324 /* --- @mpmont_destroy@ --- *
326 * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
330 * Use: Disposes of a context when it's no longer of any use to
334 void mpmont_destroy(mpmont
*mm
)
/* --- @mpmont_reduce@ --- *
 *
 * Arguments:	@const mpmont *mm@ = pointer to Montgomery reduction context
 *		@mp *d@ = destination
 *		@mp *a@ = source, assumed positive
 *
 * Returns:	Result, %$a R^{-1} \bmod m$%.
 */
351 #ifdef MPMONT_DISABLE
353 mp
*mpmont_reduce(const mpmont
*mm
, mp
*d
, mp
*a
)
355 mp_div(0, &d
, a
, mm
->m
);
361 mp
*mpmont_reduce(const mpmont
*mm
, mp
*d
, mp
*a
)
365 /* --- Check for serious Karatsuba reduction --- */
367 if (n
> MPMONT_KTHRESH
) {
372 if (MP_LEN(a
) >= n
) vl
= a
->v
+ n
;
374 mp_build(&al
, a
->v
, vl
);
375 u
= mp_mul(MP_NEW
, &al
, mm
->mi
);
376 if (MP_LEN(u
) > n
) u
->vl
= u
->v
+ n
;
377 u
= mp_mul(u
, u
, mm
->m
);
383 /* --- Otherwise do it the hard way --- */
389 MP_DEST(d
, 2*mm
->n
+ 1, a
->f
);
390 redccore(d
->v
, d
->vl
, mm
->m
->v
, mm
->n
, mm
->mi
->v
);
393 /* --- Wrap everything up --- */
/* --- @mpmont_mul@ --- *
 *
 * Arguments:	@const mpmont *mm@ = pointer to Montgomery reduction context
 *		@mp *d@ = destination
 *		@mp *a, *b@ = sources, assumed positive
 *
 * Returns:	Result, %$a b R^{-1} \bmod m$%.
 */
410 #ifdef MPMONT_DISABLE
412 mp
*mpmont_mul(const mpmont
*mm
, mp
*d
, mp
*a
, mp
*b
)
415 mp_div(0, &d
, d
, mm
->m
);
421 mp
*mpmont_mul(const mpmont
*mm
, mp
*d
, mp
*a
, mp
*b
)
425 if (n
> MPMONT_KTHRESH
) {
427 d
= mpmont_reduce(mm
, d
, d
);
429 a
= MP_COPY(a
); b
= MP_COPY(b
);
430 MP_DEST(d
, 2*n
+ 1, a
->f
| b
->f
| MP_UNDEF
);
431 mulcore(d
->v
, d
->vl
, a
->v
, a
->vl
, b
->v
, b
->vl
,
432 mm
->m
->v
, mm
->n
, mm
->mi
->v
);
433 d
->f
= ((a
->f
| b
->f
) & MP_BURN
) | ((a
->f
^ b
->f
) & MP_NEG
);
435 MP_DROP(a
); MP_DROP(b
);
443 /*----- Test rig ----------------------------------------------------------*/
447 static int tcreate(dstr
*v
)
449 mp
*m
= *(mp
**)v
[0].buf
;
450 mp
*mi
= *(mp
**)v
[1].buf
;
451 mp
*r
= *(mp
**)v
[2].buf
;
452 mp
*r2
= *(mp
**)v
[3].buf
;
457 mpmont_create(&mm
, m
);
459 if (mm
.mi
->v
[0] != mi
->v
[0]) {
460 fprintf(stderr
, "\n*** bad mi: found %lu, expected %lu",
461 (unsigned long)mm
.mi
->v
[0], (unsigned long)mi
->v
[0]);
462 fputs("\nm = ", stderr
); mp_writefile(m
, stderr
, 10);
467 if (!MP_EQ(mm
.r
, r
)) {
468 fputs("\n*** bad r", stderr
);
469 fputs("\nm = ", stderr
); mp_writefile(m
, stderr
, 10);
470 fputs("\nexpected ", stderr
); mp_writefile(r
, stderr
, 10);
471 fputs("\n found ", stderr
); mp_writefile(mm
.r
, stderr
, 10);
476 if (!MP_EQ(mm
.r2
, r2
)) {
477 fputs("\n*** bad r2", stderr
);
478 fputs("\nm = ", stderr
); mp_writefile(m
, stderr
, 10);
479 fputs("\nexpected ", stderr
); mp_writefile(r2
, stderr
, 10);
480 fputs("\n found ", stderr
); mp_writefile(mm
.r2
, stderr
, 10);
490 assert(mparena_count(MPARENA_GLOBAL
) == 0);
494 static int tmul(dstr
*v
)
496 mp
*m
= *(mp
**)v
[0].buf
;
497 mp
*a
= *(mp
**)v
[1].buf
;
498 mp
*b
= *(mp
**)v
[2].buf
;
499 mp
*r
= *(mp
**)v
[3].buf
;
503 mpmont_create(&mm
, m
);
506 mp
*qr
= mp_mul(MP_NEW
, a
, b
);
507 mp_div(0, &qr
, qr
, m
);
510 fputs("\n*** classical modmul failed", stderr
);
511 fputs("\n m = ", stderr
); mp_writefile(m
, stderr
, 10);
512 fputs("\n a = ", stderr
); mp_writefile(a
, stderr
, 10);
513 fputs("\n b = ", stderr
); mp_writefile(b
, stderr
, 10);
514 fputs("\n r = ", stderr
); mp_writefile(r
, stderr
, 10);
515 fputs("\nqr = ", stderr
); mp_writefile(qr
, stderr
, 10);
524 mp
*ar
= mpmont_mul(&mm
, MP_NEW
, a
, mm
.r2
);
525 mp
*br
= mpmont_mul(&mm
, MP_NEW
, b
, mm
.r2
);
526 mp
*mr
= mpmont_mul(&mm
, MP_NEW
, ar
, br
);
527 mr
= mpmont_reduce(&mm
, mr
, mr
);
529 fputs("\n*** montgomery modmul failed", stderr
);
530 fputs("\n m = ", stderr
); mp_writefile(m
, stderr
, 10);
531 fputs("\n a = ", stderr
); mp_writefile(a
, stderr
, 10);
532 fputs("\n b = ", stderr
); mp_writefile(b
, stderr
, 10);
533 fputs("\n r = ", stderr
); mp_writefile(r
, stderr
, 10);
534 fputs("\nmr = ", stderr
); mp_writefile(mr
, stderr
, 10);
538 MP_DROP(ar
); MP_DROP(br
);
548 assert(mparena_count(MPARENA_GLOBAL
) == 0);
552 static test_chunk tests
[] = {
553 { "create", tcreate
, { &type_mp
, &type_mp
, &type_mp
, &type_mp
, 0 } },
554 { "mul", tmul
, { &type_mp
, &type_mp
, &type_mp
, &type_mp
, 0 } },
558 int main(int argc
, char *argv
[])
561 test_run(argc
, argv
, tests
, SRCDIR
"/t/mpmont");
567 /*----- That's all, folks -------------------------------------------------*/