#if CPUFAM_X86
MAYBE_REDC4(x86_sse2)
+ MAYBE_REDC4(x86_avx)
#endif
#if CPUFAM_AMD64
MAYBE_REDC4(amd64_sse2)
+ MAYBE_REDC4(amd64_avx)
+#endif
+
+#if CPUFAM_ARMEL
+ MAYBE_REDC4(arm_neon)
+#endif
+
+#if CPUFAM_ARM64
+ MAYBE_REDC4(arm64_simd)
#endif
static redccore__functype *pick_redccore(void)
{
#if CPUFAM_X86
+ DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_x86_avx,
+ cpu_feature_p(CPUFEAT_X86_AVX));
DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_x86_sse2,
cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_AMD64
+ DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_amd64_avx,
+ cpu_feature_p(CPUFEAT_X86_AVX));
DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_amd64_sse2,
cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
+#if CPUFAM_ARMEL
+ DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_arm_neon,
+ cpu_feature_p(CPUFEAT_ARM_NEON));
+#endif
+#if CPUFAM_ARM64
+ DISPATCH_PICK_COND(mpmont_reduce, maybe_redc4_arm64_simd,
+ cpu_feature_p(CPUFEAT_ARM_NEON));
+#endif
DISPATCH_PICK_FALLBACK(mpmont_reduce, simple_redccore);
}
-/* --- @redccore@ --- *
+/* --- @mulcore@ --- *
*
* Arguments: @mpw *dv, *dvl@ = base and limit of source/destination
* @const mpw *av, *avl@ = base and limit of first multiplicand
#if CPUFAM_X86
MAYBE_MUL4(x86_sse2)
+ MAYBE_MUL4(x86_avx)
#endif
#if CPUFAM_AMD64
MAYBE_MUL4(amd64_sse2)
+ MAYBE_MUL4(amd64_avx)
+#endif
+
+#if CPUFAM_ARMEL
+ MAYBE_MUL4(arm_neon)
+#endif
+
+#if CPUFAM_ARM64
+ MAYBE_MUL4(arm64_simd)
#endif
static mulcore__functype *pick_mulcore(void)
{
#if CPUFAM_X86
+ DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_x86_avx,
+ cpu_feature_p(CPUFEAT_X86_AVX));
DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_x86_sse2,
cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
#if CPUFAM_AMD64
+ DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_amd64_avx,
+ cpu_feature_p(CPUFEAT_X86_AVX));
DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_amd64_sse2,
cpu_feature_p(CPUFEAT_X86_SSE2));
#endif
+#if CPUFAM_ARMEL
+ DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_arm_neon,
+ cpu_feature_p(CPUFEAT_ARM_NEON));
+#endif
+#if CPUFAM_ARM64
+ DISPATCH_PICK_COND(mpmont_mul, maybe_mul4_arm64_simd,
+ cpu_feature_p(CPUFEAT_ARM_NEON));
+#endif
DISPATCH_PICK_FALLBACK(mpmont_mul, simple_mulcore);
}
/* --- @finish@ --- *
*
- * Arguments: @mpmont *mm@ = pointer to a Montgomery reduction context
+ * Arguments: @const mpmont *mm@ = pointer to a Montgomery reduction
+ * context
 *		@mp *d@ = pointer to mostly-reduced operand
*
* Returns: ---
* need to do an additional subtraction if %$d$% is negative.
*/
-static void finish(mpmont *mm, mp *d)
+static void finish(const mpmont *mm, mp *d)
{
mpw *dv = d->v, *dvl = d->vl;
size_t n = mm->n;
/* --- @mpmont_reduce@ --- *
*
- * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
+ * Arguments: @const mpmont *mm@ = pointer to Montgomery reduction context
* @mp *d@ = destination
* @mp *a@ = source, assumed positive
*
#ifdef MPMONT_DISABLE
-mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
+mp *mpmont_reduce(const mpmont *mm, mp *d, mp *a)
{
mp_div(0, &d, a, mm->m);
return (d);
#else
-mp *mpmont_reduce(mpmont *mm, mp *d, mp *a)
+mp *mpmont_reduce(const mpmont *mm, mp *d, mp *a)
{
size_t n = mm->n;
/* --- @mpmont_mul@ --- *
*
- * Arguments: @mpmont *mm@ = pointer to Montgomery reduction context
+ * Arguments: @const mpmont *mm@ = pointer to Montgomery reduction context
* @mp *d@ = destination
* @mp *a, *b@ = sources, assumed positive
*
#ifdef MPMONT_DISABLE
-mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
+mp *mpmont_mul(const mpmont *mm, mp *d, mp *a, mp *b)
{
d = mp_mul(d, a, b);
mp_div(0, &d, d, mm->m);
#else
-mp *mpmont_mul(mpmont *mm, mp *d, mp *a, mp *b)
+mp *mpmont_mul(const mpmont *mm, mp *d, mp *a, mp *b)
{
size_t n = mm->n;
#ifdef TEST_RIG
+#ifdef ENABLE_ASM_DEBUG
+# include "regdump.h"
+#endif
+
static int tcreate(dstr *v)
{
mp *m = *(mp **)v[0].buf;
mp_drop(mr);
}
-
MP_DROP(m);
MP_DROP(a);
MP_DROP(b);
int main(int argc, char *argv[])
{
sub_init();
+#ifdef ENABLE_ASM_DEBUG
+ regdump_init();
+#endif
test_run(argc, argv, tests, SRCDIR "/t/mpmont");
return (0);
}