/* CPUID leaf-1 feature bits: `1D' names are %edx bits, `1C' are %ecx bits. */
# define CPUID1D_SSE2 (1u << 26)	/* SSE2 vector instructions */
# define CPUID1D_FXSR (1u << 24)	/* FXSAVE/FXRSTOR supported */
# define CPUID1C_AESNI (1u << 25)	/* AES New Instructions */
# define CPUID1C_RDRAND (1u << 30)	/* RDRAND instruction */

/* The four registers returned by the CPUID instruction. */
struct cpuid { unsigned a, b, c, d; };
* that the necessary entry types are defined. This is primarily ordered by
* entry type to minimize duplication.
*/
/* ARM: ask for the `AT_HWCAP' auxiliary-vector entry, storing its value
 * (union branch `u') into the `hwcap' slot of `probed'.
 */
#if defined(AT_HWCAP) && CPUFAM_ARMEL
# define WANT_ANY 1
# define WANT_AT_HWCAP(_) _(AT_HWCAP, u, hwcap)
#endif
/* If we couldn't find any interesting entries then we can switch all of this
 * machinery off.  Also do that if we have no means for atomic updates.
 *
 * [NOTE(review): intervening comment text is missing from this excerpt.]
 *
 * ... intends to satisfy from the auxiliary vector.  Each entry contains a
 * feature name suffix, and the token name (for `check_env').
 */
/* ARM: the auxiliary-vector entries we want, and the map from `HF_ARM_...'
 * flag suffixes to environment-token names.
 */
#if CPUFAM_ARMEL
# define WANTAUX(_) \
	WANT_AT_HWCAP(_)
# define CAPMAP(_) \
	_(ARM_VFP, "arm:vfp") \
	_(ARM_NEON, "arm:neon") \
	_(ARM_V4, "arm:v4") \
	_(ARM_D32, "arm:d32")
#endif
/* Build the bitmask for `hwcaps' from the `CAPMAP' list. */
/* NOTE(review): this chunk splices fragments from more than one scope --
 * the `enum {' opener and the `case' labels below belong to different
 * constructs whose enclosing definitions are outside this view.  Only the
 * leaked diff marker was removed; the code is otherwise untouched.
 */
enum {
#define CAP__SWITCH(type, ubranch, slot) \
case type: probed.slot = a->value.ubranch; break;
/* One `case' per wanted aux-vector entry: stash its value in `probed'. */
WANTAUX(CAP__SWITCH)
/* `AT_NULL' terminates the auxiliary vector: stop scanning. */
case AT_NULL: goto clean;
}
}
/* Each CPU family now has to pick through what was found and stashed in
* `probed', and set the appropriate flag bits in `hw'.
*/
/* ARM: translate the kernel's `HWCAP_...' bits into our `HF_ARM_...' flags. */
#if CPUFAM_ARMEL
  if (probed.hwcap & HWCAP_VFPv3) hw |= HF_ARM_VFP;
  if (probed.hwcap & HWCAP_NEON) hw |= HF_ARM_NEON;
  if (probed.hwcap & HWCAP_VFPD32) hw |= HF_ARM_D32;
  if (probed.hwcap & HWCAP_VFPv4) hw |= HF_ARM_V4;
#endif
/* Store the bitmask of features we probed for everyone to see. */
DISPATCH_STORE(hwcaps, hw);
CASE_CPUFEAT(X86_AESNI, "x86:aesni",
xmm_registers_available_p() &&
cpuid_features_p(CPUID1D_SSE2, CPUID1C_AESNI));
+ CASE_CPUFEAT(X86_RDRAND, "x86:rdrand",
+ cpuid_features_p(0, CPUID1C_RDRAND));
#endif
#ifdef CAPMAP
/* Turn each `CAPMAP' entry into a token test against the probed hardware
 * flags.  Note that `get_hwcaps' must be *called* -- the old form lacking
 * the parentheses tested the function pointer, not its result.
 */
# define FEATP__CASE(feat, tok) \
	CASE_CPUFEAT(feat, tok, get_hwcaps() & HF_##feat)
CAPMAP(FEATP__CASE)
#undef FEATP__CASE
#endif