}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
void arm64_set_ssbd_mitigation(bool state)
{
+ if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+ pr_info_once("SSBD disabled by kernel configuration\n");
+ return;
+ }
+
if (this_cpu_has_cap(ARM64_SSBS)) {
if (state)
asm volatile(SET_PSTATE_SSBS(0));
return required;
}
-#endif /* CONFIG_ARM64_SSBD */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.cpu_enable = enable_smccc_arch_workaround_1,
},
#endif
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypass Disable",
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.capability = ARM64_SSBD,
.matches = has_ssbd_mitigation,
},
-#endif
{
}
};
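
The pattern this diff applies is the same in both hunks: instead of compiling the SSBD code out with #ifdef CONFIG_ARM64_SSBD, the code is always built and arm64_set_ssbd_mitigation() simply returns early when the option is off. IS_ENABLED() expands to a compile-time 0/1 constant, so the compiler still discards the dead branch, while every configuration keeps the mitigation path parsed and type-checked. The sketch below is a simplified, self-contained rendition of that idea; MY_IS_ENABLED() and CONFIG_FEATURE_X are illustrative stand-ins, not the kernel's <linux/kconfig.h> definitions.

/*
 * Minimal sketch of an IS_ENABLED()-style early return.
 * Build and run as-is for the "enabled" case; delete the
 * CONFIG_FEATURE_X define to exercise the "disabled" path.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_FEATURE_X 1

/*
 * MY_IS_ENABLED(option) expands to 1 if the option macro is defined to 1,
 * and to 0 otherwise.  When the option is defined, PLACEHOLDER_1 injects a
 * leading "0," so the literal 1 lands in the second-argument slot; when it
 * is undefined, the pasted token is not a macro, the 1 stays glued to it
 * inside the first (ignored) argument, and the trailing 0 is selected.
 */
#define PLACEHOLDER_1				0,
#define take_second_arg(ignored, val, ...)	val
#define my_is_defined_step2(arg1_or_junk)	take_second_arg(arg1_or_junk 1, 0)
#define my_is_defined_step1(val)		my_is_defined_step2(PLACEHOLDER_##val)
#define MY_IS_ENABLED(option)			my_is_defined_step1(option)

static void set_feature_x_mitigation(bool state)
{
	/*
	 * Early return instead of #ifdef: the remainder of the function is
	 * still compiled in every configuration, but becomes dead code that
	 * the compiler removes when the option is off.
	 */
	if (!MY_IS_ENABLED(CONFIG_FEATURE_X)) {
		printf("feature X disabled by configuration\n");
		return;
	}

	printf("feature X mitigation %s\n", state ? "on" : "off");
}

int main(void)
{
	set_feature_x_mitigation(true);
	return 0;
}

The same reasoning covers the second half of the diff: the ARM64_SSBD entry now stays in the arm64_errata[] table unconditionally, and has_ssbd_mitigation() decides at boot whether the capability applies, so the compile-time guard around the table entry is no longer needed either.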