arm64: Always enable ssb vulnerability detection
Author:     Jeremy Linton <jeremy.linton@arm.com>
AuthorDate: Thu, 24 Oct 2019 12:48:24 +0000 (14:48 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Tue, 29 Oct 2019 08:17:26 +0000 (09:17 +0100)
[ Upstream commit d42281b6e49510f078ace15a8ea10f71e6262581 ]

Ensure we are always able to detect whether or not the CPU is affected
by SSB, so that we can later advertise this to userspace.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
[will: Use IS_ENABLED instead of #ifdef]
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
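
Note on the "[will: Use IS_ENABLED instead of #ifdef]" change above: the patch replaces preprocessor guards with a runtime IS_ENABLED() check so the SSB detection code is always built, while the mitigation body can still be dropped by the compiler when the option is off. The following is a minimal standalone sketch (not kernel code), using a hypothetical SKETCH_IS_ENABLED() stand-in for the kernel macro, to illustrate the idiom:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for CONFIG_ARM64_SSBD (1 = enabled, 0 = disabled). */
	#define SKETCH_CONFIG_SSBD 0

	/*
	 * Simplified IS_ENABLED(): the real kernel macro also distinguishes
	 * built-in (=y) from modular (=m) options; this sketch does not.
	 */
	#define SKETCH_IS_ENABLED(cfg) (cfg)

	static void set_ssbd_mitigation(bool state)
	{
		/*
		 * The branch condition is a compile-time constant, so the
		 * guarded body can be eliminated when the option is off,
		 * yet the function is always compiled and callable.
		 */
		if (!SKETCH_IS_ENABLED(SKETCH_CONFIG_SSBD)) {
			printf("SSBD disabled by configuration\n");
			return;
		}
		printf("SSBD mitigation %s\n", state ? "enabled" : "disabled");
	}

	int main(void)
	{
		/* Detection code can always reach this, whatever the config. */
		set_ssbd_mitigation(true);
		return 0;
	}
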
arch/arm64/include/asm/cpufeature.h
arch/arm64/kernel/cpu_errata.c

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9776c19d03d45f2a383491ca78df4fbfa89a8a36..166f81b7afee65c9b462ca7bdad1510c84acb012 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -493,11 +493,7 @@ static inline int arm64_get_ssbd_state(void)
 #endif
 }
 
-#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 8b0a141bd01d45a790451abb7396c3da9afc6f27..86c4f4e514274b50c29ed969551a8ca4f3e19086 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -231,7 +231,6 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 }
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
@@ -304,6 +303,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+               pr_info_once("SSBD disabled by kernel configuration\n");
+               return;
+       }
+
        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
@@ -423,7 +427,6 @@ out_printmsg:
 
        return required;
 }
-#endif /* CONFIG_ARM64_SSBD */
 
 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)      \
        .matches = is_affected_midr_range,                      \
@@ -627,14 +630,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .cpu_enable = enable_smccc_arch_workaround_1,
        },
 #endif
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .capability = ARM64_SSBD,
                .matches = has_ssbd_mitigation,
        },
-#endif
        {
        }
 };
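
The commit message notes that the always-built detection exists so the SSB state can later be advertised to userspace; later patches in this series wire that state into the sysfs vulnerabilities interface. As a hedged usage sketch (assuming the running kernel exposes the standard spec_store_bypass entry), userspace could read the reported status like this:

	#include <stdio.h>

	int main(void)
	{
		/* Standard sysfs path for SSB status on kernels that report it. */
		const char *path =
			"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass";
		char line[256];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			printf("spec_store_bypass: %s", line);
		fclose(f);
		return 0;
	}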