powerpc/book3s64: Fix link stack flush on context switch
author Michael Ellerman <mpe@ellerman.id.au>
Wed, 13 Nov 2019 10:05:41 +0000 (21:05 +1100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 Dec 2019 08:14:36 +0000 (09:14 +0100)
commit 39e72bf96f5847ba87cc5bd7a3ce0fed813dc9ad upstream.

In commit ee13cb249fab ("powerpc/64s: Add support for software count
cache flush"), I added support for software to flush the count
cache (indirect branch cache) on context switch if firmware told us
that was the required mitigation for Spectre v2.

As part of that code we also added a software flush of the link
stack (return address stack), which protects against Spectre-RSB
between user processes.

That is all correct for newer CPUs that activate that mitigation, which is
currently Power9 Nimbus DD2.3.

What I got wrong is that on older CPUs, where firmware has disabled
the count cache, we also need to flush the link stack on context
switch.

To fix it we create a new feature bit which is not set by firmware,
which tells us we need to flush the link stack. We set that when
firmware tells us that either of the existing Spectre v2 mitigations
is enabled.
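
In outline (abridged from the security_features.h and security.c hunks
below), the new bit and the place it gets set look like this:

    // Software required to flush link stack on context switch
    #define SEC_FTR_FLUSH_LINK_STACK       0x0000000000001000ull

    /* In setup_count_cache_flush(), before toggle_count_cache_flush(): */
    if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
        security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
            security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);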

Then we adjust the patching code so that if we see that feature bit we
enable the link stack flush. If we're also told to flush the count
cache in software then we fall through and do that also.

On the older CPUs we don't need to do the software count cache
flush (firmware has already disabled the count cache), so in that case
we patch in an early return after the link stack flush.
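
Abridged from the security.c hunk below, the resulting flow in
toggle_count_cache_flush() once either mitigation is wanted is roughly:

    /* This enables the branch from _switch to flush_count_cache */
    patch_branch_site(&patch__call_flush_count_cache,
                      (u64)&flush_count_cache, BRANCH_SET_LINK);
    link_stack_flush_enabled = true;

    /* Link-stack-only case: patch a blr in after the link stack flush */
    if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
            patch_instruction_site(&patch__flush_link_stack_return,
                                   PPC_INST_BLR);
            no_count_cache_flush();
            return;
    }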

The naming of some of the functions is awkward after this patch,
because they're called "count cache" but they now also flush the link stack. But
we'll fix that up in a later commit to ease backporting.

This is the fix for CVE-2019-18660.

Reported-by: Anthony Steinhauser <asteinhauser@google.com>
Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush")
Cc: stable@vger.kernel.org # v4.4+
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
[dja: straightforward backport to v4.14]
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/powerpc/include/asm/asm-prototypes.h
arch/powerpc/include/asm/security_features.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/security.c

index ba4c75062d49ca94becd05b2b8a735f7ec46db7d..fb5f911b0d9101c98229410be8091db844974c04 100644 (file)
@@ -129,6 +129,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 /* Patch sites */
 extern s32 patch__call_flush_count_cache;
 extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
 
 extern long flush_count_cache;
 
index 759597bf0fd867bd6d4c151acb8acce7f7f3ff6b..ccf44c135389a111023a244498f9fab60a8583f5 100644 (file)
@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Software required to flush count cache on context switch
 #define SEC_FTR_FLUSH_COUNT_CACHE      0x0000000000000400ull
 
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK       0x0000000000001000ull
+
 
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
index 12395895b9aa51168b6af8fae3a8623e1ff831c4..02a0bf52aec071f6bc8a2f4025a9e0273813b098 100644 (file)
@@ -524,6 +524,7 @@ flush_count_cache:
        /* Save LR into r9 */
        mflr    r9
 
+       // Flush the link stack
        .rept 64
        bl      .+4
        .endr
@@ -533,6 +534,11 @@ flush_count_cache:
        .balign 32
        /* Restore LR */
 1:     mtlr    r9
+
+       // If we're just flushing the link stack, return here
+3:     nop
+       patch_site 3b patch__flush_link_stack_return
+
        li      r9,0x7fff
        mtctr   r9
 
index cc0aac4bde7556164d7b55db7b1abbf4973e9ee0..88e582d2bad7424b98d15517126ec24c3736c49f 100644 (file)
@@ -24,6 +24,7 @@ enum count_cache_flush_type {
        COUNT_CACHE_FLUSH_HW    = 0x4,
 };
 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+static bool link_stack_flush_enabled;
 
 bool barrier_nospec_enabled;
 static bool no_nospec;
@@ -204,11 +205,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
 
                if (ccd)
                        seq_buf_printf(&s, "Indirect branch cache disabled");
+
+               if (link_stack_flush_enabled)
+                       seq_buf_printf(&s, ", Software link stack flush");
+
        } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
                seq_buf_printf(&s, "Mitigation: Software count cache flush");
 
                if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");
+
+               if (link_stack_flush_enabled)
+                       seq_buf_printf(&s, ", Software link stack flush");
+
        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
@@ -369,18 +378,40 @@ static __init int stf_barrier_debugfs_init(void)
 device_initcall(stf_barrier_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
 
+static void no_count_cache_flush(void)
+{
+       count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+       pr_info("count-cache-flush: software flush disabled.\n");
+}
+
 static void toggle_count_cache_flush(bool enable)
 {
-       if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+       if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
+           !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
+               enable = false;
+
+       if (!enable) {
                patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
-               count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
-               pr_info("count-cache-flush: software flush disabled.\n");
+               pr_info("link-stack-flush: software flush disabled.\n");
+               link_stack_flush_enabled = false;
+               no_count_cache_flush();
                return;
        }
 
+       // This enables the branch from _switch to flush_count_cache
        patch_branch_site(&patch__call_flush_count_cache,
                          (u64)&flush_count_cache, BRANCH_SET_LINK);
 
+       pr_info("link-stack-flush: software flush enabled.\n");
+       link_stack_flush_enabled = true;
+
+       // If we just need to flush the link stack, patch an early return
+       if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+               patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+               no_count_cache_flush();
+               return;
+       }
+
        if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
                count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
                pr_info("count-cache-flush: full software flush sequence enabled.\n");
@@ -399,11 +430,20 @@ void setup_count_cache_flush(void)
        if (no_spectrev2 || cpu_mitigations_off()) {
                if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
                    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
-                       pr_warn("Spectre v2 mitigations not under software control, can't disable\n");
+                       pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
 
                enable = false;
        }
 
+       /*
+        * There's no firmware feature flag/hypervisor bit to tell us we need to
+        * flush the link stack on context switch. So we set it here if we see
+        * either of the Spectre v2 mitigations that aim to protect userspace.
+        */
+       if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
+           security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
+               security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
+
        toggle_count_cache_flush(enable);
 }