powerpc/64s: Add support for software count cache flush
author Michael Ellerman <mpe@ellerman.id.au>
Fri, 29 Mar 2019 11:26:05 +0000 (22:26 +1100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Apr 2019 04:25:13 +0000 (06:25 +0200)
commit ee13cb249fabdff8b90aaff61add347749280087 upstream.

Some CPU revisions support a mode where the count cache needs to be
flushed by software on context switch. Additionally, some revisions may
have a hardware-accelerated flush, in which case the software flush
sequence can be shortened.
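
For reference, the choice between the three resulting modes boils down
to roughly the following (an illustrative sketch only: pick_flush_type()
is a made-up name, and the real decision is made in
toggle_count_cache_flush() in the security.c hunk below):

        static enum count_cache_flush_type pick_flush_type(void)
        {
                /* Firmware did not ask for a count cache flush at all. */
                if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
                        return COUNT_CACHE_FLUSH_NONE;

                /* Hardware assist lets the flush sequence return early. */
                if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST))
                        return COUNT_CACHE_FLUSH_HW;

                /* Otherwise the full software sequence is required. */
                return COUNT_CACHE_FLUSH_SW;
        }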

If we detect the appropriate flag from firmware, we patch a branch
into _switch() which takes us to a count cache flush sequence.

That sequence in turn may be patched to return early if we detect that
the CPU supports accelerating the flush sequence in hardware.
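
Condensed, the enable path performs both patch steps as below (an
illustrative sketch of toggle_count_cache_flush() from the security.c
hunk; enable_count_cache_flush() is a made-up name, and the disable and
reporting paths are omitted):

        static void enable_count_cache_flush(void)
        {
                /* Replace the nop at the patch site in _switch() with
                 * "bl flush_count_cache". */
                patch_branch_site(&patch__call_flush_count_cache,
                                  (u64)&flush_count_cache, BRANCH_SET_LINK);

                /* With the hardware assist the flush sequence can return
                 * early, so patch a blr over the start of the long
                 * software loop. */
                if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST))
                        patch_instruction_site(&patch__flush_count_cache_return,
                                               PPC_INST_BLR);
        }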

Add debugfs support for reporting the state of the flush, as well as
disabling it at runtime.
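
With debugfs mounted, the control file appears under the powerpc debugfs
directory (typically /sys/kernel/debug/powerpc/count_cache_flush,
assuming the default mount point): reading it returns 1 while any flush
is active and 0 otherwise, and writing 0 or 1 disables or re-enables the
flush.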

Also modify the spectre_v2 sysfs file to report the state of the
software flush.
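
That file lives at /sys/devices/system/cpu/vulnerabilities/spectre_v2;
with this patch its "Mitigation:" line gains a "Software count cache
flush" entry, with "(hardware accelerated)" appended when the assisted
sequence is in use.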

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/powerpc/include/asm/asm-prototypes.h
arch/powerpc/include/asm/security_features.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/security.c

arch/powerpc/include/asm/asm-prototypes.h
index 7330150bfe34aa00bf8cc3ce73e0236beced250b..ba4c75062d49ca94becd05b2b8a735f7ec46db7d 100644 (file)
@@ -126,4 +126,10 @@ extern int __ucmpdi2(u64, u64);
 void _mcount(void);
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+
+extern long flush_count_cache;
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */

arch/powerpc/include/asm/security_features.h
index a0d47bc18a5c0a70bf178068e8d5a4f3da659989..759597bf0fd867bd6d4c151acb8acce7f7f3ff6b 100644 (file)
@@ -22,6 +22,7 @@ enum stf_barrier_type {
 
 void setup_stf_barrier(void);
 void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
 
 static inline void security_ftr_set(unsigned long feature)
 {

arch/powerpc/kernel/entry_64.S
index 7a43b27dc6e07fb62bcbd34195d66607c6aeb24f..e40e74e8c63579dbd4ccbadb98570d18b8e9d823 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/thread_info.h>
+#include <asm/code-patching-asm.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
@@ -497,6 +498,57 @@ _GLOBAL(ret_from_kernel_thread)
        li      r3,0
        b       .Lsyscall_exit
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define FLUSH_COUNT_CACHE      \
+1:     nop;                    \
+       patch_site 1b, patch__call_flush_count_cache
+
+
+#define BCCTR_FLUSH    .long 0x4c400420
+
+.macro nops number
+       .rept \number
+       nop
+       .endr
+.endm
+
+.balign 32
+.global flush_count_cache
+flush_count_cache:
+       /* Save LR into r9 */
+       mflr    r9
+
+       .rept 64
+       bl      .+4
+       .endr
+       b       1f
+       nops    6
+
+       .balign 32
+       /* Restore LR */
+1:     mtlr    r9
+       li      r9,0x7fff
+       mtctr   r9
+
+       BCCTR_FLUSH
+
+2:     nop
+       patch_site 2b patch__flush_count_cache_return
+
+       nops    3
+
+       .rept 278
+       .balign 32
+       BCCTR_FLUSH
+       nops    7
+       .endr
+
+       blr
+#else
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
@@ -528,6 +580,8 @@ _GLOBAL(_switch)
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */
 
+       FLUSH_COUNT_CACHE
+
        /*
         * On SMP kernels, care must be taken because a task may be
         * scheduled off CPUx and on to CPUy. Memory ordering must be

arch/powerpc/kernel/security.c
index 206488603b664d3e9922adb2fe4c067e3ccdea97..554d33c7b758d32403b7c737e3af10526dfe2fe8 100644 (file)
@@ -9,12 +9,21 @@
 #include <linux/seq_buf.h>
 
 #include <asm/debugfs.h>
+#include <asm/asm-prototypes.h>
+#include <asm/code-patching.h>
 #include <asm/security_features.h>
 #include <asm/setup.h>
 
 
 unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
 
+enum count_cache_flush_type {
+       COUNT_CACHE_FLUSH_NONE  = 0x1,
+       COUNT_CACHE_FLUSH_SW    = 0x2,
+       COUNT_CACHE_FLUSH_HW    = 0x4,
+};
+static enum count_cache_flush_type count_cache_flush_type;
+
 bool barrier_nospec_enabled;
 static bool no_nospec;
 
@@ -159,17 +168,29 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
 
-       if (bcs || ccd) {
+       if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+               bool comma = false;
                seq_buf_printf(&s, "Mitigation: ");
 
-               if (bcs)
+               if (bcs) {
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
+                       comma = true;
+               }
+
+               if (ccd) {
+                       if (comma)
+                               seq_buf_printf(&s, ", ");
+                       seq_buf_printf(&s, "Indirect branch cache disabled");
+                       comma = true;
+               }
 
-               if (bcs && ccd)
+               if (comma)
                        seq_buf_printf(&s, ", ");
 
-               if (ccd)
-                       seq_buf_printf(&s, "Indirect branch cache disabled");
+               seq_buf_printf(&s, "Software count cache flush");
+
+               if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+                       seq_buf_printf(&s, "(hardware accelerated)");
        } else
                seq_buf_printf(&s, "Vulnerable");
 
@@ -326,4 +347,71 @@ static __init int stf_barrier_debugfs_init(void)
 }
 device_initcall(stf_barrier_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
+
+static void toggle_count_cache_flush(bool enable)
+{
+       if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+               patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+               count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+               pr_info("count-cache-flush: software flush disabled.\n");
+               return;
+       }
+
+       patch_branch_site(&patch__call_flush_count_cache,
+                         (u64)&flush_count_cache, BRANCH_SET_LINK);
+
+       if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+               count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+               pr_info("count-cache-flush: full software flush sequence enabled.\n");
+               return;
+       }
+
+       patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+       count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
+       pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+}
+
+void setup_count_cache_flush(void)
+{
+       toggle_count_cache_flush(true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int count_cache_flush_set(void *data, u64 val)
+{
+       bool enable;
+
+       if (val == 1)
+               enable = true;
+       else if (val == 0)
+               enable = false;
+       else
+               return -EINVAL;
+
+       toggle_count_cache_flush(enable);
+
+       return 0;
+}
+
+static int count_cache_flush_get(void *data, u64 *val)
+{
+       if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+               *val = 0;
+       else
+               *val = 1;
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+                       count_cache_flush_set, "%llu\n");
+
+static __init int count_cache_flush_debugfs_init(void)
+{
+       debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
+                           NULL, &fops_count_cache_flush);
+       return 0;
+}
+device_initcall(count_cache_flush_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
 #endif /* CONFIG_PPC_BOOK3S_64 */