From b3ecd51559ae7a8f40b10443773b9cd0e6a50f5e Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 12 Feb 2016 13:02:07 -0800
Subject: [PATCH] x86/mm/pkeys: Add new 'PF_PK' page fault error code bit

Note: "PK" is how the Intel SDM refers to this bit, so we also use that
nomenclature.

This only defines the bit, it does not plumb it anywhere to be handled.

Signed-off-by: Dave Hansen
Reviewed-by: Thomas Gleixner
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210207.DA7B43E6@viggo.jf.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/fault.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9a3f77..9f72f9c9be4f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -33,6 +33,7 @@
  *   bit 2 ==	 0: kernel-mode access	1: user-mode access
  *   bit 3 ==				1: use of reserved bit detected
  *   bit 4 ==				1: fault was an instruction fetch
+ *   bit 5 ==				1: protection keys block access
  */
 enum x86_pf_error_code {
 
@@ -41,6 +42,7 @@ enum x86_pf_error_code {
 	PF_USER		=		1 << 2,
 	PF_RSVD		=		1 << 3,
 	PF_INSTR	=		1 << 4,
+	PF_PK		=		1 << 5,
 };
 
 /*
@@ -916,6 +918,12 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 
 	if ((error_code & PF_INSTR) && !pte_exec(*pte))
 		return 0;
+	/*
+	 * Note: We do not do lazy flushing on protection key
+	 * changes, so no spurious fault will ever set PF_PK.
+	 */
+	if ((error_code & PF_PK))
+		return 1;
 
 	return 1;
 }
-- 
2.20.1
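
As a rough illustration (not part of the patch above), fault-handling code
elsewhere could test the new bit the same way the other error-code bits are
tested. The helper below is hypothetical and only uses the masks defined in
the x86_pf_error_code enum shown in the diff:

/*
 * Hypothetical sketch only: decode some of the x86 page-fault error code
 * bits defined in arch/x86/mm/fault.c, including the new PF_PK bit.
 * Assumes it lives in a file that already pulls in <linux/printk.h>.
 */
static void describe_pf_error_code(unsigned long error_code)
{
	if (error_code & PF_USER)
		pr_info("fault taken from user mode\n");
	if (error_code & PF_RSVD)
		pr_info("reserved bit set in a paging-structure entry\n");
	if (error_code & PF_INSTR)
		pr_info("fault was an instruction fetch\n");
	if (error_code & PF_PK)
		pr_info("access blocked by a protection key\n");
}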