um: Avoid marking pages with "changed protection"
authorAnton Ivanov <anton.ivanov@cambridgegreys.com>
Wed, 5 Dec 2018 12:37:41 +0000 (12:37 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Feb 2019 18:46:08 +0000 (19:46 +0100)
[ Upstream commit 8892d8545f2d0342b9c550defbfb165db237044b ]

Changing protection is a very high-cost operation in UML
because, in addition to an extra syscall, it also interrupts
mmap merge sequences generated by the TLB code.

While the condition is not particularly common, it is worth
avoiding.

Signed-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/um/include/asm/pgtable.h

index 7485398d07370034e361ea1fe4173eeb9bd51d1c..9c04562310b36630bb3ec008c99fdf05e3597c85 100644 (file)
@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_wrprotect(pte_t pte)
 { 
-       pte_clear_bits(pte, _PAGE_RW);
+       if (likely(pte_get_bits(pte, _PAGE_RW)))
+               pte_clear_bits(pte, _PAGE_RW);
+       else
+               return pte;
        return(pte_mknewprot(pte)); 
 }
 
 static inline pte_t pte_mkread(pte_t pte)
 { 
+       if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+               return pte;
        pte_set_bits(pte, _PAGE_USER);
        return(pte_mknewprot(pte)); 
 }
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 static inline pte_t pte_mkwrite(pte_t pte)     
 {
+       if (unlikely(pte_get_bits(pte,  _PAGE_RW)))
+               return pte;
        pte_set_bits(pte, _PAGE_RW);
        return(pte_mknewprot(pte)); 
 }