From d6d861e3c963b4077c83e078e3e300c4b81f93e7 Mon Sep 17 00:00:00 2001
From: Zachary Amsden
Date: Sat, 30 Sep 2006 23:29:36 -0700
Subject: [PATCH] [PATCH] paravirt: optimize ptep establish for pae

The ptep_establish macro is only used on user-level PTEs, for P->P
mapping changes.  Since these always happen under protection of the
pagetable lock, the strong synchronization of a 64-bit cmpxchg is not
needed; in fact, not even a lock prefix needs to be used.  We can
instead simply clear the P-bit, followed by a normal set.  The write
ordering is still important to avoid the possibility of the TLB
snooping a partially written PTE and getting a bad mapping installed.

Signed-off-by: Zachary Amsden
Cc: Rusty Russell
Cc: Jeremy Fitzhardinge
Cc: Andi Kleen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-i386/pgtable-2level.h |  1 +
 include/asm-i386/pgtable-3level.h | 15 +++++++++++++++
 include/asm-i386/pgtable.h        | 11 +++++++++++
 3 files changed, 27 insertions(+)

diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 201c86a6711..8d8d3b9ecdb 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -16,6 +16,7 @@
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
 #define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 0d899173232..7c58debdb39 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -58,6 +58,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
+/*
+ * Since this is only called on user PTEs, and the page fault handler
+ * must handle the already racy situation of simultaneous page faults,
+ * we are justified in merely clearing the PTE present bit, followed
+ * by a set.  The ordering here is important.
+ */
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+	ptep->pte_low = 0;
+	smp_wmb();
+	ptep->pte_high = pte.pte_high;
+	smp_wmb();
+	ptep->pte_low = pte.pte_low;
+}
+
 #define __HAVE_ARCH_SET_PTE_ATOMIC
 #define set_pte_atomic(pteptr,pteval) \
 	set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index ee9696d2f67..8cb708a6bed 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -269,6 +269,17 @@ do { \
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval) \
+do { \
+	set_pte_present((vma)->vm_mm, address, ptep, pteval); \
+	flush_tlb_page(vma, address); \
+} while (0)
+
 #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
 #define ptep_clear_flush_dirty(vma, address, ptep) \
 ({ \
-- 
2.20.1
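
As a standalone illustration of the ordering argument made in the changelog,
here is a minimal user-space sketch, assuming (as on i386 PAE) that the
present bit lives in the low 32-bit word of the 64-bit PTE.  The struct,
field, and barrier names are illustrative stand-ins for the kernel's pte_t,
pte_low/pte_high and smp_wmb(); the logic mirrors the set_pte_present()
added by the patch above.

	#include <stdint.h>

	/* Stand-in for a PAE pte: two 32-bit halves; the present bit
	 * (bit 0) and the other flags live in the low word. */
	struct pae_pte {
		volatile uint32_t pte_low;
		volatile uint32_t pte_high;
	};

	/* Stand-in for smp_wmb(): on i386 this is typically just a
	 * compiler barrier, since x86 does not reorder stores against
	 * other stores. */
	#define wmb() __asm__ __volatile__("" ::: "memory")

	/*
	 * Present->present update without a locked cmpxchg8b: clearing
	 * the present bit first means a racing hardware page-table walk
	 * sees either the old complete PTE or a not-present PTE, never
	 * the old high half paired with the new low half (or vice versa).
	 */
	static inline void set_pte_present_sketch(struct pae_pte *ptep,
						  uint32_t new_low,
						  uint32_t new_high)
	{
		ptep->pte_low = 0;		/* 1: PTE becomes not-present */
		wmb();
		ptep->pte_high = new_high;	/* 2: install the new high half */
		wmb();
		ptep->pte_low = new_low;	/* 3: new low half, with the present
						 *    bit, is written last */
	}

Step 1 is what lets the lock prefix be dropped: the page-table lock already
excludes other software writers, so the only concurrent reader left is the
hardware walker, and the not-present window plus the store ordering keeps it
from ever installing a mapping built from two different PTE values.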