From: Jeff Dike
Date: Sat, 3 Sep 2005 22:57:41 +0000 (-0700)
Subject: [PATCH] uml: merge duplicated page table code
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=08964c565b2fe49e338ffbe4907adcc19647ef16;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[PATCH] uml: merge duplicated page table code

There is a lot of code duplicated between the 2-level and 3-level
implementations; the only difference is that the 3-level implementation is a
bit more generalized (instead of accessing pte_t.pte directly, it uses the
appropriate access macros).  This patch merges the duplicated code.

Obviously, a "nice core code cleanup" is not a "stability-friendly patch", so
the usual care applies.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso
Signed-off-by: Jeff Dike
Cc: Paolo Giarrusso
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h
index 9b3abc01d60e..ffe017f6b64b 100644
--- a/include/asm-um/pgtable-2level.h
+++ b/include/asm-um/pgtable-2level.h
@@ -35,35 +35,8 @@ static inline int pgd_newpage(pgd_t pgd) { return 0; }
 static inline void pgd_mkuptodate(pgd_t pgd) { }
 
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
-	pte_val(pte) |= _PAGE_NEWPROT;
-	return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
-	pte_val(pte) |= _PAGE_NEWPAGE;
-	return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
-	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
-	 * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
-	 * mapped pages.
-	 */
-	*pteptr = pte_mknewpage(pteval);
-	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_none(x) !(pte_val(x) & ~_PAGE_NEWPAGE)
 #define pte_pfn(x) phys_to_pfn(pte_val(x))
 #define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
 
diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h
index 65e8bfc55fc4..32cd6236f245 100644
--- a/include/asm-um/pgtable-3level.h
+++ b/include/asm-um/pgtable-3level.h
@@ -57,35 +57,6 @@ static inline int pgd_newpage(pgd_t pgd)
 static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
 
-
-#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
-	pte_set_bits(pte, _PAGE_NEWPROT);
-	return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
-	pte_set_bits(pte, _PAGE_NEWPAGE);
-	return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
-	pte_copy(*pteptr, pteval);
-
-	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
-	 * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
-	 * mapped pages.
-	 */
-
-	*pteptr = pte_mknewpage(*pteptr);
-	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 
 #define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -113,13 +84,6 @@ static inline void pud_clear (pud_t * pud) { }
 #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
 			pmd_index(address))
 
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-static inline int pte_none(pte_t pte)
-{
-	return pte_is_zero(pte);
-}
-
 static inline unsigned long pte_pfn(pte_t pte)
 {
 	return phys_to_pfn(pte_val(pte));
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index e9336f616f9c..b48e0966ecd7 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -153,10 +153,24 @@ extern unsigned long pg0[1024];
 
 #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
 
+#define pte_page(x) pfn_to_page(pte_pfn(x))
 #define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
 #define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
 #define phys_addr(p) ((p) & ~REGION_MASK)
 
+#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
+
+/*
+ * =================================
+ * Flags checking section.
+ * =================================
+ */
+
+static inline int pte_none(pte_t pte)
+{
+	return pte_is_zero(pte);
+}
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -212,6 +226,18 @@ static inline int pte_newprot(pte_t pte)
 	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
 }
 
+/*
+ * =================================
+ * Flags setting section.
+ * =================================
+ */
+
+static inline pte_t pte_mknewprot(pte_t pte)
+{
+	pte_set_bits(pte, _PAGE_NEWPROT);
+	return(pte);
+}
+
 static inline pte_t pte_rdprotect(pte_t pte)
 {
 	pte_clear_bits(pte, _PAGE_USER);
@@ -280,6 +306,26 @@ static inline pte_t pte_mkuptodate(pte_t pte)
 	return(pte);
 }
 
+static inline pte_t pte_mknewpage(pte_t pte)
+{
+	pte_set_bits(pte, _PAGE_NEWPAGE);
+	return(pte);
+}
+
+static inline void set_pte(pte_t *pteptr, pte_t pteval)
+{
+	pte_copy(*pteptr, pteval);
+
+	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
+	 * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
+	 * mapped pages.
+	 */
+
+	*pteptr = pte_mknewpage(*pteptr);
+	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
 extern phys_t page_to_phys(struct page *page);
 
 /*
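
Note (not part of the patch): the merge works because the shared helpers in
pgtable.h no longer touch the pte representation directly; they go through the
pte_get_bits()/pte_set_bits()/pte_copy()/pte_is_zero() macros, which the
2-level and 3-level headers are expected to supply for their own pte layouts.
The standalone sketch below only illustrates that idea; the DEMO_3LEVEL
switch, the simplified macro bodies, and the main() harness are invented for
this example and are not the real UML definitions.

/* pte_demo.c -- illustrative sketch, not kernel code. */
#include <stdio.h>

#define _PAGE_PRESENT  0x001
#define _PAGE_NEWPAGE  0x002
#define _PAGE_NEWPROT  0x004
#define _PAGE_PROTNONE 0x008

#ifdef DEMO_3LEVEL
/* two-word pte, only ever touched through the access macros */
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_get_bits(p, bits)	((p).pte_low & (bits))
#define pte_set_bits(p, bits)	((p).pte_low |= (bits))
#define pte_copy(to, from)	do { (to) = (from); } while (0)
#define pte_is_zero(p)		(!((p).pte_low & ~_PAGE_NEWPAGE) && !(p).pte_high)
#else
/* one-word pte */
typedef struct { unsigned long pte; } pte_t;
#define pte_get_bits(p, bits)	((p).pte & (bits))
#define pte_set_bits(p, bits)	((p).pte |= (bits))
#define pte_copy(to, from)	do { (to).pte = (from).pte; } while (0)
#define pte_is_zero(p)		(!((p).pte & ~_PAGE_NEWPAGE))
#endif

/* One shared copy of the helpers, written in the style of the merged pgtable.h */
#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* Swap entries must be marked _PAGE_NEWPAGE so the unmapping code
	 * notices them; _PAGE_NEWPROT only applies to present pages.
	 */
	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}

int main(void)
{
	pte_t pte = { 0 };
	pte_t val = { 0 };

	pte_set_bits(val, _PAGE_PRESENT);
	set_pte(&pte, val);

	printf("present=%d none=%d newpage=%d newprot=%d\n",
	       !!pte_present(pte), pte_none(pte),
	       !!pte_get_bits(pte, _PAGE_NEWPAGE),
	       !!pte_get_bits(pte, _PAGE_NEWPROT));
	return 0;
}

Compiling the sketch with and without -DDEMO_3LEVEL runs the same shared
helpers over the two pte layouts, which is the property the merged code
relies on.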