ARM: pgtable: move TOP_PTE address definitions to arch/arm/mm/mm.h
author Russell King <rmk+kernel@arm.linux.org.uk>
Sat, 2 Jul 2011 13:46:27 +0000 (14:46 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 26 Jan 2012 20:06:14 +0000 (20:06 +0000)
Move the TOP_PTE address definitions to one central place so that it's
easy to discover what they're being used for.  This helps to ensure
that there are no overlaps.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/mm/copypage-v4mc.c
arch/arm/mm/copypage-v6.c
arch/arm/mm/copypage-xscale.c
arch/arm/mm/flush.c
arch/arm/mm/mm.h

index 7d0a8c2303423ec93df81b7ea70778df636c7b64..87a23ca1fc6144e9547ff0c268888057c5c8b727 100644 (file)
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                  L_PTE_MT_MINICACHE)
 
@@ -78,10 +74,10 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
        raw_spin_lock(&minicache_lock);
 
-       set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
-       flush_tlb_kernel_page(0xffff8000);
+       set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+       flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
-       mc_copy_user_page((void *)0xffff8000, kto);
+       mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
        raw_spin_unlock(&minicache_lock);
 
index 3d9a1552cef60321f5349438067149979e055b6e..c00a750144359c3fe9dfe2866b02b44589407d6f 100644 (file)
@@ -24,9 +24,6 @@
 #error FIX ME
 #endif
 
-#define from_address   (0xffff8000)
-#define to_address     (0xffffc000)
-
 static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
@@ -90,11 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
         */
        raw_spin_lock(&v6_lock);
 
-       set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
-       set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
+       kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
+       kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-       kfrom = from_address + (offset << PAGE_SHIFT);
-       kto   = to_address + (offset << PAGE_SHIFT);
+       set_pte_ext(TOP_PTE(kfrom), pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+       set_pte_ext(TOP_PTE(kto), pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
        flush_tlb_kernel_page(kfrom);
        flush_tlb_kernel_page(kto);
@@ -111,8 +108,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
  */
 static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
-       unsigned int offset = CACHE_COLOUR(vaddr);
-       unsigned long to = to_address + (offset << PAGE_SHIFT);
+       unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 
        /* FIXME: not highmem safe */
        discard_old_kernel_data(page_address(page));
@@ -123,7 +119,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
         */
        raw_spin_lock(&v6_lock);
 
-       set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
+       set_pte_ext(TOP_PTE(to), pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
        flush_tlb_kernel_page(to);
        clear_page((void *)to);
 
index 610c24ced3101958a269fa46d53050eb4cac0df7..90f3bb58eafab1a62bc32c794e091457cb85613a 100644 (file)
 
 #include "mm.h"
 
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define COPYPAGE_MINICACHE     0xffff8000
-
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
                                  L_PTE_MT_MINICACHE)
 
index 1a8d4aa821bec152bc1b87736328f7b42b88d1f5..f4d407af4690a7bcd28584f88a55e7f79471cb71 100644 (file)
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 
-#define ALIAS_FLUSH_START      0xffff4000
-
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
-       unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+       unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;
 
        set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
@@ -46,8 +44,8 @@ static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;
 
-       set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
-       to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+       set_pte_ext(TOP_PTE(FLUSH_ALIAS_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+       to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT) + offset;
        flush_tlb_kernel_page(to);
        flush_icache_range(to, to + len);
 }
index 70f6d3ea48340fb7d8ac2aa6d8e9046a467117c8..6ee1ff2c1da641be1b5034c868b2ae737ba9bea1 100644 (file)
@@ -5,6 +5,19 @@ extern pmd_t *top_pmd;
 
 #define TOP_PTE(x)     pte_offset_kernel(top_pmd, x)
 
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently, while 0xffff4000
+ * is reserved for VIPT aliasing flushing by generic code.
+ *
+ * Note that we don't allow VIPT aliasing caches with SMP.
+ */
+#define COPYPAGE_MINICACHE     0xffff8000
+#define COPYPAGE_V6_FROM       0xffff8000
+#define COPYPAGE_V6_TO         0xffffc000
+/* PFN alias flushing, for VIPT caches */
+#define FLUSH_ALIAS_START      0xffff4000
+
 static inline pmd_t *pmd_off_k(unsigned long virt)
 {
        return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);