i386: prepare shared mm/highmem.c
author Thomas Gleixner <tglx@linutronix.de>
Thu, 11 Oct 2007 09:13:55 +0000 (11:13 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 11 Oct 2007 09:13:55 +0000 (11:13 +0200)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/i386/mm/Makefile
arch/i386/mm/highmem.c [deleted file]
arch/i386/mm/highmem_32.c [new file with mode: 0644]

diff --git a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
index 78c242ccaed0aeddc25ac5d538d6acaccae8b094..2c81ad14a62bd22e3d5b3b1f5106afbc37a80da4 100644
--- a/arch/i386/mm/Makefile
+++ b/arch/i386/mm/Makefile
@@ -6,5 +6,5 @@ obj-y   := init_32.o pgtable_32.o fault.o ioremap_32.o extable_32.o pageattr.o mma
 
 obj-$(CONFIG_NUMA) += discontig_32.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_HIGHMEM) += highmem_32.o
 obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap_32.o
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
deleted file mode 100644
index 1c3bf95..0000000
--- a/arch/i386/mm/highmem.c
+++ /dev/null
@@ -1,113 +0,0 @@
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-void *kmap(struct page *page)
-{
-       might_sleep();
-       if (!PageHighMem(page))
-               return page_address(page);
-       return kmap_high(page);
-}
-
-void kunmap(struct page *page)
-{
-       if (in_interrupt())
-               BUG();
-       if (!PageHighMem(page))
-               return;
-       kunmap_high(page);
-}
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-       enum fixed_addresses idx;
-       unsigned long vaddr;
-
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       pagefault_disable();
-
-       if (!PageHighMem(page))
-               return page_address(page);
-
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       BUG_ON(!pte_none(*(kmap_pte-idx)));
-       set_pte(kmap_pte-idx, mk_pte(page, prot));
-       arch_flush_lazy_mmu_mode();
-
-       return (void *)vaddr;
-}
-
-void *kmap_atomic(struct page *page, enum km_type type)
-{
-       return kmap_atomic_prot(page, type, kmap_prot);
-}
-
-void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-       /*
-        * Force other mappings to Oops if they try to access this pte
-        * without first remapping it.  Keeping stale mappings around is
-        * also a bad idea, in case the page changes cacheability
-        * attributes or becomes a protected page in a hypervisor.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
-               kpte_clear_flush(kmap_pte-idx, vaddr);
-       else {
-#ifdef CONFIG_DEBUG_HIGHMEM
-               BUG_ON(vaddr < PAGE_OFFSET);
-               BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
-       }
-
-       arch_flush_lazy_mmu_mode();
-       pagefault_enable();
-}
-
-/* This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-       enum fixed_addresses idx;
-       unsigned long vaddr;
-
-       pagefault_disable();
-
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-       arch_flush_lazy_mmu_mode();
-
-       return (void *)vaddr;
-}
-
-struct page *kmap_atomic_to_page(void *ptr)
-{
-       unsigned long idx, vaddr = (unsigned long)ptr;
-       pte_t *pte;
-
-       if (vaddr < FIXADDR_START)
-               return virt_to_page(ptr);
-
-       idx = virt_to_fix(vaddr);
-       pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-       return pte_page(*pte);
-}
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
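
For context, here is a minimal caller sketch of the API this file implements. It is illustrative only, not part of the patch; the helper names and arguments below are invented. kmap()/kunmap() may sleep and therefore belong in process context, while the kmap_atomic() pair brackets a short region in which sleeping is forbidden, as the comment in the file notes.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller of the sleeping variant: process context only. */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	char *vaddr = kmap(page);	/* may sleep waiting on the kmap pool */

	memcpy(dst, vaddr, len);
	kunmap(page);
}

/* Hypothetical caller of the atomic variant: usable where sleeping is
 * forbidden.  The KM_USER0 slot must match between map and unmap, per
 * the per-CPU enum km_type indexing that kmap_atomic_prot() performs. */
static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}
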
diff --git a/arch/i386/mm/highmem_32.c b/arch/i386/mm/highmem_32.c
new file mode 100644
index 0000000..1c3bf95
--- /dev/null
+++ b/arch/i386/mm/highmem_32.c
@@ -0,0 +1,113 @@
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+void *kmap(struct page *page)
+{
+       might_sleep();
+       if (!PageHighMem(page))
+               return page_address(page);
+       return kmap_high(page);
+}
+
+void kunmap(struct page *page)
+{
+       if (in_interrupt())
+               BUG();
+       if (!PageHighMem(page))
+               return;
+       kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+
+       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       pagefault_disable();
+
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       BUG_ON(!pte_none(*(kmap_pte-idx)));
+       set_pte(kmap_pte-idx, mk_pte(page, prot));
+       arch_flush_lazy_mmu_mode();
+
+       return (void *)vaddr;
+}
+
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+       return kmap_atomic_prot(page, type, kmap_prot);
+}
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+       unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+       /*
+        * Force other mappings to Oops if they try to access this pte
+        * without first remapping it.  Keeping stale mappings around is
+        * also a bad idea, in case the page changes cacheability
+        * attributes or becomes a protected page in a hypervisor.
+        */
+       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+               kpte_clear_flush(kmap_pte-idx, vaddr);
+       else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+               BUG_ON(vaddr < PAGE_OFFSET);
+               BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+       }
+
+       arch_flush_lazy_mmu_mode();
+       pagefault_enable();
+}
+
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+
+       pagefault_disable();
+
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+       arch_flush_lazy_mmu_mode();
+
+       return (void *)vaddr;
+}
+
+struct page *kmap_atomic_to_page(void *ptr)
+{
+       unsigned long idx, vaddr = (unsigned long)ptr;
+       pte_t *pte;
+
+       if (vaddr < FIXADDR_START)
+               return virt_to_page(ptr);
+
+       idx = virt_to_fix(vaddr);
+       pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+       return pte_page(*pte);
+}
+
+EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_to_page);
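
A similar hedged sketch for the last two helpers, again with hypothetical callers rather than anything in this patch: kmap_atomic_pfn() maps a raw page frame number for memory that has no struct page behind it, and kmap_atomic_to_page() recovers the struct page behind an atomic mapping, falling back to virt_to_page() for lowmem addresses.

#include <linux/highmem.h>
#include <linux/types.h>

/* Hypothetical caller: read one word from a frame known only by pfn. */
static unsigned long peek_pfn_word(unsigned long pfn)
{
	unsigned long *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	unsigned long val = *vaddr;

	kunmap_atomic(vaddr, KM_USER0);	/* unmap by virtual address */
	return val;
}

/* Hypothetical round trip through kmap_atomic_to_page(): a fixmap
 * address is resolved via the kmap pte, while a lowmem address falls
 * back to virt_to_page(). */
static bool mapping_matches(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);
	bool ok = (kmap_atomic_to_page(vaddr) == page);

	kunmap_atomic(vaddr, KM_USER0);
	return ok;
}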