sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*
Author:     David Hildenbrand <dahi@linux.vnet.ibm.com>
AuthorDate: Mon, 11 May 2015 15:52:09 +0000 (17:52 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 19 May 2015 06:39:14 +0000 (08:39 +0200)
The existing code relies on pagefault_disable() implicitly disabling
preemption, so that no schedule will happen between kmap_atomic() and
kunmap_atomic().

Let's make this explicit, to prepare for pagefault_disable() not
touching preemption anymore.
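
For illustration only (not part of the patch), a minimal caller sketch --
the helper copy_from_highpage() is hypothetical -- showing the contract
that kmap_atomic() users rely on, which the explicit preempt_disable()/
preempt_enable() pair now guarantees on its own:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Hypothetical example: the mapping returned by kmap_atomic() is
     * per-CPU, so the caller must not schedule between kmap_atomic()
     * and kunmap_atomic().  After this change that guarantee comes from
     * the explicit preempt_disable()/preempt_enable() pair rather than
     * from pagefault_disable() implicitly touching the preempt count.
     */
    static void copy_from_highpage(struct page *page, void *dst, size_t len)
    {
            void *src = kmap_atomic(page);  /* disables preemption and pagefaults */

            memcpy(dst, src, len);          /* no sleeping or scheduling allowed here */

            kunmap_atomic(src);             /* re-enables pagefaults and preemption */
    }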

Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: benh@kernel.crashing.org
Cc: bigeasy@linutronix.de
Cc: borntraeger@de.ibm.com
Cc: daniel.vetter@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: hocko@suse.cz
Cc: hughd@google.com
Cc: mst@redhat.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: schwidefsky@de.ibm.com
Cc: yang.shi@windriver.com
Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
15 files changed:
arch/arm/mm/highmem.c
arch/frv/mm/highmem.c
arch/metag/mm/highmem.c
arch/microblaze/mm/highmem.c
arch/mips/mm/highmem.c
arch/mn10300/include/asm/highmem.h
arch/parisc/include/asm/cacheflush.h
arch/powerpc/mm/highmem.c
arch/sparc/mm/highmem.c
arch/tile/mm/highmem.c
arch/x86/mm/highmem_32.c
arch/x86/mm/iomap_32.c
arch/xtensa/mm/highmem.c
include/linux/highmem.h
include/linux/io-mapping.h

diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index b98895d9fe57cc4bde62392a3f7144b9ab23c70f..ee8dfa793989785488a306a9edd8b7899f3f1f3b 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
        void *kmap;
        int type;
 
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        int idx, type;
        struct page *page = pfn_to_page(pfn);
 
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index bed9a9bd3c10c84e004c845839f0ad53c0565e45..785344bbdc07c360e81768c8472336bebd0baa3e 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
        unsigned long paddr;
        int type;
 
+       preempt_disable();
        pagefault_disable();
        type = kmap_atomic_idx_push();
        paddr = page_to_phys(page);
@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
        }
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
index d71f621a2c0b92adb5679162d1cd59568440986b..807f1b1c4e6567738f676c935ad21483e4972a32 100644
--- a/arch/metag/mm/highmem.c
+++ b/arch/metag/mm/highmem.c
@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        int type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        unsigned long vaddr;
        int type;
 
+       preempt_disable();
        pagefault_disable();
 
        type = kmap_atomic_idx_push();
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index 5a92576fad927127eb05fbe24833b57e18ba7155..2fcc5a52d84d1c2cf25d0cc45c356ce14549d74e 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
 #endif
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index da815d295239baaaf6e1d1069e2255baa8d75358..11661cbc11a8193f7a9817fb0101fd9483db9f23 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        unsigned long vaddr;
        int idx, type;
 
+       preempt_disable();
        pagefault_disable();
 
        type = kmap_atomic_idx_push();
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index 2fbbe4d920aa2efb353ed5fd52babaf309a386db..1ddea5afba09344ba8e807e6ce8f5edc5c26241f 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        int idx, type;
 
+       preempt_disable();
        pagefault_disable();
        if (page < highmem_start_page)
                return page_address(page);
@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 
        if (vaddr < FIXADDR_START) { /* FIXME */
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 #endif /* __KERNEL__ */
 
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index de65f66ea64e7538f4f7c431ca3800e86c152e5d..ec2df4bab3022dfc35a83539afcd24207e199f9f 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page)
 {
+       preempt_disable();
        pagefault_disable();
        return page_address(page);
 }
@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
 {
        flush_kernel_dcache_page_addr(addr);
        pagefault_enable();
+       preempt_enable();
 }
 
 #define kmap_atomic_prot(page, prot)   kmap_atomic(page)
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index e7450bdbe83a9380264fc149c4831b587226cd36..e292c8a609523bd30e860f5e1a469872389e7421 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
 
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 449f864f0cefdb8918bf0002d40dc87ba2cb8db3..a454ec5ff07af7f33d8ec548051101c3895e9b82 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        long idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
 
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 6aa2f26254471e730e29b0c1f1382c3f7a4d0f64..fcd545014e79dcc83a662f4db5a37e7511e695a0 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        int idx, type;
        pte_t *pte;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
 
        /* Avoid icache flushes by disallowing atomic executable mappings. */
@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 4500142bc4aa46429cb2be41a7ee3407426f6155..eecb207a2037080f9f5d74c36c300b217a4f7a82 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
 
        if (!PageHighMem(page))
@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9ca35fc60cfeaa1a8c461f76956cd22d587226a7..2b7ece0e103aec9e501c6018f00aa6113e6999f5 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
+       preempt_disable();
        pagefault_disable();
 
        type = kmap_atomic_idx_push();
@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 8cfb71ec0937369a8adf79a3d6e754f589cbba5f..184ceadccc1a3aca946b3c9e609a2efb91046807 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
        enum fixed_addresses idx;
        unsigned long vaddr;
 
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9286a46b7d69b539f027bcc890b3be976d20f228..6aefcd0031a6bd013cf322d1021812fcf84250c2 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page)
 {
+       preempt_disable();
        pagefault_disable();
        return page_address(page);
 }
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
 static inline void __kunmap_atomic(void *addr)
 {
        pagefault_enable();
+       preempt_enable();
 }
 
 #define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 657fab4efab351070a9aaf7a625759159a786580..c27dde7215b5b291394747d35e1f4a19d9f1ac8e 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -141,6 +141,7 @@ static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset)
 {
+       preempt_disable();
        pagefault_disable();
        return ((char __force __iomem *) mapping) + offset;
 }
@@ -149,6 +150,7 @@ static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
        pagefault_enable();
+       preempt_enable();
 }
 
 /* Non-atomic map/unmap */