mm: strictly nested kmap_atomic()
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Tue, 26 Oct 2010 21:21:47 +0000 (14:21 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Oct 2010 23:52:08 +0000 (16:52 -0700)
Ensure kmap_atomic() usage is strictly nested: reorder the kunmap_atomic()
calls so that mappings are released in the reverse order of the corresponding
kmap_atomic() calls (KM_USER1 unmapped before KM_USER0), giving last-mapped,
first-unmapped behavior at every call site.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
crypto/async_tx/async_memcpy.c
crypto/blkcipher.c
drivers/block/loop.c
include/linux/highmem.h
kernel/power/snapshot.c

index 0ec1fb69d4eacc2a310504caa9109186f9e89370..518c22bd9562ee20cb040c63d7f9eb6a62b4d5c0 100644 (file)
@@ -83,8 +83,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
                memcpy(dest_buf, src_buf, len);
 
-               kunmap_atomic(dest_buf, KM_USER0);
                kunmap_atomic(src_buf, KM_USER1);
+               kunmap_atomic(dest_buf, KM_USER0);
 
                async_tx_sync_epilog(submit);
        }
index 90d26c91f4e9e183b2a41363318344e3d646c20a..7a7219266e3cc343d1a9a63a492a9cd38e876db5 100644 (file)
@@ -89,9 +89,9 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
-               blkcipher_unmap_src(walk);
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
+               blkcipher_unmap_src(walk);
        }
 
        scatterwalk_advance(&walk->in, n);
index 6c48b3545f84583d0e32f1e87308c39a42b2133c..450c958b514fea941e5f11de2b248dec6e7477c0 100644 (file)
@@ -101,8 +101,8 @@ static int transfer_none(struct loop_device *lo, int cmd,
        else
                memcpy(raw_buf, loop_buf, size);
 
-       kunmap_atomic(raw_buf, KM_USER0);
        kunmap_atomic(loop_buf, KM_USER1);
+       kunmap_atomic(raw_buf, KM_USER0);
        cond_resched();
        return 0;
 }
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
        for (i = 0; i < size; i++)
                *out++ = *in++ ^ key[(i & 511) % keysize];
 
-       kunmap_atomic(raw_buf, KM_USER0);
        kunmap_atomic(loop_buf, KM_USER1);
+       kunmap_atomic(raw_buf, KM_USER0);
        cond_resched();
        return 0;
 }
index e3060ef85b6dead79e7922faaaffd1351f209e95..283cd47bb34c4a9d71e28e71ea70c89460f715b2 100644 (file)
@@ -201,8 +201,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
-       kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
+       kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif
@@ -214,8 +214,8 @@ static inline void copy_highpage(struct page *to, struct page *from)
        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
-       kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
+       kunmap_atomic(vfrom, KM_USER0);
 }
 
 #endif /* _LINUX_HIGHMEM_H */
index ac7eb109f19635d11930a9cef03ed4cd352cc235..9e3581f4619a3e8acc740ef476bf603bc57c04bb 100644 (file)
@@ -984,8 +984,8 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
                src = kmap_atomic(s_page, KM_USER0);
                dst = kmap_atomic(d_page, KM_USER1);
                do_copy_page(dst, src);
-               kunmap_atomic(src, KM_USER0);
                kunmap_atomic(dst, KM_USER1);
+               kunmap_atomic(src, KM_USER0);
        } else {
                if (PageHighMem(d_page)) {
                        /* Page pointed to by src may contain some kernel
@@ -2273,8 +2273,8 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
        memcpy(buf, kaddr1, PAGE_SIZE);
        memcpy(kaddr1, kaddr2, PAGE_SIZE);
        memcpy(kaddr2, buf, PAGE_SIZE);
-       kunmap_atomic(kaddr1, KM_USER0);
        kunmap_atomic(kaddr2, KM_USER1);
+       kunmap_atomic(kaddr1, KM_USER0);
 }
 
 /**