mm, vmalloc: change iterating a vmlist to find_vm_area()
author Joonsoo Kim <js1304@gmail.com>
Mon, 29 Apr 2013 22:07:27 +0000 (15:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 29 Apr 2013 22:54:33 +0000 (15:54 -0700)
This patchset removes vm_struct list management after vmalloc is
initialized.  Adding and removing an entry on vmlist takes linear time,
which is inefficient.  As long as we maintain this list, the overall
time complexity of adding and removing an area in the vmalloc space
stays O(N), even though we use an rbtree to find a vacant place and
that lookup is only O(logN).
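
To make the asymptotic difference concrete, here is a minimal sketch of
the two lookup strategies (hypothetical types and helper names for
illustration only, not the kernel's own):

	struct list_node {		/* vmlist-style linked list */
		void *addr;
		struct list_node *next;
	};

	/* O(N): the worst case walks every node. */
	static struct list_node *list_lookup(struct list_node *head, void *addr)
	{
		struct list_node *p;

		for (p = head; p; p = p->next)
			if (p->addr == addr)
				return p;
		return NULL;
	}

	struct tree_node {		/* rbtree-style ordered binary tree */
		void *addr;
		struct tree_node *left, *right;
	};

	/* O(logN) when balanced: each step halves the search space. */
	static struct tree_node *tree_lookup(struct tree_node *root, void *addr)
	{
		while (root) {
			if (addr < root->addr)
				root = root->left;
			else if (addr > root->addr)
				root = root->right;
			else
				return root;
		}
		return NULL;
	}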

In addition, vmlist and vmlist_lock are used in many places outside of
vmalloc.c.  It is preferable to hide these raw data structures and
provide well-defined functions for the operations callers need, because
that prevents callers from manipulating the structures incorrectly and
makes the vmalloc layer easier to maintain.

For kexec and makedumpfile, I export vmap_area_list instead of vmlist.
This follows Atsushi's recommendation.  For more information, please
refer to the link below: https://lkml.org/lkml/2012/12/6/184

This patch:

The only purpose of iterating over the vmlist in these callers is to
find the vm area covering a specific virtual address.  find_vm_area()
is provided for exactly this purpose and is more efficient because it
uses an rbtree lookup, so switch to it.
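
For reference, the rbtree-backed helper looks roughly like this (a
sketch close to the mm/vmalloc.c implementation of this era; treat the
body as illustrative rather than authoritative):

	struct vm_struct *find_vm_area(const void *addr)
	{
		struct vmap_area *va;

		/* O(logN) rbtree lookup, taken under vmap_area_lock */
		va = find_vmap_area((unsigned long)addr);
		if (va && va->flags & VM_VM_AREA)
			return va->vm;

		return NULL;
	}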

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/tile/mm/pgtable.c
arch/unicore32/mm/ioremap.c
arch/x86/mm/ioremap.c

diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index b3b4972c245171e8188f120997a0c4f8c786499e..dfd63ce873273b2d9055b416e58e4d81e3eeb920 100644
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
-       read_lock(&vmlist_lock);
-       for (p = vmlist; p; p = p->next) {
-               if (p->addr == addr)
-                       break;
-       }
-       read_unlock(&vmlist_lock);
+       p = find_vm_area((void *)addr);
 
        if (!p) {
                pr_err("iounmap: bad address %p\n", addr);
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a605597b08cf914f501628b552612844078378..13068ee22f33ded1a66b9207f8068a5aa29905f1 100644
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
 void __uc32_iounmap(volatile void __iomem *io_addr)
 {
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-       struct vm_struct **p, *tmp;
+       struct vm_struct *vm;
 
        /*
         * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
         * all the mappings before the area can be reclaimed
         * by someone else.
         */
-       write_lock(&vmlist_lock);
-       for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-               if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-                       if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
-                               unmap_area_sections((unsigned long)tmp->addr,
-                                                   tmp->size);
-                       }
-                       break;
-               }
-       }
-       write_unlock(&vmlist_lock);
+       vm = find_vm_area(addr);
+       if (vm && (vm->flags & VM_IOREMAP) &&
+               (vm->flags & VM_UNICORE_SECTION_MAPPING))
+               unmap_area_sections((unsigned long)vm->addr, vm->size);
 
        vunmap(addr);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f6d278687337f1887e360787ac3a2..9a1e6583910c2e5ada549f52ff8dcb19639c3cd3 100644
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
-       read_lock(&vmlist_lock);
-       for (p = vmlist; p; p = p->next) {
-               if (p->addr == (void __force *)addr)
-                       break;
-       }
-       read_unlock(&vmlist_lock);
+       p = find_vm_area((void __force *)addr);
 
        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);