mm, vmalloc: export vmap_area_list, instead of vmlist
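Note: only the mm/vmalloc.c side of the change appears below. Because vmap_area_list loses its "static" qualifier and gains an "Export for kexec only" comment, the companion hunk in include/linux/vmalloc.h presumably just declares the list for external users such as the kexec/makedumpfile path. A guess at that one-liner (it is not part of this file's diff):

	/* presumed companion declaration in include/linux/vmalloc.h, not shown here */
	extern struct list_head vmap_area_list;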
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f751f2068c3f02d7b69b5fd90f4354831172eb5..7e63984eb585b2bf264edb0370da11128475eefc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -261,7 +261,8 @@ struct vmap_area {
 };
 
 static DEFINE_SPINLOCK(vmap_area_lock);
-static LIST_HEAD(vmap_area_list);
+/* Export for kexec only */
+LIST_HEAD(vmap_area_list);
 static struct rb_root vmap_area_root = RB_ROOT;
 
 /* The vmap cache globals are protected by vmap_area_lock */
@@ -272,6 +273,10 @@ static unsigned long cached_align;
 
 static unsigned long vmap_area_pcpu_hole;
 
+/*** Old vmalloc interfaces ***/
+static DEFINE_RWLOCK(vmlist_lock);
+static struct vm_struct *vmlist;
+
 static struct vmap_area *__find_vmap_area(unsigned long addr)
 {
        struct rb_node *n = vmap_area_root.rb_node;
@@ -1283,26 +1288,31 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
-/*** Old vmalloc interfaces ***/
-DEFINE_RWLOCK(vmlist_lock);
-struct vm_struct *vmlist;
-
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, const void *caller)
 {
+       spin_lock(&vmap_area_lock);
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
        va->flags |= VM_VM_AREA;
+       spin_unlock(&vmap_area_lock);
 }
 
 static void insert_vmalloc_vmlist(struct vm_struct *vm)
 {
        struct vm_struct *tmp, **p;
 
+       /*
+        * Before removing VM_UNLIST,
+        * we should make sure that vm has proper values.
+        * Pair with smp_rmb() in show_numa_info().
+        */
+       smp_wmb();
        vm->flags &= ~VM_UNLIST;
+
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr >= vm->addr)
@@ -1447,6 +1457,11 @@ struct vm_struct *remove_vm_area(const void *addr)
        if (va && va->flags & VM_VM_AREA) {
                struct vm_struct *vm = va->vm;
 
+               spin_lock(&vmap_area_lock);
+               va->vm = NULL;
+               va->flags &= ~VM_VM_AREA;
+               spin_unlock(&vmap_area_lock);
+
                if (!(vm->flags & VM_UNLIST)) {
                        struct vm_struct *tmp, **p;
                        /*
@@ -2005,7 +2020,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 
 long vread(char *buf, char *addr, unsigned long count)
 {
-       struct vm_struct *tmp;
+       struct vmap_area *va;
+       struct vm_struct *vm;
        char *vaddr, *buf_start = buf;
        unsigned long buflen = count;
        unsigned long n;
@@ -2014,10 +2030,17 @@ long vread(char *buf, char *addr, unsigned long count)
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;
 
-       read_lock(&vmlist_lock);
-       for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-               vaddr = (char *) tmp->addr;
-               if (addr >= vaddr + tmp->size - PAGE_SIZE)
+       spin_lock(&vmap_area_lock);
+       list_for_each_entry(va, &vmap_area_list, list) {
+               if (!count)
+                       break;
+
+               if (!(va->flags & VM_VM_AREA))
+                       continue;
+
+               vm = va->vm;
+               vaddr = (char *) vm->addr;
+               if (addr >= vaddr + vm->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
@@ -2027,10 +2050,10 @@ long vread(char *buf, char *addr, unsigned long count)
                        addr++;
                        count--;
                }
-               n = vaddr + tmp->size - PAGE_SIZE - addr;
+               n = vaddr + vm->size - PAGE_SIZE - addr;
                if (n > count)
                        n = count;
-               if (!(tmp->flags & VM_IOREMAP))
+               if (!(vm->flags & VM_IOREMAP))
                        aligned_vread(buf, addr, n);
                else /* IOREMAP area is treated as memory hole */
                        memset(buf, 0, n);
@@ -2039,7 +2062,7 @@ long vread(char *buf, char *addr, unsigned long count)
                count -= n;
        }
 finished:
-       read_unlock(&vmlist_lock);
+       spin_unlock(&vmap_area_lock);
 
        if (buf == buf_start)
                return 0;
@@ -2078,7 +2101,8 @@ finished:
 
 long vwrite(char *buf, char *addr, unsigned long count)
 {
-       struct vm_struct *tmp;
+       struct vmap_area *va;
+       struct vm_struct *vm;
        char *vaddr;
        unsigned long n, buflen;
        int copied = 0;
@@ -2088,10 +2112,17 @@ long vwrite(char *buf, char *addr, unsigned long count)
                count = -(unsigned long) addr;
        buflen = count;
 
-       read_lock(&vmlist_lock);
-       for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-               vaddr = (char *) tmp->addr;
-               if (addr >= vaddr + tmp->size - PAGE_SIZE)
+       spin_lock(&vmap_area_lock);
+       list_for_each_entry(va, &vmap_area_list, list) {
+               if (!count)
+                       break;
+
+               if (!(va->flags & VM_VM_AREA))
+                       continue;
+
+               vm = va->vm;
+               vaddr = (char *) vm->addr;
+               if (addr >= vaddr + vm->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
@@ -2100,10 +2131,10 @@ long vwrite(char *buf, char *addr, unsigned long count)
                        addr++;
                        count--;
                }
-               n = vaddr + tmp->size - PAGE_SIZE - addr;
+               n = vaddr + vm->size - PAGE_SIZE - addr;
                if (n > count)
                        n = count;
-               if (!(tmp->flags & VM_IOREMAP)) {
+               if (!(vm->flags & VM_IOREMAP)) {
                        aligned_vwrite(buf, addr, n);
                        copied++;
                }
@@ -2112,7 +2143,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                count -= n;
        }
 finished:
-       read_unlock(&vmlist_lock);
+       spin_unlock(&vmap_area_lock);
        if (!copied)
                return 0;
        return buflen;
@@ -2519,19 +2550,19 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
-       __acquires(&vmlist_lock)
+       __acquires(&vmap_area_lock)
 {
        loff_t n = *pos;
-       struct vm_struct *v;
+       struct vmap_area *va;
 
-       read_lock(&vmlist_lock);
-       v = vmlist;
-       while (n > 0 && v) {
+       spin_lock(&vmap_area_lock);
+       va = list_entry((&vmap_area_list)->next, typeof(*va), list);
+       while (n > 0 && &va->list != &vmap_area_list) {
                n--;
-               v = v->next;
+               va = list_entry(va->list.next, typeof(*va), list);
        }
-       if (!n)
-               return v;
+       if (!n && &va->list != &vmap_area_list)
+               return va;
 
        return NULL;
 
@@ -2539,16 +2570,20 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-       struct vm_struct *v = p;
+       struct vmap_area *va = p, *next;
 
        ++*pos;
-       return v->next;
+       next = list_entry(va->list.next, typeof(*va), list);
+       if (&next->list != &vmap_area_list)
+               return next;
+
+       return NULL;
 }
 
 static void s_stop(struct seq_file *m, void *p)
-       __releases(&vmlist_lock)
+       __releases(&vmap_area_lock)
 {
-       read_unlock(&vmlist_lock);
+       spin_unlock(&vmap_area_lock);
 }
 
 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
@@ -2559,6 +2594,11 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
                if (!counters)
                        return;
 
+               /* Pair with smp_wmb() in insert_vmalloc_vmlist() */
+               smp_rmb();
+               if (v->flags & VM_UNLIST)
+                       return;
+
                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
 
                for (nr = 0; nr < v->nr_pages; nr++)
@@ -2572,7 +2612,20 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 
 static int s_show(struct seq_file *m, void *p)
 {
-       struct vm_struct *v = p;
+       struct vmap_area *va = p;
+       struct vm_struct *v;
+
+       if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+               return 0;
+
+       if (!(va->flags & VM_VM_AREA)) {
+               seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
+                       (void *)va->va_start, (void *)va->va_end,
+                                       va->va_end - va->va_start);
+               return 0;
+       }
+
+       v = va->vm;
 
        seq_printf(m, "0x%pK-0x%pK %7ld",
                v->addr, v->addr + v->size, v->size);
@@ -2645,5 +2698,53 @@ static int __init proc_vmalloc_init(void)
        return 0;
 }
 module_init(proc_vmalloc_init);
+
+void get_vmalloc_info(struct vmalloc_info *vmi)
+{
+       struct vmap_area *va;
+       unsigned long free_area_size;
+       unsigned long prev_end;
+
+       vmi->used = 0;
+       vmi->largest_chunk = 0;
+
+       prev_end = VMALLOC_START;
+
+       spin_lock(&vmap_area_lock);
+
+       if (list_empty(&vmap_area_list)) {
+               vmi->largest_chunk = VMALLOC_TOTAL;
+               goto out;
+       }
+
+       list_for_each_entry(va, &vmap_area_list, list) {
+               unsigned long addr = va->va_start;
+
+               /*
+                * Some archs keep another range for modules in vmalloc space
+                */
+               if (addr < VMALLOC_START)
+                       continue;
+               if (addr >= VMALLOC_END)
+                       break;
+
+               if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+                       continue;
+
+               vmi->used += (va->va_end - va->va_start);
+
+               free_area_size = addr - prev_end;
+               if (vmi->largest_chunk < free_area_size)
+                       vmi->largest_chunk = free_area_size;
+
+               prev_end = va->va_end;
+       }
+
+       if (VMALLOC_END - prev_end > vmi->largest_chunk)
+               vmi->largest_chunk = VMALLOC_END - prev_end;
+
+out:
+       spin_unlock(&vmap_area_lock);
+}
 #endif
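
The recurring pattern in this patch is: walk vmap_area_list under vmap_area_lock and skip entries that are being lazily freed or that have no vm_struct attached, instead of walking vmlist under vmlist_lock. For illustration, here is a minimal sketch of that pattern as a hypothetical helper (not part of the patch), written as if it lived inside mm/vmalloc.c where struct vmap_area, VM_VM_AREA and the VM_LAZY_* flags are visible; it counts the mapped areas and sums their sizes the same way the vread() and get_vmalloc_info() loops above do.

/* Hypothetical helper, for illustration only; not part of this patch. */
static void count_vmalloc_areas(unsigned int *nr_areas, unsigned long *total_size)
{
	struct vmap_area *va;

	*nr_areas = 0;
	*total_size = 0;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		/* Skip areas queued for lazy freeing, as s_show() does. */
		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
			continue;
		/* Only count areas with a vm_struct attached (vm_map_ram areas have none). */
		if (!(va->flags & VM_VM_AREA))
			continue;

		(*nr_areas)++;
		*total_size += va->va_end - va->va_start;
	}
	spin_unlock(&vmap_area_lock);
}

Holding vmap_area_lock for the whole walk is also what lets vread()/vwrite() copy safely: remove_vm_area() now clears va->vm and VM_VM_AREA under that same lock, which is presumably what keeps a vm_struct seen inside the loop from being torn down mid-copy.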